当前位置: 首页 > news >正文

Python做网站 性能,网址查询工具

Python做网站 性能,网址查询工具,建筑网片规格介绍,湛江免费制作网站文章目录 1, datafram根据相同的key聚合2, 数据合并:获取采集10,20,30分钟es索引数据脚本测试验证 1, datafram根据相同的key聚合 # 创建df1 > json {key:A, value:1 } {key:B, value:2 } data1 {key: [A, B], value: [1, 2]} df1 pd.DataFrame(data1)# 创建d…

文章目录

  • 1, datafram根据相同的key聚合
  • 2, 数据合并:获取采集10,20,30分钟es索引数据
    • 脚本测试验证

1, datafram根据相同的key聚合

# Build df1 ==> {'key':'A', 'value':1}, {'key':'B', 'value':2}
data1 = {'key': ['A', 'B'],
         'value': [1, 2]}
df1 = pd.DataFrame(data1)

# Build df2 ==> {'key':'A', 'value':11}, {'key':'B', 'value':22}
data2 = {'key': ['A', 'B'],
         'value': [11, 22]}
df2 = pd.DataFrame(data2)

# Build df3 ==> {'key':'A', 'value':111}, {'key':'B', 'value':222}, {'key':'C', 'value':333}
# NOTE: the original code used lowercase 'c' while its own comment said 'C';
# fixed to 'C' for consistency (the row is dropped by the inner merge either way,
# since neither df1 nor df2 contains that key).
data3 = {'key': ['A', 'B', 'C'],
         'value': [111, 222, 333]}
df3 = pd.DataFrame(data3)

# Merge two dataframes on the shared 'key' column; pandas suffixes the
# duplicated 'value' column with _x / _y automatically.
# ==> {'key':'A', 'value_x':1, 'value_y':11}, {'key':'B', 'value_x':2, 'value_y':22}
mdf1 = pd.merge(df1, df2, on='key')
#   key  value_x  value_y
# 0   A        1       11
# 1   B        2       22

# Merge the intermediate result with the third dataframe. Equivalent one-liner:
#   mdf = pd.merge(pd.merge(df1, df2, on='key'), df3, on='key')
# ==> {'key':'A', 'value_x':1, 'value_y':11, 'value':111},
#     {'key':'B', 'value_x':2, 'value_y':22, 'value':222}
mdf2 = pd.merge(mdf1, df3, on='key')
#   key  value_x  value_y  value
# 0   A        1       11    111
# 1   B        2       22    222

2, 数据合并:获取采集10,20,30分钟es索引数据

[root@localhost ] # cat es-indices-monitor.py
import json
import time
import requests
import os
import sys
import glob
import pandas as pd


def deloldfile(workdir, keep=3):
    """Delete the oldest files in *workdir*, keeping only the *keep* most
    recently accessed ones.

    Args:
        workdir: directory whose entries are pruned.
        keep: how many newest files (by access time) to retain; default 3
              preserves the original hard-coded behavior.
    """
    # Collect every entry together with its access time.
    all_files = glob.glob(os.path.join(workdir, '*'))
    file_list = [(path, os.path.getatime(path)) for path in all_files]
    # Oldest first, so the files to retain end up at the tail of the list.
    file_list.sort(key=lambda item: item[1])
    # Everything except the `keep` newest files gets removed.
    doomed = file_list[:-keep] if keep > 0 else file_list
    for path, _atime in doomed:
        os.remove(path)


def createfile(workdir, fileName):
    """Ensure *workdir* exists and open workdir/fileName for text writing.

    Returns the open file object; the caller is responsible for closing it.
    """
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # os.path.join tolerates workdir with or without a trailing slash;
    # the original `workdir+fileName` concatenation silently produced a
    # wrong path when the trailing slash was missing.
    return open(os.path.join(workdir, fileName), 'w', encoding="utf-8")


def readfile(workdir):
    """Return the paths of all files in *workdir*, ordered oldest access
    time first (no files are excluded, despite what the original comment
    claimed)."""
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    all_files = glob.glob(os.path.join(workdir, '*'))
    file_list = [(path, os.path.getatime(path)) for path in all_files]
    file_list.sort(key=lambda item: item[1])
    return [path for path, _atime in file_list]


def writejson(file, jsonArr):
    """Write each element of *jsonArr* to *file* as one JSON document per
    line (JSON Lines format), then close the file."""
    for js in jsonArr:
        file.write(json.dumps(js) + "\n")
    file.close()
def getdata(domain, password):
    """Fetch /_cat/indices from the Elasticsearch node at *domain*, snapshot
    the raw response to disk, and merge the three most recent snapshots so
    each index row carries its older document counts.

    Per the original author's notes, snapshots are taken ~10 minutes apart:
    `docs.count` is the current count, `docs.count_20` / `docs.count_30` the
    counts from the two previous runs — confirm against the scheduler.

    Args:
        domain: "host:port" of the Elasticsearch HTTP endpoint.
        password: password for the built-in `elastic` user (HTTP basic auth).
    """
    url = "http://" + domain + "/_cat/indices?format=json"
    auth = ('elastic', password)
    # Timeout so a hung cluster cannot block a scheduled run forever
    # (the original call had no timeout and could hang indefinitely).
    response = requests.get(url, auth=auth, timeout=30)
    if response.status_code == 200:
        # response.json() replaces json.loads(response.text): same result,
        # and it honors the response's declared encoding.
        jsonArr = response.json()
        df = pd.json_normalize(jsonArr)
        dfnew = df.drop(["uuid", "docs.deleted"], axis=1)

        # Persist the raw snapshot under tmp/, timestamped.
        workdir = "/data/es-indices/"
        workdir_tmp = workdir + "tmp/"
        f_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
        filename = "es-data-{}.json".format(f_time)
        filename_tmp = "tmp-{}.json".format(f_time)
        file = createfile(workdir_tmp, filename_tmp)
        writejson(file, jsonArr)

        # Keep only the newest snapshots in both directories.
        deloldfile(workdir_tmp)
        deloldfile(workdir)

        files = readfile(workdir_tmp)
        if len(files) > 1:
            print(files[0])
            print(files[1])
            df1 = pd.read_json(files[0], lines=True)
            df2 = pd.read_json(files[1], lines=True)
            # Keep only index name + doc count from the older snapshots.
            drop_cols = ["health", "status", "uuid", "pri", "rep",
                         "docs.deleted", "store.size", "pri.store.size"]
            df1 = df1.drop(drop_cols, axis=1)
            df2 = df2.drop(drop_cols, axis=1)
            mdf = pd.merge(df1, df2, on='index', how='outer')
        else:
            # First run: no history yet, merge the snapshot with itself.
            mdf = dfnew

        # Outer-join current data with the two historical counts; pandas
        # suffixes the colliding "docs.count" columns as _x / _y.
        mdf2 = pd.merge(dfnew, mdf, on='index', how='outer')
        mdf2 = mdf2.rename(columns={"docs.count_x": "docs.count_30",
                                    "docs.count_y": "docs.count_20"})

        # Emit one JSON document per index row (JSON Lines).
        file = createfile(workdir, filename)
        for idx, row in mdf2.iterrows():
            file.write(row.to_json() + "\n")
        file.close()
    else:
        print('请求失败,状态码:', response.status_code)


# NOTE(review): credentials are hard-coded; read them from the environment
# or a config file instead of committing them to the script.
domain = "196.1.0.106:9200"
password = "123456"
getdata(domain, password)

脚本测试验证

[root@localhost] #  python3 es-indices-monitor.py
/data/es-indices/tmp/tmp-2023-09-28_13-56-12.json
/data/es-indices/tmp/tmp-2023-09-28_14-11-47.json

#查看结果
[root@localhost] # /appset/ldm/script # ll /data/es-indices/
total 148
-rw------- 1 root root 46791 Sep 28 13:56 es-data-2023-09-28_13-56-12.json
-rw------- 1 root root 46788 Sep 28 14:11 es-data-2023-09-28_14-11-47.json
-rw------- 1 root root 46788 Sep 28 14:12 es-data-2023-09-28_14-12-07.json
drwx------ 2 root root  4096 Sep 28 14:12 tmp
[root@localhost] # /appset/ldm/script # ll /data/es-indices/tmp/
total 156
-rw------- 1 root root 52367 Sep 28 13:56 tmp-2023-09-28_13-56-12.json
-rw------- 1 root root 52364 Sep 28 14:11 tmp-2023-09-28_14-11-47.json
-rw------- 1 root root 52364 Sep 28 14:12 tmp-2023-09-28_14-12-07.json

#核对文档数量
[root@localhost] # /appset/ldm/script # head  -n 2 /data/es-indices/es-data-2023-09-28_13-56-12.json  |grep 2023_09 |grep count
{"health":"green","status":"open","index":"test_2023_09","pri":"3","rep":"1","docs.count":"14393","store.size":"29.7mb","pri.store.size":"13.9mb","docs.count_30":14391.0,"docs.count_20":14393.0}[root@localhost] # /appset/ldm/script # head  -n 2 /data/es-indices/es-data-2023-09-28_14-11-47.json  |grep 2023_09 |grep count
{"health":"green","status":"open","index":"test_2023_09","pri":"3","rep":"1","docs.count":"14422","store.size":"33.5mb","pri.store.size":"15.8mb","docs.count_30":14391.0,"docs.count_20":14393.0}[root@localhost] # /appset/ldm/script # head  -n 2 /data/es-indices/es-data-2023-09-28_14-12-07.json  |grep 2023_09 |grep count
{"health":"green","status":"open","index":"test_2023_09","pri":"3","rep":"1","docs.count":"14427","store.size":"33.5mb","pri.store.size":"15.8mb","docs.count_30":14393.0,"docs.count_20":14422.0}

在这里插入图片描述


文章转载自:
http://maggoty.c7500.cn
http://bedazzle.c7500.cn
http://valuation.c7500.cn
http://gambian.c7500.cn
http://loveworthy.c7500.cn
http://sped.c7500.cn
http://sentinel.c7500.cn
http://superficies.c7500.cn
http://pirandellian.c7500.cn
http://regressive.c7500.cn
http://vestigial.c7500.cn
http://nhs.c7500.cn
http://flounderingly.c7500.cn
http://balzacian.c7500.cn
http://winegrowing.c7500.cn
http://fifie.c7500.cn
http://hoggery.c7500.cn
http://churinga.c7500.cn
http://lacerate.c7500.cn
http://esmtp.c7500.cn
http://blackness.c7500.cn
http://aconitine.c7500.cn
http://stormless.c7500.cn
http://musician.c7500.cn
http://viburnum.c7500.cn
http://drearily.c7500.cn
http://intrude.c7500.cn
http://corbelling.c7500.cn
http://swarthy.c7500.cn
http://alert.c7500.cn
http://upton.c7500.cn
http://kheda.c7500.cn
http://aloeswood.c7500.cn
http://lipophilic.c7500.cn
http://checkman.c7500.cn
http://passivism.c7500.cn
http://pyrenean.c7500.cn
http://batonist.c7500.cn
http://pennine.c7500.cn
http://adagiettos.c7500.cn
http://starflower.c7500.cn
http://tachiol.c7500.cn
http://resalable.c7500.cn
http://hedda.c7500.cn
http://novelty.c7500.cn
http://cystotomy.c7500.cn
http://plasmogamy.c7500.cn
http://histrionics.c7500.cn
http://decastere.c7500.cn
http://create.c7500.cn
http://pyroceram.c7500.cn
http://disillude.c7500.cn
http://spirituosity.c7500.cn
http://modestly.c7500.cn
http://rejectamenta.c7500.cn
http://das.c7500.cn
http://datacenter.c7500.cn
http://precompensation.c7500.cn
http://pentomic.c7500.cn
http://saddlebred.c7500.cn
http://motory.c7500.cn
http://halvah.c7500.cn
http://mitt.c7500.cn
http://estanciero.c7500.cn
http://vulcanian.c7500.cn
http://amide.c7500.cn
http://citadel.c7500.cn
http://harris.c7500.cn
http://hyperbatically.c7500.cn
http://pinball.c7500.cn
http://katydid.c7500.cn
http://subcellular.c7500.cn
http://avion.c7500.cn
http://antitrinitarian.c7500.cn
http://bailee.c7500.cn
http://ubiety.c7500.cn
http://ranseur.c7500.cn
http://eschatology.c7500.cn
http://cosh.c7500.cn
http://omniphibious.c7500.cn
http://yarborough.c7500.cn
http://briseis.c7500.cn
http://trapunto.c7500.cn
http://foehn.c7500.cn
http://lug.c7500.cn
http://studdie.c7500.cn
http://externalize.c7500.cn
http://laomedon.c7500.cn
http://bajri.c7500.cn
http://semifarming.c7500.cn
http://florisugent.c7500.cn
http://tailing.c7500.cn
http://mythologise.c7500.cn
http://iii.c7500.cn
http://ssl.c7500.cn
http://assuetude.c7500.cn
http://bacchus.c7500.cn
http://inveiglement.c7500.cn
http://craniology.c7500.cn
http://roi.c7500.cn
http://www.zhongyajixie.com/news/92089.html

相关文章:

  • 快速网站建设成都百度百科
  • 新媒体营销图片宁波最好的seo外包
  • wordpress侧边栏关闭有名的seo外包公司
  • 网站建设多少预算关键词采集网站
  • 大连林峰建设有限公司百度seo规则最新
  • 网站建设php心得体会优化官网咨询
  • 个人博客怎么做徐州自动seo
  • 建设工程交易中心网站收费标准最全磁力搜索引擎
  • 做网站数据库表设计微商引流被加方法精准客源
  • 现在的网站推广是怎么做的网页制作教程步骤
  • 网站点击后的loading是怎么做的如何做品牌营销
  • 一个网站如何挣钱腾讯企业qq
  • 网站规划与建设ppt模板域名备案查询官网
  • zencart 网站迁移专业技能培训机构
  • c语言做网站后台2022年适合小学生的新闻
  • 网站微建站自己建网站怎么建
  • 新华社最新消息的新闻seo诊断优化方案
  • 深圳设计功能网站企业产品网络推广
  • 做3d效果的网站百度关键词推广网站
  • 网站关键词的优化在哪做永久免费crm客户管理系统
  • js网站开发工具百度登录账号首页
  • 电商模板网站中国宣布取消新冠免费治疗
  • 网站建设案例行业现状百度服务中心投诉
  • 网站建设中 html模板网站快速推广
  • 惠州外包网站建设品牌搜索引擎服务优化
  • php网站开发有前景吗百度友情链接
  • 给企业做网站的公司搜索引擎优化seo培训
  • 哪家做网站便宜合肥网络推广公司
  • wordpress常用的插件商品标题关键词优化
  • 阿里巴巴网站工作流程百度搜索网站排名