Commit a328fa0a authored by 桂秋月's avatar 桂秋月

1

parent a3694992
No preview for this file type
......@@ -58,8 +58,14 @@ mysql_info={
"pwd":"123456",
"host":"172.16.9.89",
"port":"3306"
}
,
},
"feature_aliyun":{
"host":"aliyun-test.mysql.rds.aliyuncs.com",
"pwd":"NaL7F7j522Kn8D5kJz",
"name":"tjzimu_test",
"port":"3306"
},
"usergroup":{
"name":"tjzimuali_test",
"pwd":"bYut032QgkJ7ElMXwW",
......
......@@ -15,17 +15,20 @@ def mysqlInfo(namespace,dbtype='mysql'):
:param dbtype:查询的类型,目前支持mysql和mongo
:return: 获取namespace对应的mysql信息,对大数据的mysql不适用,此时需要自定义mysql_info
"""
url="https://holmes.liangkebang.com/k8s/service/detail?namespace={}" \
"&serviceType=base&serviceName={dbtype}".format(namespace,dbtype=dbtype)
url="https://doraemon.quantgroups.cn/ack/service/detail?namespace={}&serviceType=base&serviceName={dbtype}".format(namespace,dbtype=dbtype)
# url="https://holmes.liangkebang.com/k8s/service/detail?namespace={}" \
# "&serviceType=base&serviceName={dbtype}".format(namespace,dbtype=dbtype)
result=mysql_info.get(str(namespace)) or {}
if result:
return result
try:
resp=requests.get(url)
result['host']=jsonpath.jsonpath(resp.json(),'$..lanIp')[0]
#print(resp.json(),url)
result['host']=jsonpath.jsonpath(resp.json(),'$..nodeIp')[0]
result['port']=jsonpath.jsonpath(resp.json(),'$..nodePort')[0]
result['pwd']='qatest'
result['name']='qa'
#print(result)
return result
except:
raise Exception("该namespace[{}]找不到对应的mysql信息".format(namespace))
......@@ -39,7 +42,7 @@ def mysql_universal(namespace):
return temp
def execmysl(namespace,sql):
if 'select' not in sql or 'from' not in sql:
if 'select' not in sql.strip() and 'alter' not in sql.strip() and 'update' not in sql:
raise Exception('在mysql看来不是有效的sql',sql)
try:
print(sql)
......@@ -73,5 +76,5 @@ def concatSql(sql,**kwargs):
return sql+' where '+temp[:-4]
if __name__ == '__main__':
print(mysqlInfo('test1'))
#print(execmysl('test1',"select key_word as word,key_word_url as jumpUrl,icon_url as iconUrl from t_app_config_search_keyword where status=1 and DATE_FORMAT(end_time,'%Y-%m-%d') >= DATE_FORMAT('2022-06-14 15:38:50','%Y-%m-%d') and DATE_FORMAT('2022-06-14 15:38:50','%Y-%m-%d') >= DATE_FORMAT(start_time,'%Y-%m-%d') order by sort"))
\ No newline at end of file
#print(mysqlInfo('test1'))
print(execmysl('bd',"select 1 "))
\ No newline at end of file
......@@ -4,6 +4,7 @@ from collections import defaultdict
from urllib import parse
REDIS_CONFIG = {
"119":{
'REDIS_NODES': [
{'host': '172.24.17.119', 'port': 6371},
{'host': '172.24.17.119', 'port': 6372},
......@@ -15,6 +16,21 @@ REDIS_CONFIG = {
'REDIS_EXPIRE_TIME': 26 * 3600,
'REDIS_MAX_CONNECTIONS': 50,
'REDIS_PASSWD': 'redis',
},"89":{
'REDIS_NODES': [
{'host': '172.16.9.89', 'port': 6371},
{'host': '172.16.9.89', 'port': 6372},
{'host': '172.16.9.89', 'port': 6373},
{'host': '172.16.9.89', 'port': 6374},
{'host': '172.16.9.89', 'port': 6375},
{'host': '172.16.9.89', 'port': 6376}
],
'REDIS_EXPIRE_TIME': 6000,
'REDIS_MAX_CONNECTIONS': 50,
'REDIS_PASSWD':None
}
}
dapan_redis_config={
......@@ -27,33 +43,25 @@ dapan_redis_config={
'REDIS_MAX_CONNECTIONS': 50,
'REDIS_PASSWD': '1234',
}
from rediscluster import RedisCluster
def redis_conn(key):
    """Build a RedisCluster client for the REDIS_CONFIG entry named *key*."""
    cfg = REDIS_CONFIG.get(key)
    # decode_responses=True so keys/values come back as str, not bytes
    return RedisCluster(
        startup_nodes=cfg.get('REDIS_NODES'),
        max_connections=cfg.get('REDIS_MAX_CONNECTIONS'),
        password=cfg.get('REDIS_PASSWD'),
        decode_responses=True,
    )
redis_db_conn_119 = RedisCluster(startup_nodes=REDIS_CONFIG.get('REDIS_NODES'),
max_connections=REDIS_CONFIG.get('REDIS_MAX_CONNECTIONS'),
password=REDIS_CONFIG.get('REDIS_PASSWD'), decode_responses=True)
# dapan_redis_db_conn = RedisCluster(startup_nodes=dapan_redis_config.get('REDIS_NODES'),
# max_connections=dapan_redis_config.get('REDIS_MAX_CONNECTIONS'),
# password=dapan_redis_config.get('REDIS_PASSWD'),
# decode_responses=True,
# skip_full_coverage_check=True)
def getRedisValue(key,conntype=None,oper='select'):
def getRedisValue(redis_key,key,conntype=None,oper='select'):
'''
:param key:
:param conntype: 区分不同的redis连接
:param oper:如果是delete,则做删除操作。目前只支持delete,select
:return:select->获取所有匹配到key的值
'''
redis_db_conn=redis_conn(redis_key)
if 'select' in key or 'from' in key:
raise Exception('在redis看来不是有效的key',key)
redis_value = defaultdict()
if conntype == 'dapan':
redis_db_conn=dapan_redis_db_conn
else:
redis_db_conn=redis_db_conn_119
uuid_redis_result=redis_db_conn.keys(key.strip())
print('模糊匹配到的所有key:',uuid_redis_result)
if uuid_redis_result and oper=='delete':
......@@ -85,7 +93,7 @@ def getRedisValue(key,conntype=None,oper='select'):
if __name__ == '__main__':
userUuid='00003a93-2a32-4501-b338-755b6cb1ec49'
#t=getRedisValue("order_product_graph:179240378044417")
t=getRedisValue("pay_product_graph:179240378044417")
t=getRedisValue('89',"*_same_product_batch_uuid*")
print(t)
## search_fm_offline_feature:{md5(搜索词)}_query_offline
......
from databaseConn import *
from tools import *
from tools.publicFun import *
from tools.httprequest import *
from tools.listOperation import *
from databaseConn.mysqlOperation import *
#from databaseConn.redisOperation import *
from feature.publicSql import *
feature_path=os.path.dirname(__file__)
feature_report=os.path.join(feature_path,'tempFile')
## pre-loan (贷前) service domains
loan_url="https://finance-feature-calc-test.tjzimu.com" ## pre-loan domain
mid_url="https://finance-feature-box-test.tjzimu.com" ## mid-loan (贷中) domain
pre_loan_url=loan_url+"/calc/features"
mid_loan_url=mid_url+'/feature/query'
## consumption-map whitelist; phone column is already md5-hashed
map_white_sql="""
select phone from finance_offline_feature.del_20220627
"""
## pre-loan / mid-loan users, phone numbers NOT hashed
feature_user_sql="""
select phone_no,uuid from xyqb_user.user order by rand() limit {num}
"""
import pandas as pd
import requests
from feature import *
#codes=["user_consumption_map_whitelist"]
def devResult(uuid, codes, codetype='pre'):
    """
    Query the live feature service for one feature code of one user.

    :param uuid: user uuid to query
    :param codes: single feature code string (used to name the result column)
    :param codetype: 'pre' hits the pre-loan calc endpoint, anything else the
        mid-loan query endpoint
    :return: {'uuid': uuid, '<codes>_dev': value or None when no match}
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'rc_auth_key': 'rc_offline',
        'rc_auth_secret': '9d9}bc24!e1z1x3`(x~4r29d$+45n3)\'zb696b$85e>_]p2&4f{,a3~8b3e_ldt^'
    }
    temp = {'uuid': uuid}
    body = {
        "codes": codes
    }
    if codetype == 'pre':
        body["user_uuid"] = uuid
        url = pre_loan_url
    else:
        url = mid_loan_url
        body['tradeNo'] = 0
        body['userUuid'] = uuid
    t = requests.post(url, data=body, headers=headers)
    print(t.json())
    # bug fix: the original referenced the undefined global name `code`
    # (NameError); use the `codes` parameter. jsonpath returns False when
    # nothing matches, so guard before indexing.
    match = jsonpath.jsonpath(t.json(), '$..{}.value'.format(codes))
    temp[codes + '_dev'] = match[0] if match else None
    return temp
def baseinfo(num=10):
    """Fetch *num* random users and replace phone_no with its md5 hex digest."""
    users = execmysl('feature_aliyun', feature_user_sql.format(num=num))
    users = users.astype(str)
    # whitelist table stores hashed phones, so hash ours to match
    users['phone_no'] = users['phone_no'].apply(lambda phone: md5jiami(phone.encode()))
    return users
def getwhite(code, num=10):
    """
    Mark which of *num* random users are on the consumption-map whitelist and
    write uuid + flag to an excel report.

    :param code: feature code; '<code>_test' names the flag column and report
    :param num: number of random users to sample
    :return: the annotated DataFrame, or None when anything fails
    """
    feature_user_df = baseinfo(num)
    try:
        map_white_change_sql = concatSql(map_white_sql, **{"phone": feature_user_df['phone_no'].to_list()})
        map_white_df = execmysl('feature_aliyun', map_white_change_sql)
        newcolumes = code + '_test'
        # bug fix: the original ran the identical .apply twice back-to-back;
        # also test membership against a set (O(1)) instead of a list per row
        white_phones = set(map_white_df['phone'].to_list())
        feature_user_df[newcolumes] = feature_user_df['phone_no'].apply(
            lambda x: 1 if x in white_phones else 0)
        reportname = filePath(feature_report, newcolumes)
        feature_user_df[['uuid', newcolumes]].to_excel(
            reportname, index=0, encoding='utf-8', float_format=str)
        return feature_user_df
    except Exception:
        # best-effort: log and fall through to an implicit None
        traceback.print_exc()
def dev_compare(code, codetype='pre', filepath=None):
    """
    Compare offline expected results (from an excel file) against the live
    feature service and write a per-row correctness report.

    :param code: feature code column base name ('<code>_test' vs '<code>_dev')
    :param codetype: passed through to devResult ('pre' or mid-loan)
    :param filepath: excel file with expected results; defaults to the
        pre-generated map_white result file
    """
    temp = []
    # bug fix: the original unconditionally overwrote the filepath argument,
    # making the parameter useless
    if filepath is None:
        filepath = os.path.join(feature_path, 'tempFile', 'map_white_result_1656582391.xlsx')
    test_df = readFile(filepath)
    test_df = test_df.rename(
        columns={'user_consumption_map_whitelist_test': 'userConsumptionMapWhitelistCalc#isWhite_test'})
    for i in test_df['uuid'].to_list():
        temp.append(devResult(i, code, codetype))
    dev_df = pd.DataFrame(temp)
    df = pd.merge(test_df, dev_df, on='uuid', how='left')
    print(df.columns)
    df['结果是否正确'] = df[code + '_test'] == df[code + '_dev']
    df = df.astype(str)
    reportname = filePath(feature_report, code)
    df.to_excel(reportname, index=0, encoding='utf-8', float_format=str)
if __name__ == '__main__':
    # NOTE(review): the first assignment is dead — it is immediately
    # overwritten by the line below
    code='user_consumption_map_whitelist'
    code='userConsumptionMapWhitelistCalc#isWhite'
    # compare offline results against the mid-loan endpoint
    print(dev_compare(code,codetype='mid'))
    #print(getwhite(code,1000))
    #print(devResult('58f23a26-df3e-4d3e-80c2-8a4fece16c0e'))
......@@ -10,4 +10,7 @@ from recommend.publicSql import *
cur_dir=os.path.dirname(os.path.abspath(__file__))
file_path=os.path.join(cur_dir,'tempFile')
nearline_url_aws="http://aws-online-recommend-parent.ec-test.qg-rc.net"
nearline_url_aliyun="http://172.16.9.90:19999"
\ No newline at end of file
nearline_url_aliyun="http://172.16.9.90:19999"
search_url_aliyun="http://172.16.9.90:8091"
nearline_url_bd="https://online-recommend-bd.liangkebang.net"
\ No newline at end of file
from recommend import *
\ No newline at end of file
import json
import requests
from recommend import *
......@@ -24,10 +26,10 @@ def diwenci(uuid=None,gid=None):
word=jsonpath.jsonpath(temp.json(),'$..word')
return word
def searchTopic(content):
def searchTopic(uuid,gid,content):
#uuid,gid=genUuidDeviceid()
uuid=""
gid="4205df9318c69521"
#uuid=""
#gid="58A9A9D2-6445-4550-9715-72151CC1A726"
print("uuid:",uuid,"====>","gid",gid)
#diwenci_result=diwenci(uuid,gid)
result_skus=preconditions(content,uuid,gid)
......@@ -35,44 +37,86 @@ def searchTopic(content):
print("搜索结果的skus,",result_skus)
#query+similar_query
similar_temp=[]
similar_change_sql=recommend_hot_query_similar_info_sql+" where hot_query="+"'"+str(content)+"'" +" order by rank asc"
similar_change_sql=recommend_hot_query_similar_info_sql+" where hot_query="+"'"+str(content)+"'" +" order by rank asc limit 5"
similar_df=execmysl('89',similar_change_sql)
similar_query_word=similar_df['hot_query_similar'].to_list() if not similar_df.empty else []
similar_temp+=similar_query_word
print("相似热词结果:",similar_temp)
##sku+query
if not result_skus:
print("底纹词,",diwenci_result)
#print("底纹词,",diwenci_result)
##如果搜索没有商品数,则结果=相似搜索词+底纹词,并去重
return removeRepeat(similar_temp+diwenci_result[:3])
return removeRepeat(similar_temp+diwenci_result)[:8],result_skus
sku_match_change_sql=concatSql(recommend_sku_query_match_info_sql,**{"sku_no":result_skus}) +'order by rank asc'
sku_match_df=execmysl('89',sku_match_change_sql)
sku_match_change_df=sku_match_df.astype(str)[['sku_no','query']]. \
groupby(by='query')['sku_no'].count().reset_index(name='count') \
sku_match_change_df=sku_match_df.astype(str)[['sku_no','query','rank']]. \
groupby(by=['query'])['sku_no'].count().reset_index(name='count') \
if not sku_match_df.empty else []
##通过query分组拿到skuno,如果3个元素的值都小于1,则空列表
try:
sku_match_temp=sku_match_change_df[sku_match_change_df['count']>1].sort_values(by='count',ascending=False)['query'].to_list()
if len(result_skus)==1:
sku_match_temp=sku_match_change_df['query'].to_list()
else:
sku_match_temp=sku_match_change_df[sku_match_change_df['count']>1].sort_values(by=['count'],ascending=0)['query'].to_list()
except:
sku_match_temp=[]
finally:
if not (similar_temp and sku_match_temp):
return removeRepeat(diwenci_result)
print("商品相关词:",sku_match_temp)
if not similar_temp and not sku_match_temp:
return removeRepeat(diwenci_result),result_skus
##从sku的query结果中,去掉相似query,然后2边交叉排序
sku_match_temp_remove_from_similar=bingji(sku_match_temp,similar_temp)
##因为随机取不好弄,所以拿到所有数据交叉的
return listCross(similar_temp,sku_match_temp_remove_from_similar)
return removeRepeat(listCross(similar_temp,sku_match_temp_remove_from_similar)+diwenci_result)[:8],result_skus
def supplementSearch(uuid, gid, searchword):
    """
    Call the search-replenishment endpoint for the first 3 candidate words and
    collect every returned skuNo.

    :param uuid: user uuid ('' allowed)
    :param gid: device id ('' allowed)
    :param searchword: list of candidate search words
    :return: flat list of sku numbers (may contain duplicates)
    """
    temp = []
    url = search_url_aliyun + '/searchReplenishment'
    headers = {
        "XQG-USER-UUID": uuid if uuid else "",
        "Content-Type": "application/json"
    }
    body = {
        "deviceId": gid if gid else "",
        "userUuid": uuid if uuid else "",
        "extraParams": {
            "terminalType": 1
        },
        "searchContent": "",
        "sortType": 1
    }
    for word in searchword[:3]:
        body['searchContent'] = word
        t = requests.post(url, headers=headers, data=json.dumps(body))
        skus = jsonpath.jsonpath(t.json(), '$..skuNo')
        # bug fix: jsonpath returns False when nothing matches, and
        # `temp += False` raises TypeError — only extend on a real match
        if skus:
            temp += skus
    return temp
def main(uuid, gid, searchword):
    """Run the search-topic flow, then the supplement search, and report overlap."""
    words, search_skus = searchTopic(uuid, gid, searchword)
    recommend_supplement_result = supplementSearch(uuid, gid, words)
    print("补量的推荐商品数据如下:", len(recommend_supplement_result), recommend_supplement_result)
    print("补量的搜索商品数据:", search_skus)
    if search_skus:
        # skus returned by both the search and the supplement call
        overlap = [sku for sku in search_skus if sku in recommend_supplement_result]
        print("以上数据是否有重复:", overlap if len(overlap) >= 1 else 0)
    print("补量的推荐词如下:", words)
    return words, recommend_supplement_result
if __name__ == '__main__':
print(searchTopic('苹果耳机'))
uuid=""
gid="65E052A9-7C04-433A-80B9-927964707456"
searchword='beats耳机'
print("最终推荐结果:",main(uuid,gid,searchword))
......@@ -19,7 +19,7 @@ def getseed(top10_sku):
result['sku_nos']=top10_sku
sql=concatSql(skuinfo_sql,**{'sku_no':top10_sku})
print(sql)
df=execmysl(119,sql)
df=execmysl(89,sql)
df['cid3']=df['cid3'].astype('string')
df['brand_id']=df['brand_id'].astype('string')
cid3=df.groupby(by=['cid3']).groups.__repr__()#['cid3']#.max()
......@@ -69,14 +69,16 @@ def seedSkus(top10_sku):
if __name__ == '__main__':
# top10_sku=[1,2,3,4]
# print(similarskus(top10_sku))
aa={}
skus={'skuinfo': [{'id': 13573, 'sku_no': '10982072256513', 'price': 139.9,'cid1': 1320, 'cid2': 1583, 'cid3': '1592', 'brand_name': '蜀道香','brand_id': 'nan', 'source_type': 6},
{'id': 229065184, 'sku_no': '275043759168001', 'price': 25.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃','brand_id': 'nan', 'source_type': 1},
{'id': 154060057, 'sku_no': '37993171924993', 'price': 39.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
{'id': 154058134, 'sku_no': '37993180305409', 'price': 36.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
{'id': 55340, 'sku_no': '37993239032321', 'price': 21.9, 'cid1': 1320, 'cid2': 1583,'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
{'id': 534359815, 'sku_no': '37993297751553', 'price': 69.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '盐津铺子', 'brand_id': 2574.0, 'source_type': 2},
{'id': 337773081, 'sku_no': '37993473904641', 'price': 35.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
{'id': 154060077, 'sku_no': '37993633292801', 'price': 13.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
{'id': 56569, 'sku_no': '37993926891009', 'price': 19.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2}]}
print('最终结果:',seedSkus(skus))
\ No newline at end of file
# aa={}
# skus={'skuinfo': [{'id': 13573, 'sku_no': '10982072256513', 'price': 139.9,'cid1': 1320, 'cid2': 1583, 'cid3': '1592', 'brand_name': '蜀道香','brand_id': 'nan', 'source_type': 6},
# {'id': 229065184, 'sku_no': '275043759168001', 'price': 25.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃','brand_id': 'nan', 'source_type': 1},
# {'id': 154060057, 'sku_no': '37993171924993', 'price': 39.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
# {'id': 154058134, 'sku_no': '37993180305409', 'price': 36.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
# {'id': 55340, 'sku_no': '37993239032321', 'price': 21.9, 'cid1': 1320, 'cid2': 1583,'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
# {'id': 534359815, 'sku_no': '37993297751553', 'price': 69.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '盐津铺子', 'brand_id': 2574.0, 'source_type': 2},
# {'id': 337773081, 'sku_no': '37993473904641', 'price': 35.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
# {'id': 154060077, 'sku_no': '37993633292801', 'price': 13.9, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2},
# {'id': 56569, 'sku_no': '37993926891009', 'price': 19.0, 'cid1': 1320, 'cid2': 1583, 'cid3': '1590', 'brand_name': '口水娃', 'brand_id': 10590.0, 'source_type': 2}]}
# print('最终结果:',seedSkus(skus))
top10_sku=['327855415236097', ' 323546673847809', ' 266105822909441', ' 437347360180225', '265358221515265', '265347593669633', '46676152944641', '294644949195265', '256787949822465', '277221256729601']
print(getseed(top10_sku))
\ No newline at end of file
# -*- coding: utf-8 -*-
from recommend import *
# Scratch script: explore DataFrame -> dict conversion and groupby .groups.
tempresult = defaultdict(list)  # NOTE(review): currently unused accumulator
# sample rows; two records share a == 1 to exercise the groupby below.
# bug fix: the original assigned `a` twice — the first literal was dead code.
a = [{"a": 1, "b": 11}, {"a": 1, "b": 22}, {"a": 3, "b": 33}]
df = pd.DataFrame(a)
# stringify every cell (NaNs become 'invalid') and collapse to column lists
temp = df.fillna('invalid').astype(str).to_dict(orient='list')
print(temp)
for i in temp['a']:
    print(i, type(i))
print(df)
# .groups maps each distinct 'a' value to the row index labels holding it
t = df.groupby(by=['a']).groups
print(t, type(t))
\ No newline at end of file
......@@ -2,7 +2,7 @@ from databaseConn import *
from tools import *
from tools.fileOperation import *
from tools.listOperation import *
from databaseConn.redisOperation import *
#from databaseConn.redisOperation import *
from tools.publicFun import *
cur_dir=os.path.dirname(os.path.abspath(__file__))
file_path=os.path.join(cur_dir,'tempFile')
......
......@@ -17,6 +17,8 @@ def requestsend(uuid,deviceid,searchContent,selectedActivitie,page=1,searchtype=
raise Exception('env[{}]只能:test|pro'.format(env))
if env=='online':
baseurl="http://caesar-gateway.q-gp.com"
elif env=="test":
baseurl="http://172.16.9.90:8091"
else:
baseurl="http://open-search-engine.ec-{env}.qg-rc.net".format(env=env) if env=='test' \
else "http://open-search-engine.qg-{env}.qgwebservice.com".format(env=env)
......
from databaseConn.mysqlOperation import *
from recommend.publicSql import *
from databaseConn import *
git_url = "http://git.quantgroup.cn/"
# NOTE(review): personal access token hard-coded in source — rotate it and
# load from an environment variable / secret store instead.
git_token = 'zubux2fMyyp8s8Cys3T6'
git_headers = {
    "PRIVATE-TOKEN": git_token
}
# bug fix: removed a stray bare `dapan_sql` expression statement that was a
# no-op at best and a NameError at import time at worst.
\ No newline at end of file
from tools import *
import jenkins, requests, re, os
import pandas as pd
# Jenkins endpoint used by all build helpers below.
base_url = "http://jenkins.quantgroups.cn"
# java_url="/TKE/job/tke-java/"
# NOTE(review): Jenkins credentials hard-coded in source — move to environment
# variables or a secret store, and rotate this password.
jk = jenkins.Jenkins(base_url, username="qiuyue.gui", password="1234567890Yue@")
# extra Host header used by requests that bypass DNS-based vhost routing
header = {"Host": "jenkins.quantgroups.cn"}
def buildJob(project, branch, env, jobname="java", prefix_job='ACK'):
    '''
    Trigger a Jenkins build of *project* on *branch* into namespace *env*.

    :param project: git project name (becomes the Jenkins GIT_REPO parameter)
    :param branch: branch to build; must exist in the project's branch list
    :param env: target namespace (Jenkins NAMESPACE parameter)
    :param jobname: job suffix; only ACK/TKE-style jobs are supported
    :param prefix_job: prefix prepended to jobname when missing
    :return: 0 when the job name is unknown, otherwise None
    :raises Exception: when *branch* is not a branch of *project*
    '''
    jobname = jobname.upper()
    jobs = jk.get_all_jobs()
    # bug fix: get_project_branch returns None on failure; the original then
    # crashed with `branch not in None` instead of raising the intended error
    projects_branch = get_project_branch(project) or {}
    temp_branchs = projects_branch.get(project) or []
    if branch not in temp_branchs:
        raise Exception("branch({0})项目不存在,确定一下".format(branch), temp_branchs)
    jobname = prefix_job + '-' + jobname if prefix_job not in jobname else jobname
    # only the names are needed; the original also collected unused job urls
    jobnames = [kv.get('name') for kv in jobs]
    if jobname not in jobnames:
        print("jobname({0})错误,需要在其中一个:{1}".format(jobname, ','.join(jobnames)))
        return 0
    params = {"GIT_REPO": project, "DEPLOY": 'true', 'NAMESPACE': env, "BRANCH_NAME": branch}
    t = jk.build_job(jobname, params)
    print("project({0}-{1}-{2}) build succ".format(project, branch, env), t)
def get_project_branch(project):
    """
    Look up all branch names of a GitLab project.

    :param project: project name to search for in GitLab
    :return: {project: [branch names]} or None when the lookup fails
    """
    try:
        temp_branchs = []
        project_url = git_url + '/api/v4/projects?search=' + project
        temp = requests.get(project_url, headers=git_headers)
        project_name = jsonpath.jsonpath(temp.json(), '$[*].name')
        project_id = jsonpath.jsonpath(temp.json(), '$[*].id')
        project_info = dict(zip(project_name, project_id))
        # bug fix: GitLab pagination is 1-based; the original started at
        # page=0, which re-fetches page 1 and duplicates the first 100 branches
        for page in range(1, 11):
            brand_url = git_url + "/api/v4/projects/{project_id}/repository/branches?per_page=100&page={num}".format(
                project_id=project_info.get(project), num=page)
            temp = requests.get(brand_url, headers=git_headers)
            brands_name = jsonpath.jsonpath(temp.json(), '$[*].name')
            if brands_name:
                temp_branchs += brands_name
            else:
                # empty page -> no more branches
                break
        return {project: temp_branchs}
    except Exception:
        print("project[{}]不存在".format(project))
        traceback.print_exc(3)
def main(project, prefix_job='ACK'):
    """
    Build *project* with its pre-configured branch/env/job settings.

    :param project: project key; must appear in the table below
    :param prefix_job: Jenkins job-name prefix passed through to buildJob
    :raises Exception: when the project is unknown or its config is incomplete
    """
    project_branch = {
        "kdsp": {
            "branch": "feature-test1-test-202206081010",
            "env": 'bd',
            "jobname": "java"},
        "nearline-recommend-parent": {
            "branch": "master",
            "env": 'bd',
            "jobname": "java"
        },
        "online-recommend": {
            "branch": "master",
            "env": 'bd',
            "jobname": "java"
        }
    }
    # bug fix: .get(project) returned None for unknown projects, so the
    # original crashed with AttributeError before reaching the intended error
    conf = project_branch.get(project) or {}
    branch = conf.get('branch')
    env = conf.get('env')
    jobname = conf.get('jobname')
    if not (branch and env and jobname):
        raise Exception('项目错误,无法构建')
    buildJob(project, branch, env, jobname=jobname, prefix_job=prefix_job)
if __name__ == '__main__':
project='online-recommend'
main(project)
......@@ -28,4 +28,4 @@ def modifyPasseord(name,namespace):
print("this is update")
if __name__ == '__main__':
modifyPasseord('haiyuan.wen','test1')
\ No newline at end of file
modifyPasseord('haiyuan.wen','yxm')
\ No newline at end of file
......@@ -58,8 +58,9 @@ def readRenameColums(filename,fcolums,sheetname=0):
return df
if __name__ == '__main__':
filename="/Users/dm/Downloads/量化派/测试用例/召回测试耗时的数据.xlsx"
df=readRenameColums(filename,['id'])
print(df)
filename="/Users/dm/Downloads/量化派/特征/百行贷前特征code(1).xlsx"
df=readFile(filename)
print(df['指标英文'].to_list())
......@@ -65,6 +65,33 @@ def listSubgroup(tt,size=6):
temp.append(tt[i:i+size])
return temp
def divlist(a, num=0):
    """
    Split *a* into *num* equal-sized chunks, appending any remainder as one
    extra (shorter) chunk.

    :param a: sequence to split
    :param num: number of chunks; 0 (default) returns *a* unchanged
    :return: list of chunks, or *a* itself when num is falsy
    """
    if not num:
        return a
    size = len(a) // num
    # bug fix: the original looped range(size) with stride size, which
    # produced empty trailing chunks and mis-handled the remainder
    chunks = [a[i * size:(i + 1) * size] for i in range(num)]
    rest = a[num * size:]
    if rest:
        chunks.append(rest)
    return chunks
def split_by_length(init_list, children_list_len):
    """
    Split *init_list* into sublists of length *children_list_len*; the final
    sublist carries any remainder and may be shorter.

    :param init_list: list to split
    :param children_list_len: target length of each sublist
    :return: list of sublists
    """
    # zip over N copies of one iterator yields consecutive N-sized groups
    groups = zip(*[iter(init_list)] * children_list_len)
    result = [list(group) for group in groups]
    leftover = len(init_list) % children_list_len
    if leftover:
        result.append(init_list[-leftover:])
    return result
def countlist(a,num=0):
from collections import Counter
......@@ -92,7 +119,8 @@ def bingji(a,b):
if __name__ == '__main__':
a=[2,2,4,3,5,6,7,8]
b=[10,2,4,3,9]
print(bingji(b,a))
print(split_by_length(a,5))
#print(int(11/2))
#a=[[1,2],[2,3]]
#print(mergelist(a))
......
import threading,time
from tools.listOperation import *
class MyThread(threading.Thread):
    """Worker thread that applies a callable to a pre-assigned slice of work."""

    def __init__(self, func, divlist):
        super().__init__()
        self.func = func        # callable executed in this thread
        self.divlist = divlist  # the slice of items this thread processes

    def run(self):
        # invoked by Thread.start(): apply the callable to our slice
        self.func(self.divlist)
def func(divlist):
    """Print each work item; stands in for real per-item processing."""
    for item in divlist:
        print(item)
def main(thnum):
    """
    Split 100 numbers across *thnum* worker threads and wait for completion.

    :param thnum: number of worker threads to spawn
    """
    num = list(range(100))
    divnum = max(1, len(num) // thnum)
    divlists = split_by_length(num, divnum)
    threads = []
    # bug fix: iterate over every chunk — when thnum does not divide the list
    # evenly, split_by_length yields an extra remainder chunk that the old
    # range(thnum) loop silently dropped
    for chunk in divlists:
        t = MyThread(func, chunk)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
if __name__ == '__main__':
t=time.time()
main(5)
tt=time.time()
print(tt-t)
# print("=="*50)
# t=time.time()
# for i in list(range(100)):
# print(i)
# tt=time.time()
# print(tt-t)
from tools import *
import hashlib
def md5jiami(mess):
    """Return the hex MD5 digest of *mess* (must already be bytes)."""
    return hashlib.md5(mess).hexdigest()
def genUuidDeviceid():
"""
deviced必须随机生成,不然无法统计spu总数。故就算uuid是相同的,deviceid也不能相同
......
import requests
codes=['baihang_tile_info#homeInfoFirstTime', 'baihang_tile_info#homeInfoLastTime', 'baihang_tile_info#homeInfoNum', 'baihang_tile_info#homeInfoNumDrop', 'baihang_tile_info#workInfoFirstTime', 'baihang_tile_info#workInfoLastTime', 'baihang_tile_info#workInfoNum', 'baihang_tile_info#workInfoNumDrop', 'baihang_tile_info#firstQueryTime', 'baihang_tile_info#lastQueryTime', 'baihang_tile_info#queryNumHis', 'baihang_tile_info#queryNumHisApply', 'baihang_tile_info#queryCategoryHisApply', 'baihang_tile_info#queryHistoryReason1And90dNum', 'baihang_tile_info#queryHistoryReason1And365dNum', 'baihang_tile_info#queryHistory90dAnd365dRatio', 'baihang_tile_info#queryHistory30dNum', 'baihang_tile_info#diffQueryReason1Days', 'baihang_tile_info#diffFirstQueryDays', 'baihang_tile_info#newLoan90dNum', 'baihang_tile_info#newLoan360dNum', 'baihang_tile_info#newLoan90dAnd360dRatio', 'baihang_tile_info#revolvingLoanNoValidAccountRatio', 'baihang_tile_info#revolvingLoanNoValidAccountNum', 'baihang_tile_info#allSummaryLoanNums', 'baihang_tile_info#newLoanAmount180d', 'baihang_tile_info#newLoanAmount360d', 'baihang_tile_info#newLoanAmount180dAnd360dRatio', 'baihang_tile_info#newLoanTenant180dAnd360dRatio', 'third_data_source#baihang_homeInfo', 'third_data_source#baihang_nonRevolvingLoan_D180_applyTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D180_averageLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D180_loanAmount', 'third_data_source#baihang_nonRevolvingLoan_D180_loanCount', 'third_data_source#baihang_nonRevolvingLoan_D180_loanTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D180_maxLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D180_overdueLoanCount', 'third_data_source#baihang_nonRevolvingLoan_D30_applyTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D30_averageLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D30_loanAmount', 'third_data_source#baihang_nonRevolvingLoan_D30_loanCount', 
'third_data_source#baihang_nonRevolvingLoan_D30_loanTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D30_maxLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D30_overdueLoanCount', 'third_data_source#baihang_nonRevolvingLoan_D360_applyTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D360_averageLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D360_loanAmount', 'third_data_source#baihang_nonRevolvingLoan_D360_loanCount', 'third_data_source#baihang_nonRevolvingLoan_D360_loanTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D360_maxLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D360_overdueLoanCount', 'third_data_source#baihang_nonRevolvingLoan_D90_applyTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D90_averageLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D90_loanAmount', 'third_data_source#baihang_nonRevolvingLoan_D90_loanCount', 'third_data_source#baihang_nonRevolvingLoan_D90_loanTenantCount', 'third_data_source#baihang_nonRevolvingLoan_D90_maxLoanAmount', 'third_data_source#baihang_nonRevolvingLoan_D90_overdueLoanCount', 'third_data_source#baihang_nonRevolvingLoan_summary_loanCount', 'third_data_source#baihang_nonRevolvingLoan_summary_maxOverdueStatus', 'third_data_source#baihang_nonRevolvingLoan_summary_openLoanCount', 'third_data_source#baihang_nonRevolvingLoan_summary_overdueCount', 'third_data_source#baihang_nonRevolvingLoan_summary_remainingAmount', 'third_data_source#baihang_nonRevolvingLoan_summary_remainingMaxOverdueStatus', 'third_data_source#baihang_nonRevolvingLoan_summary_remainingOverdueAmount', 'third_data_source#baihang_nonRevolvingLoan_summary_remainingOverdueLoanCount', 'third_data_source#baihang_personalProfile_mobileCount', 'third_data_source#baihang_queryHistory', 'third_data_source#baihang_reportHeader_queryResult', 'third_data_source#baihang_revolvingLoan_D180_accountCount', 'third_data_source#baihang_revolvingLoan_D180_applyTenantCount', 
'third_data_source#baihang_revolvingLoan_D180_creditLimitSum', 'third_data_source#baihang_revolvingLoan_D180_lendingAmount', 'third_data_source#baihang_revolvingLoan_D180_overdueAccountCount', 'third_data_source#baihang_revolvingLoan_D30_accountCount', 'third_data_source#baihang_revolvingLoan_D30_applyTenantCount', 'third_data_source#baihang_revolvingLoan_D30_creditLimitSum', 'third_data_source#baihang_revolvingLoan_D30_lendingAmount', 'third_data_source#baihang_revolvingLoan_D30_overdueAccountCount', 'third_data_source#baihang_revolvingLoan_D360_accountCount', 'third_data_source#baihang_revolvingLoan_D360_applyTenantCount', 'third_data_source#baihang_revolvingLoan_D360_creditLimitSum', 'third_data_source#baihang_revolvingLoan_D360_lendingAmount', 'third_data_source#baihang_revolvingLoan_D360_overdueAccountCount', 'third_data_source#baihang_revolvingLoan_D90_accountCount', 'third_data_source#baihang_revolvingLoan_D90_applyTenantCount', 'third_data_source#baihang_revolvingLoan_D90_creditLimitSum', 'third_data_source#baihang_revolvingLoan_D90_lendingAmount', 'third_data_source#baihang_revolvingLoan_D90_overdueAccountCount', 'third_data_source#baihang_revolvingLoan_summary_accountCount', 'third_data_source#baihang_revolvingLoan_summary_creditLimitSum', 'third_data_source#baihang_revolvingLoan_summary_maxCreditLimitPerTenant', 'third_data_source#baihang_revolvingLoan_summary_maxOverdueStatus', 'third_data_source#baihang_revolvingLoan_summary_overdueCount', 'third_data_source#baihang_revolvingLoan_summary_remainingAmount', 'third_data_source#baihang_revolvingLoan_summary_remainingMaxOverdueStatus', 'third_data_source#baihang_revolvingLoan_summary_remainingOverdueAccountCount', 'third_data_source#baihang_revolvingLoan_summary_remainingOverdueAmount', 'third_data_source#baihang_revolvingLoan_summary_validAccountCount', 'third_data_source#baihang_workInfo', 'third_data_source#baihang_revolvingLoan_D360_lendingAmount_add_nonRevo', 
'third_data_source#baihang_revolvingLoan_D360_accountCount_div_add_nonRevo', 'third_data_source#baihang_revolvingLoan_D30_overdueAccountCount_add_nonRevo_div_90', 'third_data_source#baihang_revolvingLoan_D90_overdueAccountCount_add_nonRevo_div_360', 'third_data_source#baihang_revolvingLoan_D360_lendingAmount_sub_nonRevo', 'third_data_source#baihang_revolvingLoan_D90_accountCount_div_add_nonRevo', 'third_data_source#baihang_revolvingLoan_summary_overdueCount_div_summary_accountCount', 'third_data_source#baihang_nonRevolvingLoan_D30_overdueLoanCount_div_360', 'third_data_source#baihang_revolvingLoan_D90_accountCount_add_nonRevo_div_360', 'third_data_source#baihang_revolvingLoan_D90_accountCount_sub_nonRevo', 'third_data_source#baihang_revolvingLoan_D180_overdueAccountCount_add_nonRevo', 'third_data_source#baihang_revolvingLoan_D180_accountCount_div_add_nonRevo', 'third_data_source#baihang_revolvingLoan_D360_accountCount_add_nonRevo', 'third_data_source#baihang_revolvingLoan_D30_lendingAmount_add_nonRevo_div_360', 'third_data_source#baihang_revolvingLoan_D30_lendingAmount_div_360']
base_url="https://finance-feature-calc.tjzimu.com"
url="/calc/features"
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'rc_auth_key': 'rc_offline',
'rc_auth_secret': '9d9}bc24!e1z1x3`(x~4r29d$+45n3)\'zb696b$85e>_]p2&4f{,a3~8b3e_ldt^'
}
body={
"user_uuid":"26484438-2655-4c89-a4d2-fbdabe9381ec",
"codes":"{code}",
"business_type":0
}
def httpre(code):
    """POST one feature *code* to the calc service and return the JSON reply."""
    # reuses (and mutates) the module-level body template
    body['codes'] = code
    resp = requests.post(base_url + url, headers=headers, data=body)
    return resp.json()
if __name__ == '__main__':
for i in codes:
print("跑的特征:",i)
print(httpre(i))
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment