tianyunperfect, 1 year ago
parent
commit ea46052f6c
4 files changed, 126 additions and 135 deletions
  1. tmp4.py (+27 −17)
  2. tmp5.py (+11 −108)
  3. tmp6.py (+42 −10)
  4. yizhi/checkHealth.py (+46 −0)

tmp4.py (+27 −17)

@@ -1,22 +1,32 @@
-import re
+import threading
 
 import requests
+import json
+# ernie-speed-128k
+# ernie_speed
+# ernie-lite-8k
+url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k?access_token=24.e83a74fae9d2c9989ee473fe50d8f1c4.2592000.1721975478.282335-87423633"
 
-# Read the /etc/hosts file
-originData = ''
-with open('/etc/hosts', 'r') as f:
-    originData = f.read()
-# print(originData)
+payload = json.dumps({
+    "messages": [
+        {
+            "role": "user",
+            "content": "已知山药可以明目,已知黄连可以明目,根据上述信息,请问什么明目?不要解释,直接回答"
+        }
+    ]
+})
+headers = {
+    'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
+    'Content-Type': 'application/json'
+}
 
-# Regex search for text starting with a and ending with b
-for i in re.findall(r'^# 地址可能会变动.*# GitHub Host End$', originData):
-    print(i)
 
-# url = 'https://gitlab.com/ineo6/hosts/-/raw/master/next-hosts'
-# response = requests.get(url)
-# data = response.text
-#
-# # Regex: extract the block from "# 地址可能会变动" to "# GitHub Host End"
-# data = data[data.find("# 地址可能会变动") + 16:data.find("# GitHub Host End")]
-#
-# print(data)
+def getRes():
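+    # POST the chat payload to the ERNIE endpoint and print the model's reply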
+    print(1)
+    response = requests.request("POST", url, headers=headers, data=payload)
+    print(json.loads(response.text)['result'])
+
+
+# Fire 5 concurrent requests
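+# note: these are non-daemon threads, so the process waits for all responses before exiting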
+for i in range(5):
+    threading.Thread(target=getRes).start()

tmp5.py (+11 −108)

@@ -1,114 +1,17 @@
-def exec(obj):
-    from sqlalchemy import create_engine
-    import pandas as pd
-    from urllib.parse import quote_plus as urlquote
-    import jieba
-    from gensim import corpora, models, similarities
+import jieba
 
-    def cReate_dAta_cOnn():
-        engine = create_engine('mysql+pymysql://aimp_user:'+urlquote('vjeygLP76n7%UPx@')+'@rm-qsls3302.mysql.rds.aliyuncs.com:3302/bi_application')
-        return engine
+# Set a user dictionary (optional)
+# jieba.load_userdict("userdict.txt")
 
-    def rEad_aNd_dAtaframe(sql_query):
-        engine = cReate_dAta_cOnn()
-        df = pd.read_sql(sql_query,engine)
-        engine.dispose()
-        return df
+# Text to be segmented
+text = "田云 住在什么地方"
 
-    def flag(x):
-        if (x['sensoir'] == x['person']) and len(x['sensoir'])==3:
-            return 1
-        elif (x['sensoir'] == x['person']) and len(x['sensoir'])==2:
-            return 2
-        else:
-            return 3
+# Accurate-mode segmentation
+seg_list = jieba.cut(text, cut_all=False)
 
-    def flag1(x):
-        if x['f1'] == 1:
-            return 1
-        else:
-            return 0
+print("精确模式分词结果:", "/ ".join(seg_list))
 
-    def flag2(x):
-        if (x['f1'] == 2) and x['sims']>0.8:
-            return 1
-        else:
-            return 0
+# Full-mode segmentation
+seg_list = jieba.cut(text, cut_all=True)
 
-    def flag3(x):
-        if x['sims']>0.95:
-            return 1
-        else:
-            return 0
-
-    def cal_similar(doc_goal,ssim):
-        doc = rEad_aNd_dAtaframe('''select distinct credit_no,econ_reg_address from ext_anti_fraud_address ''')
-        doc_list = [jieba.lcut(w) for w in doc['econ_reg_address']]
-        target = [word for word in jieba.cut(doc_goal)]
-        dictionary = corpora.Dictionary(doc_list)
-        corpus = [dictionary.doc2bow(doc) for doc in doc_list]
-        doc_goal_vec = dictionary.doc2bow(target)
-        tfidf = models.TfidfModel(corpus)
-        index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features = len(dictionary.keys()))
-        sims = index[tfidf[doc_goal_vec]]
-        similary = pd.DataFrame({"risk_address": list(doc['econ_reg_address']), "sims": list(sims)})
-        similary["申请企业注册地址"] = doc_goal
-        similary_data = similary[["申请企业注册地址", "risk_address", "sims"]].drop_duplicates()
-        similary_data= similary_data[similary_data["sims"]>=ssim]
-        return similary_data
-
-    lcity = list(obj['city'])
-    if len(lcity)>1 and len(lcity[0])>1:
-        lcity = list(obj['city'])[0]
-    elif len(lcity)==1 and len(lcity[0])>1:
-        lcity = obj['city']
-    else:
-    obj['flag1'] = 0
-    obj['flag2'] = 0
-    obj['flag3'] = 0
-    return obj
-credit = []
-address = []
-sensoir = list(obj['oper_names'])
-if len(sensoir)<1:
-    obj['flag1'] = 0
-    obj['flag2'] = 0
-    credit.append(obj['credit_code'])
-    address.append(obj['address_'])
-    df_app = pd.DataFrame({"credit": credit,"address": address})
-    df_add = rEad_aNd_dAtaframe("select distinct credit_no,econ_reg_address from ext_anti_fraud_address where city = {}".format(lcity))
-    similary_data=cal_similar(df_app['address'].max(),0.95)
-    if similary_data.shape[0]>0:
-        obj['flag3'] = 1
-    else:
-        obj['flag3'] = 0
-    obj['similary_data']=similary_data.to_json(orient='records')
-    return obj
-else:
-    for i in sensoir:
-        credit.append(obj['credit_code'])
-        address.append(obj['address_'])
-    df_app = pd.DataFrame({"credit": credit, "sensoir": sensoir,"address": address})
-    df_add = rEad_aNd_dAtaframe("select distinct credit_no,econ_reg_address from ext_anti_fraud_address where city = {}".format(lcity))
-    lcredit =str(list(df_add['credit_no'])).replace('[','').replace(']','')
-    df_per = rEad_aNd_dAtaframe("select distinct credit_no,person from ext_anti_fraud_senior_person where credit_no in ({})".format(lcredit))
-    df_dec = pd.merge(df_app,df_per,left_on = 'sensoir',right_on = 'person',how = 'inner')
-    df_f = pd.merge(df_dec,df_add,on = 'credit_no',how = 'left')
-    if df_f.shape[0]<1:
-        obj['flag1'] = 0
-        obj['flag2'] = 0
-        obj['flag3'] = 0
-        obj['df_f']=df_f.to_json(orient='records')
-        return obj
-    else:
-        df_f['f1']=df_f.apply(flag,axis=1)
-        similary_data=cal_similar(df_f['address'].max(),0)
-        df = pd.merge(df_f,similary_data,left_on='econ_reg_address',right_on = 'risk_address',how = 'left')
-        df['flag1']=df.apply(flag1,axis=1)
-        df['flag2']=df.apply(flag2,axis=1)
-        df['flag3']=df.apply(flag3,axis=1)# save
-        obj['df']=df.to_json(orient='records')
-        obj['flag1']=df['flag1'].max()
-        obj['flag2']=df['flag2'].max()
-        obj['flag3']=df['flag3'].max()
-        return obj
+print("全模式分词结果:", "/ ".join(seg_list))

tmp6.py (+42 −10)

@@ -1,15 +1,47 @@
-import sys
+import json
+from time import sleep
 
+import requests
 
-def add_numbers(num1, num2):
-    sum = num1 + num2
-    print(sum)
+filePath = """C:\\Users\\admin\\Downloads\\7-SAT-顺序.json"""
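+# Upload each entry in the word-list JSON as a question/answer card via the yizhi card API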
 
+url = 'https://app.yizhizs.cn/api/card'
+myHeaders = {
+    'authorization': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJyb2xlSWQiOiIzIiwidXNlck5hbWUiOiLlj7bovbvnnIkiLCJleHAiOjE3MjIxMjk4MjUsInVzZXJJZCI6IjEifQ.fIPuDsD3XfFlFsQxYobjjXz1s3puugbuicy-iNwfjTA',
+}
+# Read the file contents (JSON format)
+with open(filePath, 'r', encoding='utf-8') as f:
+    data = f.read()
+    lines = json.loads(data)
+    num = 0
+    for line in lines:
+        jsonData = {
+            "cardBagId": "60412812777787392",
+            "cardType": "QUESTION_ANSWER",
+            "content": "{\"front\":\"<p>front-abcd</p>\",\"back\":{\"type\":\"QUESTION_ANSWER\",\"QUESTION_ANSWER\":{\"value\":\"<p>back-abcd</p>\"},\"MULTIPLE_CHOICE\":{\"values\":[{\"value\":\"\",\"checked\":false},{\"value\":\"\",\"checked\":false},{\"value\":\"\",\"checked\":false},{\"value\":\"\",\"checked\":false}]},\"TRUE_FALSE\":{\"value\":\"\"}}}"
+        }
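+        # "front-abcd" and "back-abcd" are placeholders, replaced below with the word and its translations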
+        num += 1
+        print(len(lines) - num)
+        try:
+            word = line['word']
+            translations = line['translations']
 
-if __name__ == "__main__":
-    # 从命令行参数中获取传递的参数
-    num1 = int(sys.argv[1])
-    num2 = int(sys.argv[2])
+            jsonData['content'] = jsonData['content'].replace('front-abcd', word)
 
-    # 调用函数进行计算并打印结果
-    add_numbers(num1, num2)
+            backRes = ''
+            for translation in translations:
+                backRes += f'<p>{translation["type"]}. {translation["translation"]}</p>'
+            if 'phrases' in line:
+                phrases = line['phrases']
+                backRes = backRes + f'<br/><p>词组: </p>'
+                for phrase in phrases[:3]:
+                    backRes += f"""<p class='custPhraseParent1'><span class='custPhrase1'>{phrase["phrase"]}</span><span class='custPhrase2'>{phrase["translation"]}</span></p>"""
+            jsonData['content'] = jsonData['content'].replace('back-abcd', backRes)
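+            # round-tripping the string through json.dumps/json.loads leaves it unchanged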
+            jsonData['content'] = json.loads(json.dumps(jsonData['content']))
+
+            response = requests.post(url, json=jsonData, headers=myHeaders, verify=False)
+            print(response.text)
+        except Exception as e:
+            print(e)
+
+print('done')

yizhi/checkHealth.py (+46 −0)

@@ -0,0 +1,46 @@
+import requests
+import time
+import json
+
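+# suppress urllib3 warnings (the request below uses verify=False)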
+requests.packages.urllib3.disable_warnings()
+# Return the current time as a formatted string
+def get_current_time():
+    current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    return current_time
+def check_website():
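+    # hit the public card-bag listing endpoint; alert if the response code is not 200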
+    url = "https://app.yizhizs.cn/api/card-bag/findPublicPage?outTags=&pageNum=1&pageSize=20"
+    try:
+        response = requests.get(url, verify=False)
+        if response.json()['code'] == 200:
+            print("网站正常运行:" + get_current_time())
+        else:
+            print("网站异常,正在重试" + get_current_time())
+            send_alert()
+    except requests.exceptions.RequestException as e:
+        print("网站异常,正在重试" + get_current_time())
+        send_alert()
+
+
+def send_alert():
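+    # push an alert message to the Feishu webhook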
+    alert_url = "https://www.feishu.cn/flow/api/trigger-webhook/ee26be1f55031f2debd664cbd377a3b8"
+    payload = {
+        "title": "首页访问异常",
+        "content": "首页访问异常"
+    }
+    headers = {
+        "Content-Type": "application/json"
+    }
+    try:
+        response = requests.post(alert_url, headers=headers, data=json.dumps(payload))
+        if response.status_code == 200:
+            print("报警发送成功" + get_current_time())
+        else:
+            print("报警发送失败" + get_current_time())
+    except requests.exceptions.RequestException as e:
+        print("报警发送失败" + get_current_time())
+
+
+if __name__ == "__main__":
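+    # check the site every 20 seconds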
+    while True:
+        check_website()
+        time.sleep(20)