tianyun 3 år sedan
incheckning
aa9af4d96d

+ 270 - 0
fastapi-demo/config/__init__.py

@@ -0,0 +1,270 @@
+# -*- coding:utf-8 -*-
+# 配置文件初始化
+# 主要是一些 db 的初始化创建
+# 简单 Class 类函数的 初始化
+
+import asyncio
+import json
+import logging
+import os
+from concurrent.futures import ThreadPoolExecutor
+from configparser import ConfigParser
+from logging.handlers import TimedRotatingFileHandler
+
+import aredis
+import redis
+import requests
+import uvloop
+from aiokafka import AIOKafkaProducer
+from elasticsearch import Elasticsearch
+from requests import adapters
+
+adapters.DEFAULT_POOLSIZE = 100000
+
+asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+
+
class CfgBaseInit(object):
    """Base configuration object.

    Parses ``config/config.ini`` once at import time and exposes the
    values used across the project as class attributes.

    config: parsed ConfigParser instance.
    project_name: project name from the [CONFIG] section.
    start_model: start mode (DEV / TEST / PRO), upper-cased.
    """
    config = ConfigParser()

    config.read(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "config", "config.ini"))
    project_name = config.get("CONFIG", "project_name")
    project_path = os.path.join(os.path.abspath(os.path.dirname(__file__)).split(project_name)[0], project_name)
    start_model = config.get("CONFIG", "start_model").upper()
    # The ini values may be written with single quotes; str.replace is a
    # no-op when no single quote is present, so this always yields JSON.
    __authentication = config.get("CONFIG", "authentication")
    authentication = json.loads(__authentication.replace("'", '"'))
    __headers = config.get("CONFIG", "headers")
    headers = json.loads(__headers.replace("'", '"'))
    bind = config.get(section=f"PROJECT-{start_model}", option="host")
    _executor = ThreadPoolExecutor(max_workers=50000)  # shared thread pool
    _async_kafka_session = None

    kafka_server = config.get(f"BEHAVE-{start_model}", "kafka_server")

    @staticmethod
    def get_my_event_loop():
        """Return the asyncio event loop of the current thread.

        :return: the current event loop object.
        """
        return asyncio.get_event_loop()

    @classmethod
    def executor_submit(cls, *args, **kwargs):
        """Submit work to the shared pool with default exception capture.

        Usage is identical to ``ThreadPoolExecutor.submit``.  If the first
        positional argument is a coroutine function it is instead run to
        completion on a fresh event loop and its result returned.

        :param args: callable (or coroutine function) followed by its
            positional arguments.
        :param kwargs: keyword arguments for the callable.  The special
            key ``thread_name_prefix`` names the returned future and the
            pool's thread prefix; defaults to the callable's ``__name__``.
        :return: the coroutine's result, or a ``Future`` for plain callables.
        """
        func = args[0]
        if asyncio.iscoroutinefunction(func):
            loop = asyncio.new_event_loop()
            try:
                # Unpack the remaining positional args instead of passing
                # them as a single tuple (the old form broke every
                # coroutine that takes real parameters).
                return loop.run_until_complete(func(*args[1:]))
            finally:
                loop.close()  # don't leak one event loop per submission

        explicit_prefix = "thread_name_prefix" in kwargs
        thread_name_prefix = kwargs.pop("thread_name_prefix", "") or func.__name__
        if explicit_prefix:
            # Mirror the prefix on the pool so worker threads share it.
            cls._executor._thread_name_prefix = thread_name_prefix
        executor_future = cls._executor.submit(*args, **kwargs)
        # Tag the future with the same name as its worker thread so logs
        # can be correlated; defaults to the function name.
        executor_future.name = thread_name_prefix
        executor_future.add_done_callback(cls._executor_callback)
        return executor_future

    @classmethod
    def _executor_callback(cls, worker):
        """Done-callback that surfaces exceptions raised inside the pool.

        :param worker: the finished Future.
        :return: None; re-raises the task's exception if there was one.
        """
        worker_exception = worker.exception()
        if worker_exception:
            # `logger` is the module-level Logger defined further down in
            # this file; it exists by the time any task can complete.
            logger.exception("Worker return exception: {}".format(worker_exception))
            raise worker_exception
+
+
+
+
+
class Logger(object):
    """Project logging wrapper, also usable as a class decorator.

    Builds a single TimedRotatingFileHandler from the [LOG] section and
    exposes level methods that flatten each message onto one line.
    """
    # [LOG] settings: file path, rotation interval, retained backups.
    log_path = CfgBaseInit.config.get("LOG", "log_path")
    when = CfgBaseInit.config.get("LOG", "when")
    backupCount = int(CfgBaseInit.config.get("LOG", "backupCount"))

    formatter = logging.Formatter(
        '%(asctime)s %(name)s %(process)d %(thread)d %(threadName)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    handler = TimedRotatingFileHandler(log_path, when=when, backupCount=backupCount)
    handler.setFormatter(formatter)
    logger = logging.getLogger(CfgBaseInit.project_name)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    def __init__(self, cls):
        # Decorator use: remember the wrapped class.
        self._cls = cls

    def __call__(self, *args, **kwargs):
        # Inject the module-level `logger` into the wrapped class once,
        # then construct the instance as usual.
        if not hasattr(self._cls, 'logger'):
            self._cls.logger = logger
        return self._cls(*args, **kwargs)

    @staticmethod
    def replace_blank(msg, *args, **kwargs):
        """Apply %-style args and collapse CR/LF so a record is one line."""
        msg = str(msg)
        if args:
            msg = msg % args
        # Without args, skip %-formatting entirely: a literal '%' in the
        # message used to raise "ValueError: incomplete format".
        return msg.replace("\r", " ").replace("\n", " ")

    def debug(self, msg, *args, **kwargs):
        self.logger.debug(self.replace_blank(msg, *args, **kwargs))

    def info(self, msg, *args, **kwargs):
        self.logger.info(self.replace_blank(msg, *args, **kwargs))

    def warning(self, msg, *args, **kwargs):
        self.logger.warning(self.replace_blank(msg, *args, **kwargs))

    def error(self, msg, *args, **kwargs):
        self.logger.error(self.replace_blank(msg, *args, **kwargs))

    def exception(self, msg, *args, **kwargs):
        self.logger.exception(self.replace_blank(msg, *args, **kwargs))

    def critical(self, msg, *args, **kwargs):
        self.logger.critical(self.replace_blank(msg, *args, **kwargs))


logger = Logger(Logger)
+
+
class RedisInit(CfgBaseInit):
    """Synchronous Redis initialisation.

    Builds connection pools for the primary and the backup instance from
    the REDIS-<mode> section of the config.
    """
    section = "REDIS-{}".format(CfgBaseInit.start_model.upper())
    host = CfgBaseInit.config.get(section=section, option="host")
    port = CfgBaseInit.config.get(section=section, option="port")
    db = CfgBaseInit.config.get(section=section, option="db")

    host_bak = CfgBaseInit.config.get(section=section, option="host_bak")
    port_bak = CfgBaseInit.config.get(section=section, option="port_bak")
    db_bak = CfgBaseInit.config.get(section=section, option="db_bak")
    # decode_responses=True so values come back as str, not bytes.
    Pool = redis.ConnectionPool(host=host, port=port, db=db, max_connections=None, decode_responses=True,
                                socket_keepalive=False)
    # socket_keepalive=False added for consistency with the primary pool
    # (and with both AsyncRedisInit pools); behaviour is unchanged.
    Pool_bak = redis.ConnectionPool(host=host_bak, port=port_bak, db=db_bak, max_connections=None,
                                    decode_responses=True, socket_keepalive=False)
    conn = redis.Redis(connection_pool=Pool)
+
+
class AsyncRedisInit(CfgBaseInit):
    """Asynchronous (aredis) Redis connection-pool initialisation.

    pipeline demo::

        async def pipeline_test():
            conn = aredis.StrictRedis(connection_pool=AsyncRedisInit.Pool)
            p = await conn.pipeline()
            await p.hgetall("process_cache\0015f371f42f286b2eb461e75a437162c6e@cccxx@abrs_reco@d751713988987e9331980363e24189ce@d751713988987e9331980363e24189ce@d751713988987e9331980363e24189ce@0@0_0")
            a = await p.execute()
            return a

    client demo::

        async def conn_test():
            conn = aredis.StrictRedis(connection_pool=AsyncRedisInit.Pool)
            a = await conn.hgetall("process_cache\0015f371f42f286b2eb461e75a437162c6e@cccxx@abrs_reco@d751713988987e9331980363e24189ce@d751713988987e9331980363e24189ce@d751713988987e9331980363e24189ce@0@0_0")
            return a
    """
    section = "REDIS-{}".format(CfgBaseInit.start_model.upper())
    host = CfgBaseInit.config.get(section=section, option="host")
    port = CfgBaseInit.config.get(section=section, option="port")
    try:
        password = CfgBaseInit.config.get(section=section, option="password")
    except Exception:
        # The [REDIS-*] sections may omit the password entirely.
        password = None
    db = CfgBaseInit.config.get(section=section, option="db")

    host_bak = CfgBaseInit.config.get(section=section, option="host_bak")
    port_bak = CfgBaseInit.config.get(section=section, option="port_bak")
    # The backup instance shares the primary password (a plain local
    # assignment cannot raise, so the old try/except was dead code).
    password_bak = password
    db_bak = CfgBaseInit.config.get(section=section, option="db_bak")
    # TODO: the pool does not release connections automatically; callers
    # create a StrictRedis client per use (see the demos above).
    Pool = aredis.ConnectionPool(host=host, port=port, db=db,
                                 password=password, max_connections=None, decode_responses=True,
                                 socket_keepalive=False)
    Pool_bak = aredis.ConnectionPool(host=host_bak, port=port_bak, db=db_bak, password=password_bak,
                                     max_connections=None, decode_responses=True,
                                     socket_keepalive=False)
    conn = aredis.StrictRedis(connection_pool=Pool)
+
+
class ElasticSearchInit(CfgBaseInit):
    """Elasticsearch client initialisation for the ES-<mode> section."""
    section = "ES-{}".format(CfgBaseInit.start_model.upper())
    index = CfgBaseInit.config.get(section=section, option="index")
    fc_index = CfgBaseInit.config.get(section=section, option="fc_index")
    try:
        es_timeout = float(CfgBaseInit.config.get(section=section, option="timeout"))
    except Exception:
        # Missing or malformed "timeout" option: fall back to 10 seconds.
        es_timeout = 10
    es_client = Elasticsearch(hosts=[CfgBaseInit.config.get(section=section, option="host")], timeout=es_timeout)
    # fc_host is a comma-separated list of nodes.
    fc_es_client = Elasticsearch(hosts=CfgBaseInit.config.get(section=section, option="fc_host").split(","),
                                 timeout=es_timeout)
+
+
class BehaviorAPI(CfgBaseInit):
    """Behavior-data push client for the BEHAVIOR_API-<mode> endpoint."""
    section = "BEHAVIOR_API-{}".format(CfgBaseInit.start_model.upper())
    url = CfgBaseInit.config.get(section=section, option="url")

    @staticmethod
    def push_behavior_log(data, timeout=None):
        """POST one behavior record as JSON.

        :param data: JSON-serialisable payload.
        :param timeout: optional requests timeout in seconds.  ``None``
            keeps the previous wait-forever behaviour; pass a number to
            avoid hanging on an unresponsive endpoint.
        :return: the ``requests.Response`` object.
        """
        payload = json.dumps(data)
        return requests.post(BehaviorAPI.url, data=payload, headers=CfgBaseInit.headers, timeout=timeout)
+
class AsyncKafka(CfgBaseInit):
    """Async Kafka producer, created lazily on first push."""
    _async_kafka_session = None

    @classmethod
    async def __set_async_session(cls):
        """Create, start and cache the shared AIOKafkaProducer.

        :return: the started producer (also stored on the class).
        """
        producer = AIOKafkaProducer(loop=cls.get_my_event_loop(),
                                    value_serializer=lambda v: json.dumps(v).encode('utf-8'),
                                    bootstrap_servers=CfgBaseInit.kafka_server)
        await producer.start()
        cls._async_kafka_session = producer
        return producer

    @classmethod
    async def async_push(cls, payload, topic="eip_rec_behave"):
        """Send one message and wait for broker acknowledgement.

        :param payload: JSON-serialisable message body.
        :param topic: destination topic; defaults to the previously
            hard-coded "eip_rec_behave" so existing callers are unchanged.
        :return: None.
        """
        producer = cls._async_kafka_session or await cls.__set_async_session()
        # (leftover debug print removed)
        await producer.send_and_wait(topic, payload)
+
+
if __name__ == '__main__':
    # Smoke test: push one record and show the HTTP response.
    resp = BehaviorAPI.push_behavior_log({"a": 1})
    print(resp)
    print(resp.content)

+ 162 - 0
fastapi-demo/config/config.ini

@@ -0,0 +1,162 @@
+[CONFIG]
+project_name = eip_feed_behave
+;project_name = eip_ai_asgi_py387
+headers = {"content-Type": "application/json"}
+authentication = {
+                 "hezhiguo": "hezhiguo",
+                 "eip-ai": "eip-ai"
+                 }
+; dev: 开发(组内自测):redis-6379
+; test: 测试环境, ES index 不同
+; pro:预发、生产
+start_model = dev
+
+[LOG]
+; 绝对路径
+log_path = ../logs/eip_feed_behave/info_rec.log
+; h、d、m、s,时、天、分、秒
+when = d
+backupCount = 30
+
+[PROJECT-DEV]
+host = 0.0.0.0:8089
+
+[PROJECT-TEST]
+host = 0.0.0.0:8088
+
+[PROJECT-PRO]
+host = 0.0.0.0:80
+
+[ES-DEV]
+host = 10.10.3.83:9200
+fc_host = 10.10.3.83:9200,10.10.100.198:9200
+user = eip
+pwd = eip-pass
+index = zdfx_item_test
+;fc_index = hippo_text_doc_27_515
+fc_index = hippo_text_doc_321_0615
+timeout = 60
+
+[ES-TEST]
+host = 10.10.3.83:9200
+fc_host = 10.10.3.83:9200,10.10.100.198:9200
+user = eip
+pwd = eip-pass
+index = zdfx_item_test
+;fc_index = hippo_text_doc_27_515
+fc_index = hippo_text_doc_321_0615
+timeout = 60
+
+[ES-PRO]
+host = 10.10.3.83:9200
+fc_host = 10.10.3.83:9200,10.10.100.198:9200
+user = eip
+pwd = eip-pass
+index = eip_inference_feed_item
+;fc_index = hippo_text_doc_27_515
+fc_index = hippo_text_doc_321_0615
+timeout = 60
+
+[MYSQL-PRO]
+url = mysql+mysqlconnector://root:qwertyuiop@10.10.100.228:8137/eip_ai?auth_plugin=mysql_native_password
+hit_log_table = eip_ai_hit_log
+
+[REDIS-DEV]
+host = 127.0.0.1
+port = 6379
+db = 0
+expire = 1000
+host_bak = 127.0.0.1
+port_bak = 6379
+db_bak = 15
+expire_bak = 1000
+pool_size = 10
+users = {
+        "hezhiguo": "hezhiguo",
+        "eip-ai": "eip-ai"
+        }
+
+[REDIS-TEST]
+host = 127.0.0.1
+port = 6379
+db = 0
+expire = 1000
+host_bak = 127.0.0.1
+port_bak = 6379
+db_bak = 15
+expire_bak = 1000
+pool_size = 10
+users = {
+        "hezhiguo": "hezhiguo",
+        "eip-ai": "eip-ai"
+        }
+
+[REDIS-PRO]
+host = 127.0.0.1
+port = 6379
+db = 0
+expire = 1000
+host_bak = 127.0.0.1
+port_bak = 6379
+db_bak = 15
+expire_bak = 1000
+pool_size = 10
+users = {
+        "hezhiguo": "hezhiguo",
+        "eip-ai": "eip-ai"
+        }
+
+[KAFKA_BEHAVIOR-DEV]
+user_name = eip-ai
+dept_name = eip-ai
+host = 1.kafka.adh:9092,2.kafka.adh:9092,3.kafka.adh:9092,4.kafka.adh:9092,5.kafka.adh:9092,6.kafka.adh:9092,7.kafka.adh:9092
+topic = log_server_stream
+is_cluster = 0
+reset_offset_on_start = 0
+fetch_wait_max_ms = 30000
+queued_max_messages = 2000
+
+[KAFKA_BEHAVIOR-TEST]
+user_name = eip-ai
+dept_name = eip-ai
+host = 1.kafka.adh:9092,2.kafka.adh:9092,3.kafka.adh:9092,4.kafka.adh:9092,5.kafka.adh:9092,6.kafka.adh:9092,7.kafka.adh:9092
+topic = log_server_stream
+is_cluster = 0
+reset_offset_on_start = 0
+fetch_wait_max_ms = 30000
+queued_max_messages = 2000
+
+[KAFKA_BEHAVIOR-PRO]
+user_name = eip-ai
+dept_name = eip-ai
+host = 1.kafka.adh:9092,2.kafka.adh:9092,3.kafka.adh:9092,4.kafka.adh:9092,5.kafka.adh:9092,6.kafka.adh:9092,7.kafka.adh:9092
+topic = log_server_stream
+is_cluster = 0
+reset_offset_on_start = 0
+fetch_wait_max_ms = 30000
+queued_max_messages = 2000
+
+[BEHAVIOR_API-DEV]
+url = http://115.159.79.118:20001/log/mlamp/eip/dw/EIP/ABRS/behavior_test
+;url = http://192.168.64.20:5000/abrs_update
+[BEHAVIOR_API-TEST]
+url = http://115.159.79.118:20001/log/mlamp/eip/dw/EIP/ABRS/behavior_test
+;url = http://192.168.64.20:5000/abrs_update
+
+[BEHAVIOR_API-PRO]
+url = http://115.159.79.118:20001/log/mlamp/eip/dw/EIP/ABRS/behavior
+
+[BLOOM_FILTER]
+;盐值;
+SALTS = ("1", "2", "3", "4")
+;length; 必须小于 SALTS 的长度,且在 3 个以上
+SALTS_LEN_LESS_1 = 3
+;hash list; 2**9 * 2**20 * 2**3  # 512MB*1024*1024*8 :MB->bit位
+hash_list = 4294967296
+hash_func_name = md5
+
+[BEHAVE-DEV]
+kafka_server = eip-kafka-2.qa.mlamp.cn
+
+[BEHAVE-PRO]
+kafka_server = eip-ckafka-3.mlamp.cn

+ 16 - 0
fastapi-demo/config/supervisor.ini

@@ -0,0 +1,16 @@
+[program:eip_ai_v0.2_async]
+directory=/home/zhuzhiqiang/projects/eip_ai_v0.2_async/eip-ai
+command=/home/hezhiguo/.virtualenvs/eip-ai-async-py387/bin/python3 /home/hezhiguo/.virtualenvs/eip-ai-async-py387/bin/gunicorn api_main_8000:app  -c gunicorn_config.py
+autostart=true
+autorestart=true
+startsecs=5
+loglevel=info
+
+
+user=root
+stdout_logfile=/home/zhuzhiqiang/projects/eip_ai_v0.2_async/logs/supervisor/eip_ai_out.log
+stdout_logfile_maxbytes=1024MB
+stdout_logfile_backups=2
+stderr_logfile=/home/zhuzhiqiang/projects/eip_ai_v0.2_async/logs/supervisor/eip_ai_err.log
+stderr_logfile_maxbytes=1024MB
+stderr_logfile_backups=2

+ 89 - 0
fastapi-demo/gunicorn_config.py

@@ -0,0 +1,89 @@
+# -*- coding:utf-8 -*-
+"""
+    gunicorn 配置项
+"""
+# Gunicorn的配置可以参考:
+
+# https://blog.csdn.net/y472360651/article/details/78538188
+
+# https://docs.gunicorn.org/en/stable/settings.html#server-mechanics
+
+"""
+if __name__ == '__main__':
+    uvicorn.run(app='main:app', host="127.0.0.1", port=8000, reload=True, debug=True)
+
+
+requirements:
+    httptools, uvloop, gunicorn, uvicorn
+
+
+
+shell:
+     uvicorn:
+        uvicorn local:app --reload --port 5000
+
+    gunicorn:
+        gunicorn api_main:app -b 0.0.0.0:8001 -w 4 -k uvicorn.workers.UvicornWorker  # -D 守护启动 -c 配置文件
+"""
+
import os

from config import CfgBaseInit

project_path = CfgBaseInit.project_path
project_name = CfgBaseInit.project_name

# Debug output enabled.
debug = True

# Daemon mode: keep disabled when the process is managed by supervisor.
daemon = False

# Reload workers on code change (development only).
reload = False

# Load the application before forking workers.
preload_app = True

bind = CfgBaseInit.bind  # ip:port to bind
# backlog = 512              # listen queue size
chdir = project_path  # working directory gunicorn switches into
timeout = 180  # worker timeout, seconds
# fastapi is an ASGI app, so use the uvicorn worker; 'gevent' (or the
# default 'sync') suits WSGI apps instead.
worker_class = 'uvicorn.workers.UvicornWorker'
# Maximum concurrent connections.
worker_connections = 10000
# Keep-alive seconds for persistent connections (default would be 2).
keepalive = 180

# workers = multiprocessing.cpu_count() * 2 + 1   # alternative sizing
workers = 3  # worker process count
threads = min(32, (os.cpu_count() or 1) + 4)  # threads per worker process
loglevel = 'debug'  # level of the ERROR log; the access log level is fixed
access_log_format = '%(t)s %(p)s %(h)s "%(r)s" %(s)s %(L)s %(b)s %(f)s" "%(a)s"'  # access-log line format

"""
其每个选项的含义如下:
h          remote address
l          '-'
u          currently '-', may be user name in future releases
t          date of the request
r          status line (e.g. ``GET / HTTP/1.1``)
s          status
b          response length or '-'
f          referer
a          user agent
T          request time in seconds
D          request time in microseconds
L          request time in decimal seconds
p          process ID
"""

# When started via a service unit, this pid path must match the unit's,
# otherwise the service cannot start.
# pidfile = "./log/fast.pid"
pidfile = f"{project_path}/../logs/{project_name}/pid.pid"

accesslog = f"{project_path}/../logs/{project_name}/gunicorn_access.log"  # access log file
errorlog = f"{project_path}/../logs/{project_name}/gunicorn_error.log"  # error log file

+ 37 - 0
fastapi-demo/main.py

@@ -0,0 +1,37 @@
+from fastapi import FastAPI
+from pydantic import BaseModel  # 参数校验
+
+app = FastAPI()
+
+
@app.get("/")
def index():
    """Root endpoint: confirms the API is reachable."""
    welcome = {"message": "你已经正确创建api"}
    return welcome
+
+
# e.g. http://localhost:8000/query?uid=1
@app.get("/query")
def query(uid):
    """Query-string variant: uid arrives untyped (as a string)."""
    return {'success': True, 'msg': f'uid为{uid}'}
+
+
# e.g. http://localhost:8000/query/1 — path parameter with int validation
@app.get("/query/{uid}")
def query_by_id(uid: int):
    """Path-parameter variant.

    Renamed from ``query`` so it no longer redefines the handler above
    (flake8 F811); the route itself is unchanged.
    """
    msg = f'uid为{uid}'
    return {'success': True, 'msg': msg}
+
+
class People(BaseModel):
    """Request-body schema for the /insert endpoint."""
    name: str
    age: int
    address: str
    salary: float
+
+
# JSON-body request
@app.post("/insert")
def insert(people: People):
    """Accept a People record and echo its name and age."""
    return f"名字:{people.name},年龄{people.age}"

+ 40 - 0
flask-demo/README.md

@@ -0,0 +1,40 @@
+## flask 项目示例
+
+启动
+```text
+uwsgi --ini uwsgi.ini
+```
+关闭
+```text
+uwsgi --stop uwsgi.pid
+```
+
+查看是否启动
+```text
+ps -ef|grep uwsgi
+```
+
+nginx配置
+```text
+# 第一行修改user为root用户
+# 在include /etc/nginx/sites-enabled/*文件下配置
+server {
+  # 监听80端口,服务器启动的时候就可以监听这个端口接收到的请求
+  listen 80; 
+  # 配置 服务器名称, 一般指定ip地址即可
+  server_name 192.168.10.51;
+
+  location / {
+    # 指定接收到的请求中可以包含的是数据的类型
+    include uwsgi_params;
+    # 指定将请求反向转发到后端的某个服务器~ uwsgi服务器的socket选项
+    uwsgi_pass 127.0.0.1:8000;
+  }
+  # 配置static路径
+  location /static {
+    # 配置静态资源访问处理 如果客户端请求/static/...就自动转发/home/...static_file/..
+    alias /home/worker/mysite/static_file/; # 静态文件夹所在的路径
+  }
+
+}
+```

+ 17 - 0
flask-demo/main.py

@@ -0,0 +1,17 @@
from flask import Flask, redirect, request

# Serve the static folder from the URL root ("" instead of "/static").
app = Flask(__name__, static_url_path="")
# Cap request/upload size at 3 MB.
app.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024
+
+
@app.route("/", methods=['post', 'get'])
def hello():
    """Log the incoming parameters, then send the client to the static index page."""
    print(request.json)  # JSON body (None when not JSON)
    print(request.args)  # query-string parameters
    return redirect("/index.html")
+
+
if __name__ == '__main__':
    # Development server only; production runs under uwsgi (see README).
    app.run()

BIN
flask-demo/static/img/Snipaste_2020-06-10_14-17-15.png


+ 13 - 0
flask-demo/static/index.html

@@ -0,0 +1,13 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <title>index</title>
+</head>
+<body>
+hello world
+<img src="img/Snipaste_2020-06-10_14-17-15.png" alt="123">
+
+
+</body>
+</html>

+ 0 - 0
tmp/tmp.py