A Blockhead's Handbook
.........................
.........★︵___︵★.........
........./ \.........
.........︴● ● ︴.........
.........︴≡ ﹏ ≡ ︴.........
.........\_____/.........
..╭╧╮╭╧╮╭╧╮╭╧╮╭╧╮╭╧╮╭╧╮╭╧╮..
..│我││來││写││B││U││G││哩│..
..╘∞╛╘∞╛╘∞╛╘∞╛╘∞╛╘∞╛╘∞╛╘∞╛..
.........................
Migrate
Database migrations: generally, right after creating tables, generate the migration file. Remember to commit migration files to git promptly with proper messages so they are easy to manage.
flask
Migration commands (revisions form a linked list)
python manage.py db upgrade # apply migrations to the database
python manage.py db migrate # generate a migration script
python manage.py db downgrade # roll the database back
python manage.py db heads # show the head revision of the chain
The corresponding table in the database is alembic_version
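A minimal manage.py wiring sketch for the commands above (an assumption on my part, using the older Flask-Script + Flask-Migrate style that `python manage.py db ...` implies; `create_app` and `db` are placeholders for your own app factory and SQLAlchemy object):
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db  # hypothetical app factory and SQLAlchemy instance

app = create_app()
migrate = Migrate(app, db)                 # register Alembic-backed migrations
manager = Manager(app)
manager.add_command('db', MigrateCommand)  # exposes `python manage.py db ...`

if __name__ == '__main__':
    manager.run()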
alembic
Migration commands (commonly used with tornado; revisions form a linked list)
alembic revision --autogenerate -m 'message' # generate a migration
alembic upgrade head # apply migrations to the database
alembic current # show the current revision status
alembic downgrade base # roll back all migrations (back to base)
- Notes on moving large amounts of MySQL data
# set according to the data volume
show VARIABLES like '%max_allowed_packet%';
set global max_allowed_packet = 10*1024*1024*10; # 100M
Git
Common workflow in collaborative development
- Pull the latest code
git pull
- Merge the main branch into your own branch (develop)
git merge origin/develop # develop is the development branch
- Commit your own code
git status # check what has changed
git add . # stage all changes; replace . with specific files for a partial commit
git commit -m 'message' # commit to your local branch
git push origin xxx # push to the remote branch
gitlab
Open a merge request
Note: open the merge request on GitLab, merging your own branch into the develop branch, and set the Assignee to the project owner.
- Configure the account
git config --global user.name ""
git config --global user.email ""
git config --global user.password ""
# To change the password manually:
Windows --> Control Panel --> User Accounts --> Credential Manager --> Manage Windows Credentials --> edit the password of the corresponding account
- Refresh remote and local branches
git fetch origin --prune
Docker
- Install docker
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
- List containers
docker ps -a
- Start a container
docker start [CONTAINER NAME]
- Remove a container
docker rm [CONTAINER ID]
- List images
docker images
- Remove an image
docker rmi [IMAGE ID]
- Enter a container
sudo docker exec -it [CONTAINER] bash
- mysql_sh
docker run --name {$databases_alias} -d -p 3306:3306 -v /home/{$user}/docker_volume/mysql/:/var/lib/mysql -e MYSQL_ROOT_PASSWORD={$pswd} mysql:5.7 # databases_alias is the database alias, user is the local username, pswd is the database password
- redis_sh
docker run --name {$databases_alias} -d -p 6379:6379 -v /home/{$user}/docker_volume/redis:/data redis redis-server --appendonly yes --requirepass {$pswd} # databases_alias is the database alias, user is the username, pswd is the database password
- Registry mirror acceleration
Location: Aliyun --> Console --> Products & Services --> Container Registry
# ubuntu
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://kd5v1bsi.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
VirtualBox
- Port forwarding
Machine --> Settings --> Network --> Port Forwarding
Name | Protocol | Host IP | Host Port | Guest IP | Guest Port
---|---|---|---|---|---
xshell | TCP | | 2222 | | 22
mysql | TCP | | 3306 | | 3306
redis | TCP | | 6379 | | 6379
- Keep the machine in a saved state
Machine --> Close --> Save the machine state
- Mirror source
https://mirrors.aliyun.com/ubuntu/ # switch the mirror source to this address when installing the Linux server edition
- Install Guest Additions (server edition)
# Install dependencies
sudo apt-get install dkms
sudo apt-get install build-essential
# Create a mount point under /mnt
sudo mkdir /mnt/Vbox
# Mount the cdrom
sudo mount /dev/cdrom /mnt/Vbox
# Enter /mnt/Vbox and run the installer script
sudo ./VBoxLinuxAdditions.run
# Mount the shared folder (fixed allocation)
sudo mount -t vboxsf [windows_filename] [/mnt/filename]
# Unmount
sudo umount /mnt/Vbox
Nginx
Reverse proxying means the responses to the requests you send all come back to your own host. Editing the hosts file can be understood as doing DNS resolution, pointing the visited domain at the local machine; then edit the nginx config file to point the api address at localhost and forward the port.
- Host
On Windows you can edit it directly with notepad++ (run with system administrator privileges)
File location:
C:\Windows\System32\drivers\etc
127.0.0.1 activate.com # append the mapping at the end of the file (replace activate.com with the URL you want to redirect)
- nginx_config
First back up nginx.conf by renaming it to nginx.conf.bak, then add a server block:
server {
    listen 80;
    server_name 127.0.0.1 localhost {$variable}; # the variable is the api domain
    access_log logs/qa.access.log; # access log, e.g. for the qa environment
    error_log logs/qa.error.log; # error log, e.g. for the qa environment
    root html;
    index index.html index.htm index.php;
    ## send request back to apache ##
    location / {
        proxy_pass http://127.0.0.1:5000; # local service port
        # Proxy Settings
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
        proxy_max_temp_file_size 0;
        proxy_connect_timeout 90;
        proxy_send_timeout 90;
        proxy_read_timeout 90;
        proxy_buffer_size 4k;
        proxy_buffers 4 32k;
        proxy_busy_buffers_size 64k;
        proxy_temp_file_write_size 64k;
    }
}
- Shell commands
On Windows, switch drives with D: (enters the D drive)
start nginx # start nginx
nginx -s quit # stop nginx
Debug
Breakpoints usually go on the first line inside the function you want to inspect, not on the def line itself
F6 # step over: move to the next line
F5 # step into: enter the called function
F8 # resume program: run on past the call; handy for skipping functions with lots of loop iterations
F7 # step out: finish the current function and return to the caller
Virtualenv
Pairing development with virtual environments is very important: each project keeps its own set of dependencies
*Mind the difference between pip and pip3
- Installation
pip install virtualenv # install virtualenv
- Configure a mirror source
python -m pip install -U pip # upgrade pip to the latest version
pip config set global.index-url https://pypi.douban.com/simple/ # use the Douban mirror
- Common commands
python -m venv venv # create a virtual environment named venv
cd venv # enter the environment directory
Scripts\activate.bat # activate the virtual environment
Scripts\deactivate.bat # deactivate the virtual environment
- Dependencies
pip install wheel # install wheel
pip install -r requirements.txt # install the dependencies
pip freeze > requirements.txt # export the installed packages to a requirements file
Kubectl
- Common commands for viewing logs
kubectl get namespaces # list namespaces
kubectl get pods -n {$var_1} # list pods in a namespace; var_1 is the namespace
kubectl get deployment -n {$var_1} # list the deployments in that namespace
kubectl -n {$var_1} logs {$var_2} {$var_3} --tail=100 # show the last 100 log lines of a container
- Properly delete a pod
# 1. Delete the pod first
# 2. Then delete the corresponding deployment (deployment.yaml defines the replica count, so the pod would otherwise be recreated)
kubectl delete pod {$var_2} -n {$var_1} # delete the pod in that namespace
kubectl delete deployment {$var_3} -n {$var_1} # delete the corresponding deployment
remark: you can also locate the cluster on Aliyun and delete the corresponding container there
- Transfer small files between Windows and Linux
sudo apt install lrzsz # an alternative to copying the config file over directly
- Configure kubeconfig to connect to the cluster
mkdir -p $HOME/.kube
sudo mv admin.conf $HOME/.kube/config # admin.conf is the cluster config file
sudo chown $(id -u):$(id -g) $HOME/.kube/config
- Configure the environment variable
export KUBECONFIG=$HOME/.kube/config # ~/.zshrc for zsh, ~/.bashrc for bash
Eclipse
ctrl + shift + r # open resource
shift + shift # global search
ctrl + o # list the methods of the current class or jump to a specific method
ctrl + e # switch between open editors
alt + shift + r # rename
alt + arrow keys # move the current line up or down
ctrl + shift + f # auto-format
shift + tab # dedent by 4 spaces
ctrl + tab # switch along the call chain
SQLAlchemy
Sort NULL values last
order_by((user.id == None), user.id.asc()) # puts rows with NULL ids at the end
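A fuller sketch of the same ordering (assuming `user` above is a mapped model, called `User` here for illustration): the boolean expression evaluates to 1 for NULL ids and 0 otherwise, so NULL rows sort after the rest.
users = (db.session.query(User)
         .order_by(User.id.is_(None), User.id.asc())  # NULL ids last, the rest ascending
         .all())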
exist
if start_time and end_time:
    exist_query = db.session.query(ClassDetail.class_room_id).filter(
        and_(ClassDetail.class_start_time >= start_time, ClassDetail.class_end_time <= end_time)).filter(
        ClassRoom.id == ClassDetail.class_room_id)
    query_set = query_set.filter(exist_query.exists())
    query_count_set = query_count_set.filter(exist_query.exists())
---------------------------------------------------------------------------------------
if has_course and has_course == BooleanEnum.YES:
    exist_query = db.session.query(ClassDetail.id).outerjoin(
        ClassInfo, ClassDetail.class_id == ClassInfo.id).filter(
        and_(ClassRoom.id == ClassDetail.class_room_id, ClassInfo.delete_flag == BooleanEnum.NO))
    query_set = query_set.filter(exist_query.exists())
    query_count_set = query_count_set.filter(exist_query.exists())
not exist
if has_course and has_course == BooleanEnum.NO:
    exist_query = db.session.query(ClassDetail.id).outerjoin(
        ClassInfo, ClassDetail.class_id == ClassInfo.id).filter(
        and_(ClassRoom.id == ClassDetail.class_room_id, ClassInfo.delete_flag == BooleanEnum.NO))
    query_set = query_set.filter(~exist_query.exists())
    query_count_set = query_count_set.filter(~exist_query.exists())
group_by
refund_detail_list = db.session.query(
    RefundApplyRecord.sign_up_id,
    db.func.sum(RefundApplyRecord.class_fee).label('refund_class_fee'),
    db.func.sum(RefundApplyRecord.material_fee).label('refund_material_fee'),
    db.func.sum(RefundApplyRecord.insurance).label('refund_insurance')).filter(
    RefundApplyRecord.refund_status.in_((RefundStatusEnum.REFUNDING, RefundStatusEnum.AGREE_REFUND))).group_by(
    RefundApplyRecord.sign_up_id).having(RefundApplyRecord.sign_up_id.in_(sign_up_id_list)).all()
like
Handling a LIKE that cannot use an index
Deferred join: rewrite the query and design the index cleverly so it covers multiple columns
EXPLAIN select * from products join (select prod_id from products where actor="Luxun" and title like "%野草%") as t1 on (t1.prod_id=products.prod_id)
A pitfall with count and group_by
When a group's count is 0 the row is filtered out entirely, so convert it where necessary or some data will be lost
IFNULL( su.total, 0 )
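A hedged SQLAlchemy sketch of the workaround: left-join the aggregated subquery and wrap the count in IFNULL so groups with zero rows survive. `SignUp` is a hypothetical model used only for illustration, and I am assuming `RefundApplyRecord` has an integer primary key `id`.
from sqlalchemy import func

refund_sq = (db.session.query(
        RefundApplyRecord.sign_up_id.label('sign_up_id'),
        func.count(RefundApplyRecord.id).label('total'))
    .group_by(RefundApplyRecord.sign_up_id)
    .subquery('su'))

rows = (db.session.query(
        SignUp.id,
        func.ifnull(refund_sq.c.total, 0).label('refund_count'))  # IFNULL(su.total, 0)
    .outerjoin(refund_sq, refund_sq.c.sign_up_id == SignUp.id)    # keep sign-ups with no refunds
    .all())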
Celery
- celery (started with threads, 4 by default)
Parameters: celery -A workers.celery worker -l info -P threads
- celery_beat
Parameters: celery -A workers.celery beat
# common schedules for periodic tasks; see the docs for details
'schedule': crontab(minute='*/30') # run every half hour
'schedule': crontab(hour='*/12', minute=0) # run every 12 hours
- celery_config
Periodic task configuration backed by redis
import os
from celery.schedules import crontab
from dotenv import load_dotenv
from celery.signals import celeryd_after_setup
load_dotenv()
REDIS_CONFIG = {
    'CELERY_REDIS_HOST': os.getenv('CELERY_REDIS_HOST', '127.0.0.1'),
    'CELERY_REDIS_PORT': os.getenv('CELERY_REDIS_PORT', 6379),
    'CELERY_REDIS_MAX_CONNECTION': int(os.getenv('CELERY_REDIS_MAX_CONNECTION', 20)),
    'CELERY_REDIS_EXPIRE_TIME': int(os.getenv('CELERY_REDIS_EXPIRE_TIME', 15 * 60)),
    'CELERY_REDIS_PASSWORD': os.getenv('CELERY_REDIS_PASSWORD', None),
    'CELERY_REDIS_BACKEND_DB': os.getenv('CELERY_REDIS_BACKEND_DB', 4)
}
# Redis URL
if REDIS_CONFIG['CELERY_REDIS_PASSWORD']:
    BACKEND_URL = f'redis://:{REDIS_CONFIG["CELERY_REDIS_PASSWORD"]}@{REDIS_CONFIG["CELERY_REDIS_HOST"]}:' \
                  f'{REDIS_CONFIG["CELERY_REDIS_PORT"]}/{REDIS_CONFIG["CELERY_REDIS_BACKEND_DB"]}'
else:
    BACKEND_URL = f'redis://{REDIS_CONFIG["CELERY_REDIS_HOST"]}:' \
                  f'{REDIS_CONFIG["CELERY_REDIS_PORT"]}/{REDIS_CONFIG["CELERY_REDIS_BACKEND_DB"]}'
timezone = 'Asia/Shanghai'
accept_content = ['json', 'pickle']
task_serializer = 'pickle'
result_serializer = 'pickle'
task_compression = 'gzip'
result_compression = 'gzip'
broker_url = rabbitmq_url  # rabbitmq_url is assembled elsewhere; RabbitMQ serves as the broker
result_backend = BACKEND_URL
imports = {
    'workers.tasks.like_count',  # using a like-count task as the example
}
task_routes = {
    'task.like_count_celery': {'queue': 'task_queue'},
}
beat_schedule = {
    'like_count_celery': {
        'task': 'task.like_count_celery',
        'schedule': crontab(minute='*/30')  # run every half hour
    }
}
@celeryd_after_setup.connect
def setup_direct_queue(sender, instance, **kwargs):
    queue_names = ['queue_1', 'queue_2']  # list of queues to consume
    for queue_name in queue_names:
        instance.app.amqp.queues.select_add(queue_name)
- __init__.py
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from celery import Celery
from celery.result import AsyncResult
from dotenv import load_dotenv
def async_task(task_id):
    def _on_result(_result, _future):
        if _result.ready():
            _future.set_result(_result.result)
        else:
            IOLoop.current().add_callback(_on_result, _result, _future)

    future = Future()
    result = AsyncResult(task_id)
    IOLoop.current().add_callback(_on_result, result, future)
    return future


def async_celery_task(task_name, *args, **kwargs):
    def _on_result(_result, _future):
        if _result.ready():
            _future.set_result(_result.result)
        else:
            IOLoop.current().add_callback(_on_result, _result, _future)

    future = Future()
    result = task_name.apply_async(*args, **kwargs)
    IOLoop.current().add_callback(_on_result, result, future)
    return future
load_dotenv()
celery = Celery('tasks')
celery.config_from_object('workers.celery_config')
- Some celery pitfalls
q: celery re-executes some tasks (celery-4.3.0 + redis)
q: when celery runs a long-eta task, it receives the task again every hour (the scheduling interval)
Causes and solutions:
1. The acks_late parameter (avoid it if you can). With Celery's CELERY_ACKS_LATE=True the worker only sends the ack to the broker after the task has finished executing, telling the queue the task is done, instead of acking on receipt before execution. If the worker dies abnormally mid-task, the task will be handed to another worker; but as a result a task may be executed more than once, so use it with great care.
2. Enlarge celery's scheduling window so duplicate deliveries arrive as rarely as possible (a temporary workaround, not recommended); see the sketch after this list.
3. celery 5.1.0 is expected to ship a fix (currently 5.1.0b1, 2021/04/28); dependencies need adjusting accordingly. So far the b1 release imports grp, which is Linux-only and cannot run on Windows.
4. celery_once (uses a dedicated redis db for locking... in testing it did not work, tasks were still re-executed, just less often).
5. Switch the broker to RabbitMQ, strongly recommended!!! (celery was originally built to support RabbitMQ, so its stability is the best).
6. After switching to RabbitMQ, eta tasks that involve updates all need the acks_late=True parameter to be truly discarded.
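A minimal sketch of option 2 under a Redis broker (an assumption; the value is illustrative): raise visibility_timeout above your longest ETA so Redis does not re-deliver a task that is merely still waiting.
# in celery_config: must exceed the longest ETA/countdown you schedule
broker_transport_options = {'visibility_timeout': 24 * 60 * 60}  # 24 hours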
Some references
the kombu issue behind this, already fixed by a PR
the specific issue
related issues
revoke_task_still_executes after switching to mq
DB_tool
- Some wrappers around the db connection
import os
from contextlib import contextmanager
from functools import wraps
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, Session, sessionmaker
load_dotenv()
# database connection settings
MYSQL_USER = os.getenv('MYSQL_USER', 'xxxx') # 1
MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', 'xxxx') # 2
MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
MYSQL_DATABASE_NAME = os.getenv('MYSQL_DATABASE_NAME', 'xxx') # 3
DATABASE_CONNECTION_TIMEOUT = os.getenv('DATABASE_CONNECTION_TIMEOUT', 30)
MYSQL_URL = f'{MYSQL_USER}:{MYSQL_PASSWORD}@{MYSQL_HOST}:{MYSQL_PORT}/{MYSQL_DATABASE_NAME}?charset=utf8&autocommit=false'
SQLALCHEMY_DATABASE_URI = f'mysql+mysqldb://{MYSQL_URL}'
engine = create_engine(SQLALCHEMY_DATABASE_URI, max_overflow=2, pool_size=10, isolation_level='REPEATABLE READ')
class SqlAlchemyTool:
    def __init__(self, engine):
        self._engine = engine
        self._session_factory = sessionmaker(bind=engine, autocommit=False)
        self._session = scoped_session(self._session_factory)

    @property
    def session(self):
        return self._session

    @contextmanager
    def auto_commit(self):
        try:
            trans = self._session.begin_nested()  # open a SAVEPOINT so calls can nest
            yield
            self._session.commit()  # release the savepoint
        except BaseException as e:
            self._session.rollback()
            raise e
        else:
            # only the outermost auto_commit() commits the enclosing transaction
            if trans.nested and not trans.parent.nested:
                self._session.commit()

    def with_transaction(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self.auto_commit():
                return func(*args, **kwargs)
        return wrapper


db = SqlAlchemyTool(engine)
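A usage sketch for the wrappers above (`User` and the field are hypothetical names; the point is that auto_commit() commits on success and rolls back on any exception):
@db.with_transaction
def rename_user(user_id, new_name):
    user = db.session.query(User).get(user_id)  # hypothetical model
    user.name = new_name                        # committed on success, rolled back on error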
Redis_tool
- A wrapper around the redis connection
import redis
from threading import Lock
from dotenv import load_dotenv
from workers.celery_config import REDIS_CONFIG
DEFAULT_REDIS_EXPIRE_TIME = 15 * 60  # 15 minutes
load_dotenv()
class RedisTool:
    _instance_lock = Lock()

    def __new__(cls, *args, **kwargs):
        # singleton implementation
        if not hasattr(RedisTool, "_instance"):
            with RedisTool._instance_lock:
                if not hasattr(RedisTool, "_instance"):
                    RedisTool._instance = super().__new__(cls)
        return RedisTool._instance

    @staticmethod
    def get_pool(host, port, max_connections, password=None, db=0):
        if password is not None:
            url = f'redis://:{password}@{host}:{port}/{db}'
        else:
            url = f'redis://{host}:{port}/{db}'
        _pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections)
        return _pool

    @staticmethod
    def get_connection(pool):
        connection = redis.Redis(connection_pool=pool, decode_responses=True)
        return connection

    @staticmethod
    def acquire(conn, lock_name, data, expire_time=DEFAULT_REDIS_EXPIRE_TIME):
        """
        Acquire the redis lock (implemented with SET NX EX)
        :return:
        """
        result = conn.set(lock_name, data, nx=True, ex=expire_time)
        return result

    @staticmethod
    def release(conn, lock_name, data):
        """
        Release the redis lock
        :return:
        """
        lua = """
        if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end
        """
        script = conn.register_script(lua)
        result = script(keys=[lock_name], args=[data])
        return result


pool = RedisTool.get_pool(
    REDIS_CONFIG.get('CELERY_REDIS_HOST'), REDIS_CONFIG.get('CELERY_REDIS_PORT'),
    REDIS_CONFIG.get('CELERY_REDIS_MAX_CONNECTION'), REDIS_CONFIG.get('CELERY_REDIS_PASSWORD'))
redis_tool = RedisTool.get_connection(pool)
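A usage sketch for the two lock helpers above; the lock name and expiry are illustrative, and the uuid token ensures only the holder's release succeeds:
import uuid

token = str(uuid.uuid4())
if RedisTool.acquire(redis_tool, 'lock:like_count', token, expire_time=60):
    try:
        pass  # critical section: do the work guarded by the lock
    finally:
        RedisTool.release(redis_tool, 'lock:like_count', token)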
Dockerfile
Using project_name as the example
FROM i.harbor.dragonest.net/public/python:3.7
ADD requirements.txt requirements.txt
ARG pip_mirror=https://pypi.tuna.tsinghua.edu.cn/simple
RUN pip3 install -i ${pip_mirror} -U pip && pip3 install -i ${pip_mirror} -r requirements.txt
RUN useradd --create-home --no-log-init --shell /bin/bash project_name_user
RUN adduser project_name_user project_name_user
RUN echo 'project_name_user:project_name_user' | chpasswd
WORKDIR /home/project_name_user/project_name
RUN chown -R project_name_user:project_name_user /home/project_name_user/project_name
COPY . .
USER project_name_user
# 8888 is the port configured for the project
EXPOSE 8888
The bare minimum for deployment: the Dockerfile plus the databases used and their versions; hand these to ops and that is enough.
Gitlab-ci
The yml file mainly consists of these blocks; adjust the corresponding variables per environment:
stages
variables
build_and_push
deploy_
Cached
import os
import json
import inspect
from common.util import gen_query_str, md5
from tornado.web import RequestHandler
from common.util import JSONEncoder
# cache TTL, 5 minutes by default
CACHED_EXPIRED_TIME = os.getenv('CACHED_EXPIRED_TIME', 5 * 60)
class CacheHandler:
    """
    Cache handler base class
    """

    async def set(self, cache_name, key, value, expired=CACHED_EXPIRED_TIME):
        raise NotImplementedError()

    async def get(self, key):
        raise NotImplementedError()

    async def delete(self, cache_name):
        raise NotImplementedError()

    @staticmethod
    def generate_key(cache_name, url, method, params, process_func=md5):
        return f'{cache_name}:' + process_func(f'{cache_name}:{url}:{method}:{params}')

    def initialize(self, *args, **kwargs):
        raise NotImplementedError()
class RedisCacheHandler(CacheHandler):
    """
    Redis implementation of the cache handler
    """

    async def set(self, cache_name, key, value, expired=CACHED_EXPIRED_TIME):
        """
        Set a cache entry
        :param cache_name:
        :param key:
        :param value:
        :param expired:
        :return:
        """
        with await self.redis_pool as redis:
            await redis.sadd(f'{cache_name}:cache_key_set', key)
            await redis.set(key, value, expire=expired)

    async def get(self, key):
        """
        Get a key
        :param key:
        :return:
        """
        with await self.redis_pool as redis:
            # value = await redis.execute('get', key, encoding="utf-8")
            value = await redis.get(key, encoding="utf-8")
            return value

    async def delete(self, *cache_names):
        """
        Delete every cached entry under the given cache_name(s)
        :param cache_names:
        :return:
        """
        with await self.redis_pool as redis:
            for _cache_name in cache_names:
                all_cache_keys = await redis.smembers(f'{_cache_name}:cache_key_set', encoding='utf-8')
                await redis.delete(f'{_cache_name}:cache_key_set')
                if all_cache_keys:
                    await redis.delete(*all_cache_keys)
    def initialize(self, handler_or_func, *args, **kwargs):
        """
        Initialize the handler with a redis pool
        :param handler_or_func:
        :return:
        """
        if isinstance(handler_or_func, RequestHandler):
            self.redis_pool = handler_or_func.redis_pool
        else:
            arg_spec = inspect.getfullargspec(handler_or_func)
            _args = arg_spec.args
            if 'redis_pool' in _args:
                redis_pool = args[_args.index('redis_pool')]
            else:
                redis_pool = kwargs.get('redis_pool')
            self.redis_pool = redis_pool
def _get_result(chunk):
    data = chunk[0]
    cache_data = json.loads(data.decode("utf-8"))
    code = cache_data.get('code')
    if code != 200:
        return None
    return cache_data
def cached_request(cache_name, cache_handler_class=RedisCacheHandler):
    """
    Caching decorator, applied to a handler method
    :param cache_name: cache name
    :param cache_handler_class: cache handler class
    :return:
    """
    def inner(func):
        async def wrapper(request_handler, *args, **kwargs):
            url = request_handler.request.uri
            method = request_handler.request.method
            params = gen_query_str(request_handler.validate_data) if request_handler.validate_data else ''
            # compute the cache_key from the request uri, method, params and cache_name
            cache_key = cache_handler_class.generate_key(cache_name, url, method, params)
            cache_handler = cache_handler_class()
            cache_handler.initialize(request_handler)
            # look up the result for cache_key in the cache
            result = await cache_handler.get(cache_key)
            # on a hit, return the cached result directly
            if result is not None:
                request_handler.write(json.loads(result))
            else:
                # on a miss, hit the database and cache the result
                result = await func(request_handler, *args, **kwargs)
                chunk = request_handler._write_buffer
                chunk_result = _get_result(chunk)
                if not request_handler._finished:
                    request_handler.finish()
                if chunk_result:
                    await cache_handler.set(cache_name, cache_key, json.dumps(chunk_result, cls=JSONEncoder))
            return result
        return wrapper
    return inner
def delete_cache(*cache_name, cache_handler_class=RedisCacheHandler):
    """
    Cache-invalidation decorator, applied to a service function
    :param cache_name:
    :param cache_handler_class:
    :return:
    """
    def inner(func):
        async def wrapper(*args, **kwargs):
            result = await func(*args, **kwargs)
            cache_handler = cache_handler_class()
            cache_handler.initialize(func, *args, **kwargs)
            # drop every cached entry under cache_name
            await cache_handler.delete(*cache_name)
            return result
        return wrapper
    return inner
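A usage sketch for the two decorators above (the handler, service function, and cache name are hypothetical): the caching decorator goes on the read handler, the invalidation decorator on the service function that mutates the underlying data.
class ClassRoomListHandler(BaseHandler):          # hypothetical read handler
    @cached_request('class_room_list')
    async def get(self):
        ...                                       # query and self.write(...) as usual

@delete_cache('class_room_list')
async def update_class_room(redis_pool, room_id, **fields):  # hypothetical service function
    ...                                           # mutate data; the cache is dropped afterwards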
JWT
- tgc
Take the tgc parameter from the headers and validate it
class TgcValidateHandler(BaseHandler):
    @acquire_connection
    async def post(self):
        tgc = self.request.headers.get('_tgc')
        if not tgc:
            raise NotLogin()
        register_user_data = await validate_tgc(tgc)
        pass
- tgc validator
import json
import os
from urllib.parse import urljoin
import simplejson
from dotenv import load_dotenv
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.httputil import HTTPHeaders
from .exceptions import TGCValidateFailed
load_dotenv()
# single sign-on base address
SSO_BASE_URL = os.getenv('SSO_BASE_URL', '{$url}') # $url is the single sign-on address
def make_async_request(headers, post_url, body=None):
    http_request = HTTPRequest(
        url=urljoin(SSO_BASE_URL, post_url),
        method='POST',
        headers=headers,
        allow_nonstandard_methods=True
    )
    if body:
        body = simplejson.dumps(body)
        http_request.body = body
    return http_request


async def validate_tgc(tgc):
    """
    Validate the tgc
    :param tgc:
    :return: user_id
    """
    http_client = AsyncHTTPClient()
    headers = HTTPHeaders({'Content-Type': 'application/json'})
    headers.add("Cookie", f"sso_ticket={tgc}")
    post_url = '/serviceValidate'
    http_request = make_async_request(headers, post_url)
    resp = await http_client.fetch(request=http_request)
    if resp.code == 200:
        result = json.loads(resp.body)
        if result.get('code') != 200:
            raise TGCValidateFailed()
        return result.get('data')
    else:
        raise TGCValidateFailed()
Singleton
Singleton pattern: guarantee a class has exactly one instance and provide a single global access point to it
Using RedisTool as the example
class RedisTool:
    """
    redis helper (distributed lock implementation)
    """
    __instance_lock = Lock()

    def __new__(cls, *args, **kwargs):
        # singleton implementation
        if not hasattr(RedisTool, "_instance"):  # double-check locking
            with RedisTool.__instance_lock:
                if not hasattr(RedisTool, "_instance"):  # remark
                    RedisTool._instance = super().__new__(cls)
                    RedisTool._instance.__pool = redis.ConnectionPool.from_url(
                        url=REDIS_URL, max_connections=REDIS_MAX_CONNECTION)
                    RedisTool._instance.__connection = redis.Redis(
                        connection_pool=RedisTool._instance.__pool, decode_responses=True)
        return RedisTool._instance
Double-Check locking
Threads do not have to take the lock every time; it is only taken while the instance has not yet been created, which still keeps construction thread-safe.
remark
Why check instance again inside the lock: if the instance already exists it is simply returned, no problem there. But if instance is null and two threads call RedisTool at the same time, both pass the first _instance check; because of the lock only one of them enters while the other waits outside, and only after the first one enters and leaves can the second enter. Without the second instance check, the first thread would create the instance and the second thread would then go on to create another one, defeating the purpose of the singleton.
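A quick sketch to confirm the behaviour described above:
a = RedisTool()
b = RedisTool()
assert a is b  # both calls return the same, single instance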
RabbitMQ
Official site: https://www.rabbitmq.com/
Erlang download
https://www.erlang.org/downloads
- Install the management plugin
..\sbin> rabbitmq-plugins.bat enable rabbitmq_management
- Start
..\sbin> rabbitmq-server.bat
- References