Commit 5a25ee8c authored by kewei.jia, committed by 智勇

Refactor and optimize; add unit tests

parent b9c8cd2b
......@@ -20,7 +20,7 @@ const logFormat = ':req[x-real-ip] :req[x-forwarded-for] - -'
+ ' :status :content-length ":referrer"'
+ ' ":user-agent" :req[x-auth-token] :response-timems'
const cluster = require('../services/tke.clusterService').create()
// const cluster = require('../services/tke.clusterService').create()
const container = require('../services/tke.containerService').create()
const logger = log4js.getLogger()
......@@ -33,7 +33,7 @@ function loadRoutes(router) {
.use(async (ctx, next) => {
// Tencent Cloud
ctx.container = container
ctx.cluster = cluster
// ctx.cluster = cluster
await next()
}, bodyParser())
......@@ -44,7 +44,7 @@ function loadRoutes(router) {
.use('/tag', tag.routes())
}
exports.start = function () {
exports.start = function (port) {
// Load the various services
const app = new Koa()
const router = new Router()
......@@ -56,7 +56,7 @@ exports.start = function () {
deploy()
app.use(log4js.koaLogger(log4js.getLogger('http'), { level: 'auto', format: logFormat }))
app.use(router.routes())
app.listen(4000)
app.listen(port)
logger.info(`server listening ${port}`)
logger.info('Loaded environment config: ', process.env.NODE_ENV)
}
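Making `start()` take the port as a parameter, and passing it through to `app.listen`, is what lets the new unit tests boot the server on an arbitrary free port. A minimal sketch of such a test, assuming mocha as the runner (the test framework itself is not part of this diff):
```
// hypothetical test/app.test.js, assuming mocha
const http = require('http')
const app = require('../app')

describe('app.start', () => {
  it('listens on the port it is given', (done) => {
    app.start(4001) // any free port, instead of the old hard-coded 4000
    http.get('http://127.0.0.1:4001/', (res) => {
      res.resume() // any HTTP response (even a 404) proves the server is up
      done()
    }).on('error', done)
  })
})
```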
const Router = require('koa-router')
const yaml = require('js-yaml')
const qs = require('querystring')
const templates = require('../serviceTemplate')
const { getAllNamespace, createNamespace } = require('./../kubeService/service')
let newNsKey = ''
// const newNsKey = ''
const router = new Router();
module.exports = router
const keepNamespace = ['default', 'kube-system', 'monitor']
router.get('/', async (ctx) => {
const data = await ctx.cluster.namespace_get()
data.namespaces = data.namespaces && data.namespaces.filter(item => !keepNamespace.includes(item.name))
const data = await getAllNamespace()
ctx.body = ctx.ok(data)
})
......@@ -29,97 +26,92 @@ router.get('/get_namespace_for_jenkins', async (ctx) => {
router.get('/info', async (ctx) => {
const data = await ctx.cluster.namespace_get()
const data = await getAllNamespace()
const ns = data.namespaces.find(item => item.name === qs.unescape(ctx.request.query.namespace))
ctx.body = ctx.ok(ns)
})
router.post('/create', async (ctx) => {
await ctx.cluster.namespace_create(ctx.request.body.name, ctx.request.body.description)
// await ctx.cluster.ingress_create(ctx.request.body.name)
ctx.body = ctx.ok()
})
const IMAGES = {
zookeeper: 'zookeeper:3.4.10',
rabbitmq: 'rabbitmq:3.6-management',
mysql: 'mysql:5.7',
redis: 'ccr.ccs.tencentyun.com/qa-db/redis:v4',
}
router.post('/init', async (ctx) => {
const body = ctx.request.body
// if (body.code !== newNsKey) {
// throw new Error('Incorrect creation confirmation code')
// }
await ctx.cluster.namespace_create(body.namespace)
await ctx.cluster.ingress_create(body.namespace)
async function serviceCreate(element, index) {
if (index !== 'common') {
const data = {
namespace: body.namespace,
image: `ccr.ccs.tencentyun.com/qa-${index}/${element}:latest`,
type: index,
serviceName: element,
system_name: element,
}
const template = templates[index].replace(/{{([A-Za-z0-9_]+)}}/g, function () {
if (data[arguments[1]] === undefined) {
throw new Error('Missing variable required by template')
}
return data[arguments[1]]
})
const params = yaml.load(template)
await ctx.cluster.service_create(params, index)
}
if (index === 'common') {
const data = {
namespace: body.namespace,
image: IMAGES[element],
}
const template = templates[element].replace(/{{([A-Za-z0-9_]+)}}/g, function () {
if (data[arguments[1]] === undefined) {
throw new Error('Missing variable required by template')
}
return data[arguments[1]]
})
const params = yaml.load(template)
await ctx.cluster.service_create(params)
}
}
for (const index in body.systems) {
if ({}.hasOwnProperty.call(body.systems, index)) {
for (const item in body.systems[index]) {
if ({}.hasOwnProperty.call(body.systems[index], item)) {
serviceCreate(body.systems[index][item], index)
}
}
}
}
ctx.body = ctx.ok('Creation in progress')
})
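For reference, the request body `/init` expects has this shape, inferred from the loop above; the namespace and service names here are hypothetical:
```
// POST /init
{
  namespace: 'qa-feature1',
  code: '…', // confirmation code from /get_code (the check is currently commented out)
  systems: {
    java: ['clotho'],           // pulled as ccr.ccs.tencentyun.com/qa-java/clotho:latest
    common: ['mysql', 'redis'], // resolved through the fixed IMAGES map
  },
}
```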
router.post('/delete', async (ctx) => {
await ctx.cluster.namespace_delete(ctx.request.body.name)
// await ctx.cluster.namespace_create(ctx.request.body.name, ctx.request.body.description)
await createNamespace(ctx.request.body.name, ctx.request.body.description)
ctx.body = ctx.ok()
})
function generateUUID() {
let d = new Date().getTime();
const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
const r = (d + Math.random() * 16) % 16 | 0;
d = Math.floor(d / 16);
return (c === 'x' ? r : ((r & 0x3) | 0x8)).toString(16);
});
return uuid;
}
router.get('/get_code', async (ctx) => {
newNsKey = newNsKey === '' ? generateUUID() : newNsKey
ctx.body = ctx.ok(newNsKey)
})
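`generateUUID` folds the current timestamp into `Math.random` to fill the RFC 4122 v4 pattern, so `/get_code` hands out values like:
```
generateUUID() // -> '1f2a3b4c-5d6e-4f70-9a8b-2c3d4e5f6a7b' (random; this value is made up)
```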
// const IMAGES = {
// zookeeper: 'zookeeper:3.4.10',
// rabbitmq: 'rabbitmq:3.6-management',
// mysql: 'mysql:5.7',
// redis: 'ccr.ccs.tencentyun.com/qa-db/redis:v4',
// }
//
// router.post('/init', async (ctx) => {
// const body = ctx.request.body
// await ctx.cluster.namespace_create(body.namespace)
// await ctx.cluster.ingress_create(body.namespace)
//
// async function serviceCreate(element, index) {
// if (index !== 'common') {
// const data = {
// namespace: body.namespace,
// image: `ccr.ccs.tencentyun.com/qa-${index}/${element}:latest`,
// type: index,
// serviceName: element,
// system_name: element,
// }
// const template = templates[index].replace(/{{([A-Za-z0-9_]+)}}/g, function () {
// if (data[arguments[1]] === undefined) {
// throw new Error('Missing variable required by template')
// }
// return data[arguments[1]]
// })
// const params = yaml.load(template)
// await ctx.cluster.service_create(params, index)
// }
// if (index === 'common') {
// const data = {
// namespace: body.namespace,
// image: IMAGES[element],
// }
// const template = templates[element].replace(/{{([A-Za-z0-9_]+)}}/g, function () {
// if (data[arguments[1]] === undefined) {
// throw new Error('Missing variable required by template')
// }
// return data[arguments[1]]
// })
// const params = yaml.load(template)
// await ctx.cluster.service_create(params)
// }
// }
//
// for (const index in body.systems) {
// if ({}.hasOwnProperty.call(body.systems, index)) {
// for (const item in body.systems[index]) {
// if ({}.hasOwnProperty.call(body.systems[index], item)) {
// serviceCreate(body.systems[index][item], index)
// }
// }
// }
// }
//
// ctx.body = ctx.ok('Creation in progress')
// })
//
// router.post('/delete', async (ctx) => {
// await ctx.cluster.namespace_delete(ctx.request.body.name)
// ctx.body = ctx.ok()
// })
//
// function generateUUID() {
// let d = new Date().getTime();
// const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
// const r = (d + Math.random() * 16) % 16 | 0;
// d = Math.floor(d / 16);
// return (c === 'x' ? r : ((r & 0x3) | 0x8)).toString(16);
// });
// return uuid;
// }
//
// router.get('/get_code', async (ctx) => {
// newNsKey = newNsKey === '' ? generateUUID() : newNsKey
// ctx.body = ctx.ok(newNsKey)
// })
const Router = require('koa-router')
const yaml = require('js-yaml')
const templates = require('../serviceTemplate')
const IMAGES = ['rabbitmq:3.6-management']
const router = new Router()
module.exports = router
router.post('/create', async (ctx) => {
// let svc = await ctx.client.service_get('rabbitmq', ctx.request.body.namespace)
// if (svc) {
// ctx.body = ctx.fail('Service already exists')
// return
// }
const data = {
namespace: ctx.request.body.namespace,
image: IMAGES[0],
}
const template = templates.rabbitmq.replace(/{{([A-Za-z0-9_]+)}}/g, function () {
if (data[arguments[1]] === undefined) {
throw new Error('Missing variable required by template')
}
return data[arguments[1]]
})
const params = yaml.load(template)
await ctx.client.service_create(params)
ctx.body = ctx.ok('Created successfully')
})
router.post('/delete', async (ctx) => {
await ctx.client.service_delete('rabbitmq', ctx.request.body.namespace)
ctx.body = ctx.ok('Deleted successfully')
})
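The `{{var}}` substitution these routes rely on is plain string replacement followed by a YAML parse. A standalone sketch, with a made-up one-line template (the real ones live in `../serviceTemplate`):
```
const yaml = require('js-yaml')

const template = 'metadata: { name: rabbitmq, namespace: {{namespace}} }'
const data = { namespace: 'qa' }

const rendered = template.replace(/{{([A-Za-z0-9_]+)}}/g, (match, key) => {
  if (data[key] === undefined) {
    throw new Error(`Missing variable required by template: ${key}`)
  }
  return data[key]
})

console.log(yaml.load(rendered))
// -> { metadata: { name: 'rabbitmq', namespace: 'qa' } }
```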
......@@ -2,11 +2,10 @@ const schedule = require('node-schedule')
const _ = require('lodash')
const sleep = require('sleep')
const logger = require('koa-log4').getLogger('deployLatest')
const cluster = require('../services/tke.clusterService').create()
// const cluster = require('../services/tke.clusterService').create()
const container = require('../services/tke.containerService').create()
const { serviceRestart, getServicesFormat } = require('../kubeService/service')
const { serviceRestart, getServicesFormat, getAllNamespace } = require('../kubeService/service')
const keepNamespace = ['default', 'kube-system']
const repoNS = ['qa-java', 'qa-ui', 'qa-node', 'qa-python']
const testNamespace = ['qa', 'fe', 'data', 'fis', 'xyqb2']
......@@ -19,7 +18,7 @@ const deploy = async () => {
const repoName = repo.reponame.split('/')[1]
const latest = await container.getTagByName(repo.reponame, 'latest')
const latestImageID = _.get(latest.tagInfo, '[0].tagId', '')
let ns = await cluster.namespace_get()
let ns = await getAllNamespace()
ns = ns.namespaces && ns.namespaces.filter(item => testNamespace.includes(item.name))
for (const namespace of ns) {
const svcs = await getServicesFormat(namespace.name)
......
......@@ -71,11 +71,6 @@ const dingTalkPush = async function (item, is_recover) {
},
body: JSON.stringify(dingData),
})
// if (JSON.parse(res).errcode === 0 && is_recover) {
// await redis.set(key, 'send')
// } else {
// logger.error(res.errmsg)
// }
}
const checkRecoverPod = async () => {
let stream = redis.scanStream({
......@@ -127,7 +122,7 @@ const checkErrorPod = async () => {
const res = await redis.get(key)
if (res != null) {
if (res === 'send') {
logger.info(item.metadata.name, ': threshold reached; DingTalk alert already sent')
// logger.info(item.metadata.name, ': threshold reached; DingTalk alert already sent')
} else {
const counter = Number(res)
if (counter > 5) {
......
const Router = require('koa-router')
const logger = require('koa-log4').getLogger()
const logger = require('koa-log4')
.getLogger()
const _ = require('lodash')
const Redis = require('ioredis')
const redis = new Redis(6380, '172.30.220.22')
const { ingressCreate, ingressDelete } = require('../kubeService/ingress')
const { projectConfig, defaultConfig } = require('../serviceTemplate/resourceLimit')
const { projectConfig, defaultConfig } = require('../resource/resourceLimit')
const {
getPods,
serviceCreate,
......@@ -25,7 +26,10 @@ module.exports = router
router.get('/', async (ctx) => {
const data = await getServicesFormat(ctx.query.namespace)
ctx.body = ctx.ok({ services: data, namespace: ctx.query.namespace })
ctx.body = ctx.ok({
services: data,
namespace: ctx.query.namespace,
})
})
const makeResouce = (serviceName, type) => {
......@@ -60,11 +64,15 @@ router.post('/create', async (ctx) => {
})
router.post('/details', async (ctx) => {
ctx.validate(ctx.Joi.object().keys({
serviceName: ctx.Joi.string().required(),
namespace: ctx.Joi.string().required(),
type: ctx.Joi.string().required(),
}))
ctx.validate(ctx.Joi.object()
.keys({
serviceName: ctx.Joi.string()
.required(),
namespace: ctx.Joi.string()
.required(),
type: ctx.Joi.string()
.required(),
}))
const data = await getServiceDetail(ctx.request.body.namespace, ctx.request.body.serviceName, ctx.request.body.type)
ctx.body = ctx.ok(data)
......@@ -140,12 +148,14 @@ router.get('/listEnvVars', async (ctx) => {
res[0].body.items.forEach(async (item) => {
const serviceName = (item.metadata.labels && item.metadata.labels['qcloud-app']) || item.metadata.name
const upperCaseName = serviceName.toUpperCase().replace(/-/g, '_')
const upperCaseName = serviceName.toUpperCase()
.replace(/-/g, '_')
envVars[`${upperCaseName}_SERVICE_HOST`] = item.status.hostIP
})
res[1].body.items.forEach(async (item) => {
const upperCaseName = item.metadata.name.toUpperCase().replace(/-/g, '_')
const upperCaseName = item.metadata.name.toUpperCase()
.replace(/-/g, '_')
envVars[`${upperCaseName}_SERVICE_PORT`] = _.get(item.spec.ports, '[0].nodePort', undefined)
item.spec.ports.forEach((i) => {
envVars[`${upperCaseName}_SERVICE_PORT_${i.port}`] = i.nodePort || i.port
......@@ -156,7 +166,10 @@ router.get('/listEnvVars', async (ctx) => {
envVars.DB_SERVICE_PORT = envVars.MYSQL_SERVICE_PORT
envVars.DB_SERVICE_PORT_3306 = envVars.MYSQL_SERVICE_PORT_3306
ctx.body = { details: envVars, success: true }
ctx.body = {
details: envVars,
success: true,
}
})
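These variables mirror Kubernetes-style service discovery env injection; for a service named `user-center` the response would look roughly like this (all values hypothetical):
```
{
  details: {
    USER_CENTER_SERVICE_HOST: '10.0.4.7', // pod hostIP
    USER_CENTER_SERVICE_PORT: 30080,      // first nodePort
    USER_CENTER_SERVICE_PORT_8080: 30080, // one entry per declared port
    DB_SERVICE_PORT: 32306,               // aliased from MYSQL_SERVICE_PORT
    DB_SERVICE_PORT_3306: 32306,
  },
  success: true,
}
```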
// for container
......@@ -169,7 +182,8 @@ router.get('/listEnvVarsNew', async (ctx) => {
res[0].body.items.forEach(async (item) => {
const serviceName = (item.metadata.labels && item.metadata.labels['qcloud-app']) || item.metadata.name
const upperCaseName = serviceName.toUpperCase().replace(/-/g, '_')
const upperCaseName = serviceName.toUpperCase()
.replace(/-/g, '_')
if (item.metadata.labels.type === 'base') {
envVars[`${upperCaseName}_SERVICE_HOST`] = serviceName
} else {
......@@ -178,7 +192,8 @@ router.get('/listEnvVarsNew', async (ctx) => {
})
res[1].body.items.forEach(async (item) => {
const upperCaseName = item.metadata.name.toUpperCase().replace(/-/g, '_')
const upperCaseName = item.metadata.name.toUpperCase()
.replace(/-/g, '_')
const type = item.metadata.labels.type
if (type === 'base') {
envVars[`${upperCaseName}_SERVICE_PORT`] = _.get(item.spec.ports, '[0].port', undefined)
......@@ -197,5 +212,8 @@ router.get('/listEnvVarsNew', async (ctx) => {
envVars.DB_SERVICE_PORT = envVars.MYSQL_SERVICE_PORT
envVars.DB_SERVICE_PORT_3306 = envVars.MYSQL_SERVICE_PORT_3306
ctx.body = { details: envVars, success: true }
ctx.body = {
details: envVars,
success: true,
}
})
const Router = require('koa-router')
const yaml = require('js-yaml')
const _ = require('lodash')
const logger = require('koa-log4').getLogger()
const templates = require('../serviceTemplate')
const lruCache = require('../services/lruCache.service')
const { ingressCreate, ingressDelete } = require('../kubeService/ingress')
const { projectConfig, defaultConfig } = require('../serviceTemplate/resourceLimit')
const { podGet, serviceCreate } = require('../kubeService/service')
const router = new Router()
module.exports = router
router.get('/', async (ctx) => {
// Use the first node in the node list as the service access IP
const cacheKey = 'k8s.nodes.first'
const c = lruCache.get(cacheKey)
let lanIp = lruCache.get(cacheKey)
if (!lanIp) {
const res = await ctx.cluster.node_list()
lanIp = _.get(res, 'nodes[0].lanIp', '')
lruCache.set(cacheKey, lanIp)
}
const data = await ctx.cluster.service_list(ctx.query.namespace)
const podData = await podGet(ctx.query.namespace)
const getDetail = async (item) => {
const resData = await ctx.cluster.service_get(item.serviceName, ctx.query.namespace)
item.image = resData.service.containers[0].image
if (item.userLabels.type === 'base') {
item.portMappings = resData.service.portMappings
const pod = podData.body.items.filter(i => i.metadata.name.indexOf(item.serviceName) !== -1)
lanIp = pod[0].status.hostIP
item.lanIp = lanIp
}
}
const task = []
for (let i = 0; i < data.services.length; i += 1) {
task.push(getDetail(data.services[i]))
}
await Promise.all(task)
ctx.body = ctx.ok(data)
})
const createService = async (ctx) => {
const {
type, serviceName, namespace, image, system_name, domain, label, debug,
} = ctx.request.body
logger.info('Creating service', ctx.request.body)
if (label === 'base') {
await serviceCreate(namespace, serviceName, image, label)
ctx.body = ctx.ok('Created successfully')
return
}
const data = {
serviceName,
namespace,
image,
system_name,
debug,
}
if (!system_name) {
// differs for ui abTest
data.system_name = serviceName
}
// Resource limits
const resources = projectConfig[data.system_name] || defaultConfig[type]
logger.info('Resource limits', JSON.stringify(resources))
data.resources = resources
const template = templates[type].replace(/{{([A-Za-z0-9_\.]+)}}/g, function () {
if (_.get(data, arguments[1], null) === null) {
throw new Error(`Missing template variable: ${arguments[1]}`)
}
return _.get(data, arguments[1])
})
let params = yaml.load(template)
// todo: lift this restriction; so far only the clotho image has been updated
// Java projects need a customized readiness check
if (type === 'java' && data.system_name !== 'clotho') {
params = _.omitBy(params, (value, key) => key.indexOf('healthCheck') !== -1)
}
logger.info(params)
await ctx.cluster.service_create(params, label)
if (label !== 'base') {
if (serviceName === 'xyqb-user2') {
await ingressCreate(namespace, 'xyqb-user2-2', 'passportapi2')
}
await ingressCreate(namespace, serviceName, domain)
}
ctx.body = ctx.ok('Created successfully')
}
router.post('/create', async (ctx) => {
await createService(ctx)
})
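A `POST /create` body carries the fields destructured at the top of `createService`; for example (values hypothetical):
```
// POST /create
{
  type: 'java',           // selects the template and the default resource limits
  serviceName: 'clotho',
  namespace: 'qa',
  image: 'ccr.ccs.tencentyun.com/qa-java/clotho:latest',
  domain: 'clotho-api',   // used for the ingress rule when label !== 'base'
  label: 'app',           // 'base' services skip templating and ingress
  debug: false,
}
```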
router.post('/details', async (ctx) => {
ctx.validate(ctx.Joi.object().keys({
serviceName: ctx.Joi.string().required(),
namespace: ctx.Joi.string().required(),
}))
const podData = await podGet(ctx.request.body.namespace)
// const lanIp = podData.body.items.filter(i => i.metadata.name.indexOf(ctx.request.body.serviceName) !== -1)[0].status.hostIP
const serviceItems = podData.body.items.filter(i => i.metadata.name.indexOf(ctx.request.body.serviceName) !== -1)
const lanIp = _.get(serviceItems, '[0].status.hostIP', '')
const data = await ctx.cluster.service_get(ctx.request.body.serviceName, ctx.request.body.namespace)
ctx.body = ctx.ok(Object.assign({}, data, { lanIp }))
})
router.post('/delete', async (ctx) => {
await ctx.cluster.service_delete(ctx.request.body.serviceName, ctx.request.body.namespace)
if (ctx.request.body.serviceName === 'xyqb-user2') {
await ingressDelete(ctx.request.body.namespace, 'xyqb-user2-2')
}
await ingressDelete(ctx.request.body.namespace, ctx.request.body.serviceName)
ctx.body = ctx.ok('Deleted successfully')
})
router.post('/modifyImage', async (ctx) => {
let list = await ctx.cluster.service_list(ctx.request.body.namespace)
list = list.services.map(item => item.serviceName)
if (list.includes(ctx.request.body.serviceName)) {
await ctx.cluster.service_modifyImage(ctx.request.body.serviceName, ctx.request.body.image, ctx.request.body.namespace)
} else {
await createService(ctx)
}
ctx.body = ctx.ok('Updated successfully')
})
router.post('/instance', async (ctx) => {
const data = await ctx.cluster.instance_get(ctx.request.body.serviceName, ctx.request.body.namespace)
ctx.body = ctx.ok(data)
})
router.post('/redeploy', async (ctx) => {
await ctx.cluster.service_redeployment(ctx.request.body.serviceName, ctx.request.body.namespace)
ctx.body = ctx.ok('Service redeployed successfully')
})
const Router = require('koa-router')
const yaml = require('js-yaml')
const templates = require('../serviceTemplate')
const IMAGES = ['zookeeper:3.4.10']
const router = new Router()
module.exports = router
router.post('/create', async (ctx) => {
const data = {
namespace: ctx.request.body.namespace,
image: IMAGES[0],
}
const template = templates.zookeeper.replace(/{{([A-Za-z0-9_]+)}}/g, function () {
if (data[arguments[1]] === undefined) {
throw new Error('Missing variable required by template')
}
return data[arguments[1]]
})
const params = yaml.load(template)
await ctx.client.service_create(params)
ctx.body = ctx.ok('Created successfully')
})
router.post('/delete', async (ctx) => {
await ctx.client.service_delete('zookeeper', ctx.request.body.namespace)
ctx.body = ctx.ok('Deleted successfully')
})
const app = require('../app')
app.start()
app.start(4000)
### Image notes
1. redis
```
Based on redis-alpine to keep the image small.
The business needs multiple ports, so the image starts 5 separate instances.
```
2. ui
```
Based on the official openresty image (centos7).
Config files live at /etc/nginx/conf.d/*.conf; the default one is default.conf.
ui projects read SYSTEM_NAME from the environment, so an env directive must be added to nginx.conf.
abTest projects are built with the argument CONFIG=abTest/paycenter.conf.
```
### Questions
1. Move the custom conf into the entrypoint?
   That feels like it would just turn into a config_repository.
FROM node:8-alpine
ARG CONFIG=conf/default.js
ARG PROJECT=koa2-hello
ARG TARGET=src/config/
# Copy the code
# todo: copy from a code image instead
ADD code.tar.gz /home/quant_group
# Copy the config file
WORKDIR /home/quant_group/${PROJECT}
COPY ${CONFIG} ${TARGET}
COPY docker-entrypoint.sh ./
CMD ./docker-entrypoint.sh
#!/bin/bash
docker build --build-arg CONFIG=conf/opapi2.js -t test-nodejs .
\ No newline at end of file
#!/bin/bash
echo 192.168.4.3 git.q-gp.com >> /etc/hosts
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/tke-ui/tke/nodejs.sh -o run.sh
sh run.sh
FROM redis:3-alpine
WORKDIR /home/quantgroups/redis
COPY conf/*.conf ./conf/
COPY start.sh .
RUN mkdir /var/log/redis \
&& mkdir /var/lib/redis \
&& mkdir /var/run/redis
EXPOSE 6379 6380 6381 6382 6383
CMD ./start.sh
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
bind 0.0.0.0
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance for default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
\ No newline at end of file
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6380
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
bind 0.0.0.0
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
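#
# A purely illustrative (commented out) example, assuming this instance
# should stay within 256 megabytes; see the policy options below for how
# keys would then be evicted:
#
# maxmemory 256mb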
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't evict at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# As of this writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was least recently used; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data on disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file by implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the base size by the specified percentage, the rewrite is
# triggered. You also need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but the file is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
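# A worked example of the two directives above: if the AOF measured 80mb
# after the last rewrite, a percentage of 100 triggers the next BGREWRITEAOF
# once the file grows past 160mb, while a file that never reaches the 64mb
# minimum is never rewritten automatically.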
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server when a write command has already
# been issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
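# The slow log can be inspected and cleared at runtime from redis-cli,
# for example:
#
#   SLOWLOG GET 10    (fetch the ten most recent entries)
#   SLOWLOG LEN       (number of entries currently stored)
#   SLOWLOG RESET     (clear the log and reclaim its memory)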
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
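# A quick way to check which encoding a value is using, from redis-cli
# (the key name here is purely illustrative):
#
#   HSET user:1 name alice
#   OBJECT ENCODING user:1    (reports "ziplist" while under the limits above)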
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
\ No newline at end of file
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6381
# If you want you can bind a single interface; if the bind option is not
# specified, all the interfaces will listen for incoming connections.
#
bind 0.0.0.0
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of the network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that twice the time is needed to close the connection.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
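#
# A purely illustrative (commented out) example, assuming a master running
# on the same host on port 6381:
#
# slaveof 127.0.0.1 6381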
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to the server at a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both bulk transfer I/O and
# master data or ping response timeouts. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
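#
# A purely illustrative (commented out) example, assuming this instance
# should stay within 256 megabytes; see the policy options below for how
# keys would then be evicted:
#
# maxmemory 256mb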
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't evict at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# As of this writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was least recently used; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data on disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file by implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the base size by the specified percentage, the rewrite is
# triggered. You also need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but the file is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
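# A worked example of the two directives above: if the AOF measured 80mb
# after the last rewrite, a percentage of 100 triggers the next BGREWRITEAOF
# once the file grows past 160mb, while a file that never reaches the 64mb
# minimum is never rewritten automatically.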
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
# is the only way to shut down the server when a write command has already
# been issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
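# The slow log can be inspected and cleared at runtime from redis-cli,
# for example:
#
#   SLOWLOG GET 10    (fetch the ten most recent entries)
#   SLOWLOG LEN       (number of entries currently stored)
#   SLOWLOG RESET     (clear the log and reclaim its memory)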
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
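# A quick way to check which encoding a value is using, from redis-cli
# (the key name here is purely illustrative):
#
#   HSET user:1 name alice
#   OBJECT ENCODING user:1    (reports "ziplist" while under the limits above)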
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
\ No newline at end of file
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6382
# If you want you can bind a single interface; if the bind option is not
# specified, all the interfaces will listen for incoming connections.
#
bind 0.0.0.0
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of the network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that twice the time is needed to close the connection.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
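#
# A purely illustrative (commented out) example, assuming a master running
# on the same host on port 6381:
#
# slaveof 127.0.0.1 6381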
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Slaves send PINGs to the server at a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both bulk transfer I/O and
# master data or ping response timeouts. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
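#
# A purely illustrative (commented out) example, assuming this instance
# should stay within 256 megabytes; see the policy options below for how
# keys would then be evicted:
#
# maxmemory 256mb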
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't evict at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# As of this writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was least recently used; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data on disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size has
# grown beyond the base size by the specified percentage, the rewrite is
# triggered. You also need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but the file is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
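# Worked example with the settings above: if the AOF measured 64mb after the
# last rewrite, the next automatic BGREWRITEAOF triggers once the file grows
# past 128mb (100% above the base); files under the 64mb floor never trigger
# an automatic rewrite regardless of percentage growth.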
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
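# Runtime example (assuming redis-cli is available):
#   redis-cli SCRIPT KILL       # stop a slow script that has not written yet
#   redis-cli SHUTDOWN NOSAVE   # last resort once the script has issued writes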
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
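# Inspection example (assuming redis-cli is available):
#   redis-cli SLOWLOG GET 10    # fetch the ten most recent slow entries
#   redis-cli SLOWLOG LEN       # current number of logged entries
#   redis-cli SLOWLOG RESET     # clear the log and reclaim its memory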
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run on a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously exceeds
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
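# For example, "client-output-buffer-limit slave 0 0 0" would disable both
# limits for slaves entirely (rarely advisable on busy replicas).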
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
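# For example, "hz 100" runs the background task checks ten times more often
# than the default, trading idle CPU for more precise expiry and timeouts.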
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
\ No newline at end of file
daemonize no
pidfile /var/run/redis/redis.pid
port 6383
bind 0.0.0.0
timeout 0
tcp-keepalive 0
loglevel notice
logfile /var/log/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis/
slave-serve-stale-data yes
slave-read-only yes
repl-disable-tcp-nodelay no
slave-priority 100
appendonly no
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
#!/bin/bash
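# Start four Redis instances in the background and keep 6379 in the
# foreground so the process (and any supervising container) stays alive.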
redis-server /home/quantgroups/redis/conf/redis-6380.conf &
redis-server /home/quantgroups/redis/conf/redis-6381.conf &
redis-server /home/quantgroups/redis/conf/redis-6382.conf &
redis-server /home/quantgroups/redis/conf/redis-6383.conf &
redis-server /home/quantgroups/redis/conf/redis-6379.conf
\ No newline at end of file
# Based on CentOS 7
FROM openresty/openresty:1.13.6.2-centos
COPY nginx.conf /usr/local/openresty/nginx/conf/
WORKDIR /home/quant_group
COPY test-ui.conf ./conf.d/
COPY index.html ./test-ui/
CMD ["/usr/local/openresty/bin/openresty", "-g", "daemon off;"]
#!/bin/bash
docker build -t test-ui .
\ No newline at end of file
#!/bin/bash
docker build -t test-ui .
docker tag test-ui ccr.ccs.tencentyun.com/qa-app/test-ui:2
docker push ccr.ccs.tencentyun.com/qa-app/test-ui:2
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<title>2222</title>
</head>
<body>
<h1>22222</h1>
</body>
</html>
\ No newline at end of file
#user nobody;
worker_processes 1;
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
#pid logs/nginx.pid;
env SYSTEM_NAME;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
include /home/quant_group/conf.d/*.conf;
}
\ No newline at end of file
server {
listen 80;
server_name _;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.0;
gzip_comp_level 4;
gzip_types text/plain application/x-javascript text/css application/xml application/javascript;
gzip_vary on;
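# Serve the static bundle; try_files falls back to /index.html so
# client-side routes resolve, and "expires -1" keeps it uncached.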
location / {
expires -1;
root /home/quant_group/test-ui;
try_files $uri $uri/ /index.html?$query_string;
}
}
\ No newline at end of file
# Based on CentOS 7
FROM openresty/openresty:1.13.6.2-centos
RUN yum -y install epel-release \
&& yum -y install python-pip
WORKDIR /home/quant_group
# Add the test toolkit
COPY qa-replace .
RUN pip install -r requirements.txt \
&& yum clean all
# Copy the nginx configuration
COPY lua/*.lua /etc/nginx/lua/
ARG CONFIG=default.conf
COPY conf/nginx.conf /usr/local/openresty/nginx/conf/
COPY conf/${CONFIG} /etc/nginx/conf.d/
# COPY docker-entrypoint.sh ./
# CMD ./docker-entrypoint.sh
#!/bin/bash
tag=2.4
docker build -t test-ui .
# && docker tag test-ui 192.168.4.4/tmp/test-ui:${tag} && docker push 192.168.4.4/tmp/test-ui:${tag}
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<title>22</title>
</head>
<body>
<h1>op-xuezhijie.liangkebang.com</h1>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<title>1111</title>
</head>
<body>
<h1>1</h1>
</body>
</html>
\ No newline at end of file
server {
listen 80;
server_name _;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.0;
gzip_comp_level 4;
gzip_types text/plain application/x-javascript text/css application/xml application/javascript;
gzip_vary on;
set $normalPath "/home/quant_group/spider-center-ui/dist";
set $channelPath "/home/quant_group/new-spider-center-ui/dist";
set $registerChannel "999999";
set $merchantChannel "";
set $excludeChannel "504,900";
set $tailNumber "1,2,3,4,5,6,7,8,9";
location / {
expires -1;
set_by_lua_file $rootPath /etc/nginx/lua/phoneChannel.lua;
header_filter_by_lua_file /etc/nginx/lua/addCookie.lua;
root $rootPath;
try_files $uri $uri/ /index.html?$query_string;
}
}
server {
listen 80;
server_name _;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.0;
gzip_comp_level 4;
gzip_types text/plain application/x-javascript text/css application/xml application/javascript;
gzip_vary on;
location / {
expires -1;
set_by_lua $rootPath '
local pre = "/home/quant_group/code/";
local sys = os.getenv("SYSTEM_NAME");
return pre..sys.."/dist"
';
root $rootPath;
try_files $uri $uri/ /index.html?$query_string;
}
}
\ No newline at end of file
#user nobody;
worker_processes 1;
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
#pid logs/nginx.pid;
env SYSTEM_NAME;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
include /home/quant_group/conf.d/*.conf;
}
\ No newline at end of file
#!/bin/bash
echo 192.168.4.3 git.q-gp.com >> /etc/hosts
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/tke-ui/tke/ui.sh -o run.sh
sh run.sh
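-- addCookie.lua: persist the registerFrom / merchantId query parameters as
-- cookies on the .liangkebang.com domain so later requests keep the channel.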
local args = ngx.req.get_uri_args()
local register_uri = args['registerFrom']
local merchant_uri = args['merchantId']
if (register_uri) then
ngx.header['Set-Cookie'] = {'registerFrom=' .. register_uri .. '; path=/; domain=.liangkebang.com;'}
end
if (merchant_uri) then
ngx.header['Set-Cookie'] = {'merchantId=' .. merchant_uri .. '; path=/; domain=.liangkebang.com;'}
end
\ No newline at end of file
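-- phoneChannel.lua: pick the document root per request. The "baitiao" channel
-- (registerFrom == '222') and phone numbers whose last digit matches
-- $tailNumber are routed to the new UI ($channelPath); channels listed in
-- $excludeChannel keep the old UI ($normalPath).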
function string.split(str)
local splitlist = {};
string.gsub(str, '[^,]+', function(w) table.insert(splitlist, w) end )
return splitlist;
end
function isInTable(value, tbl)
for k,v in ipairs(tbl) do
if v == value then
return true;
end
end
return false;
end
local path = ngx.var.normalPath;
local channelPath = ngx.var.channelPath;
-- Parse the phone tail-number and special (excluded) channel settings
local tailNumbers = string.split(ngx.var.tailNumber);
local excludeChannel = string.split(ngx.var.excludeChannel)
-- Parse the channel settings
local register = string.split(ngx.var.registerChannel);
-- Cookie values
local register_cookie = ngx.var.cookie_registerFrom
-- URI parameter values
local args = ngx.req.get_uri_args()
local register_uri = args['registerFrom']
local phoneNo = args['phoneNo']
if (not phoneNo or string.len(phoneNo) == 0) then
phoneNo = ngx.var.cookie_phoneNo
end
local baiTiao = '222';
local isBaitiao
if (register_uri) then
isBaitiao = register_uri == baiTiao
else
isBaitiao = register_cookie == baiTiao
end
if (isBaitiao) then
return channelPath;
end
-- No channel hit yet: excluded channels return the old version; a phone number matching the tail-number rules returns the new one
if (ngx.var.excludeChannel ~= '' or ngx.var.tailNumber ~= '') then
local hitExclude = isInTable(register_uri, excludeChannel) or isInTable(register_cookie, excludeChannel)
if (hitExclude) then
return path;
end
-- Does the phone number match a tail-number rule?
local hitTailNumber = false
if phoneNo then
for key,value in ipairs(tailNumbers)
do
if (string.match(phoneNo, value..'$')) then
hitTailNumber = true
break
end
end
end
if (hitTailNumber) then
return channelPath;
end
end
return channelPath;
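-- Variant of the router above that also matches merchantId against a
-- configured merchant channel list before switching document roots.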
function string.split(str)
local splitlist = {};
string.gsub(str, '[^,]+', function(w) table.insert(splitlist, w) end )
return splitlist;
end
function isInTable(value, tbl)
for k,v in ipairs(tbl) do
if v == value then
return true;
end
end
return false;
end
local path = ngx.var.normalPath;
local channelPath = ngx.var.channelPath;
-- No channels configured: return the old directory
if (ngx.var.registerChannel == '' and ngx.var.merchantChannel == '') then
return path;
end
-- Parse the channel settings
local register = string.split(ngx.var.registerChannel);
local merchant = string.split(ngx.var.merchantChannel);
-- Cookie values
local register_cookie = ngx.var.cookie_registerFrom
local merchant_cookie = ngx.var.cookie_merchantId
-- URI parameter values
local args = ngx.req.get_uri_args()
local register_uri = args['registerFrom']
local merchant_uri = args['merchantId']
-- Prefer the registerFrom URI parameter over the cookie
local hitRegister
if (register_uri) then
hitRegister = isInTable(register_uri, register)
else
hitRegister = isInTable(register_cookie, register)
end
-- Prefer the merchantId URI parameter over the cookie
local hitMerchantId
if (merchant_uri) then
hitMerchantId = isInTable(merchant_uri, merchant)
else
hitMerchantId = isInTable(merchant_cookie, merchant)
end
local baiTiao = '222';
-- Is this the "baitiao" channel?
local isBaitiao
if (register_uri) then
isBaitiao = register_uri == baiTiao
else
isBaitiao = register_cookie == baiTiao
end
-- Baitiao and no merchantId rule configured: hit
if (isBaitiao and ngx.var.merchantChannel == '') then
return channelPath
end
-- Baitiao with a merchantId rule: hit only after merchantId matches
if (isBaitiao and ngx.var.merchantChannel ~= '' and hitMerchantId) then
return channelPath
end
-- Not baitiao: hit when registerFrom matches
if ( not isBaitiao and hitRegister) then
return channelPath
end
return path;
......@@ -4,7 +4,8 @@ const config = require('kubernetes-client').config
const _ = require('lodash')
const moment = require('moment')
const yaml = require('js-yaml')
const logger = require('koa-log4').getLogger('kubeService')
const logger = require('koa-log4')
.getLogger('kubeService')
const yamls = require('../yamls')
const APP_CONFIG = require('../config')
......@@ -14,7 +15,49 @@ const client = new Client({
),
version: '1.10',
})
const keepNamespace = ['default', 'kube-system', 'monitor', 'kube-public']
const getAllNamespace = async () => {
const res = await client.api.v1.namespace.get()
const data = {
namespaces: [],
}
res.body.items.forEach((item) => {
if (!keepNamespace.includes(item.metadata.name)) {
data.namespaces.push({
name: item.metadata.name,
description: item.metadata.annotations ? item.metadata.annotations.description : '',
status: item.status ? item.status.phase : '',
createdAt: moment(item.metadata.creationTimestamp)
.format('YYYY-MM-DD HH:mm:ss'),
})
}
})
return data
}
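// Illustrative return shape of getAllNamespace() (values are examples):
// { namespaces: [{ name: 'qa-demo', description: '...', status: 'Active',
//   createdAt: '2019-08-12 10:00:00' }] }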
const createNamespace = async (name, description) => {
const params = {
apiVersion: 'v1',
kind: 'Namespace',
metadata: {
annotations: {
description,
},
name,
},
spec: {
imagePullSecrets: [
{
name: 'qcloudregistrykey',
},
{
name: 'tencenthubkey',
},
],
},
}
const res = await client.api.v1.namespaces.post({ body: params })
return res
}
const makeManifest = (data) => {
if (!data.debug) {
data.debug = '"0"'
......@@ -54,7 +97,9 @@ const serviceCreate = async (data) => {
manifest.spec.type = 'NodePort'
}
logger.info('创建svc', JSON.stringify(manifest))
await client.api.v1.namespaces(namespace).services.post({ body: manifest })
await client.api.v1.namespaces(namespace)
.services
.post({ body: manifest })
break;
case 'Deployment':
......@@ -63,16 +108,22 @@ const serviceCreate = async (data) => {
}
logger.info('创建deploy', serviceName, JSON.stringify(manifest))
await client.apis.apps.v1beta1.namespaces(namespace).deployments.post({ body: manifest })
await client.apis.apps.v1beta1.namespaces(namespace)
.deployments
.post({ body: manifest })
break;
case 'PersistentVolumeClaim':
pvcName = `${serviceName}-${namespace}`
pvc = await client.api.v1.namespaces(namespace).persistentvolumeclaims.get()
pvc = await client.api.v1.namespaces(namespace)
.persistentvolumeclaims
.get()
pvc = pvc.body.items.filter(item => item.metadata.name === pvcName)
if (!pvc.length) {
logger.info('创建pvc', JSON.stringify(manifest))
await client.api.v1.namespaces(namespace).persistentvolumeclaims.post({ body: manifest })
await client.api.v1.namespaces(namespace)
.persistentvolumeclaims
.post({ body: manifest })
}
break;
......@@ -85,10 +136,23 @@ const serviceCreate = async (data) => {
const imageUpdate = async (data) => {
const { namespace, serviceName } = data
const image = `ccr.ccs.tencentyun.com/${data.image}`
const updateObj = { spec: { template: { spec: { containers: [{ name: serviceName, image }] } } } }
const updateObj = {
spec: {
template: {
spec: {
containers: [{
name: serviceName,
image,
}],
},
},
},
}
logger.info('更新镜像', namespace, JSON.stringify(updateObj))
await client.apis.apps.v1beta1.namespaces(namespace).deployments(serviceName).patch({ body: updateObj })
await client.apis.apps.v1beta1.namespaces(namespace)
.deployments(serviceName)
.patch({ body: updateObj })
}
const deployUpdate = async (data) => {
......@@ -99,7 +163,9 @@ const deployUpdate = async (data) => {
const manifest = yaml.load(item);
if (manifest.kind === 'Deployment') {
logger.info('更新deploy:', JSON.stringify(manifest))
await client.apis.apps.v1beta1.namespaces(namespace).deployments(serviceName).put({ body: manifest })
await client.apis.apps.v1beta1.namespaces(namespace)
.deployments(serviceName)
.put({ body: manifest })
}
}
}
......@@ -156,7 +222,9 @@ const formatPodInfo = (podInfo) => {
podIp: podInfo.status.podIP,
lanIp: podInfo.status.hostIP,
startTime: podInfo.status.startTime,
createdAt: moment(new Date(podInfo.status.startTime)).startOf('minute').fromNow(),
createdAt: moment(new Date(podInfo.status.startTime))
.startOf('minute')
.fromNow(),
image,
imageID,
labels: podInfo.metadata.labels,
......@@ -172,7 +240,9 @@ const formatPodInfo = (podInfo) => {
const formatIngressInfo = obj => ({ host: _.get(obj.spec, 'rules[0].host', '') })
const getPods = async (namespace) => {
const data = await client.api.v1.namespaces(namespace).pods.get()
const data = await client.api.v1.namespaces(namespace)
.pods
.get()
return data
}
......@@ -183,9 +253,15 @@ const getServicesFormat = async (namespace) => {
const service = {}
const res = await Promise.all([
client.api.v1.namespaces(namespace).pods.get(),
client.api.v1.namespaces(namespace).services.get(),
client.apis.extensions.v1beta1.namespaces(namespace).ingresses.get(),
client.api.v1.namespaces(namespace)
.pods
.get(),
client.api.v1.namespaces(namespace)
.services
.get(),
client.apis.extensions.v1beta1.namespaces(namespace)
.ingresses
.get(),
])
res[0].body.items.forEach(async (item) => {
......@@ -217,8 +293,12 @@ const getServicesFormat = async (namespace) => {
const getServiceDetail = async (namespace, name, type) => {
const res = await Promise.all([
client.api.v1.namespaces(namespace).pods.get({ qs: { labelSelector: `qcloud-app=${name}` } }),
client.api.v1.namespaces(namespace).services(name).get(),
client.api.v1.namespaces(namespace)
.pods
.get({ qs: { labelSelector: `qcloud-app=${name}` } }),
client.api.v1.namespaces(namespace)
.services(name)
.get(),
])
res[0] = formatPodInfo(res[0].body.items[0])
......@@ -226,7 +306,9 @@ const getServiceDetail = async (namespace, name, type) => {
// if (!dict.commonService.includes(name)) {
if (type !== 'base') {
res[2] = await client.apis.extensions.v1beta1.namespaces(namespace).ingresses(name).get()
res[2] = await client.apis.extensions.v1beta1.namespaces(namespace)
.ingresses(name)
.get()
res[2] = formatIngressInfo(res[2].body)
}
......@@ -235,44 +317,60 @@ const getServiceDetail = async (namespace, name, type) => {
const serviceRestart = async (namespace, name) => {
logger.info('重置服务', namespace, name)
await client.api.v1.namespaces(namespace).pods(name).delete()
await client.api.v1.namespaces(namespace)
.pods(name)
.delete()
}
const serviceDelete = async (namespace, name) => {
try {
logger.info('删除deploy', namespace, name)
await client.apis.apps.v1beta1.namespaces(namespace).deployments(name).delete()
await client.apis.apps.v1beta1.namespaces(namespace)
.deployments(name)
.delete()
logger.info('删除svc', namespace, name)
await client.api.v1.namespaces(namespace).services(name).delete()
await client.api.v1.namespaces(namespace)
.services(name)
.delete()
} catch (error) {
logger.warn(error.toString())
}
}
const getReplicaSet = async (namespace) => {
const rsData = await client.apis.apps.v1beta2.namespaces(namespace).replicasets.get()
const rsData = await client.apis.apps.v1beta2.namespaces(namespace)
.replicasets
.get()
return rsData.body.items
}
const replicaSetDelete = async (namespace, name) => {
logger.info('删除rs', namespace, name)
await client.apis.apps.v1.namespaces(namespace).replicasets.delete({ qs: { labelSelector: `qcloud-app=${name}` } })
await client.apis.apps.v1.namespaces(namespace)
.replicasets
.delete({ qs: { labelSelector: `qcloud-app=${name}` } })
// await client.apis.apps.v1.namespaces(namespace).replicasets(rsName).delete()
}
const pvcDelete = async (namespace, name) => {
try {
logger.info('删除pvc', namespace, name)
await client.api.v1.namespaces(namespace).persistentvolumeclaim(`${name}-${namespace}`).delete()
await client.api.v1.namespaces(namespace)
.persistentvolumeclaims(`${name}-${namespace}`)
.delete()
} catch (error) {
logger.warn(error.toString())
}
}
const getServices = async (namespace) => {
const data = await client.api.v1.namespaces(namespace).services.get()
const data = await client.api.v1.namespaces(namespace)
.services
.get()
return data
}
module.exports = {
getAllNamespace,
createNamespace,
getServiceDetail,
getServicesFormat,
getServices,
......@@ -283,7 +381,6 @@ module.exports = {
serviceRestart,
serviceDelete,
imageUpdate,
getReplicaSet,
replicaSetDelete,
pvcDelete,
}
......@@ -85,6 +85,12 @@
"string-width": "^2.0.0"
}
},
"ansi-colors": {
"version": "3.2.3",
"resolved": "http://npmprivate.quantgroups.com/ansi-colors/-/ansi-colors-3.2.3.tgz",
"integrity": "sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==",
"dev": true
},
"ansi-escapes": {
"version": "3.2.0",
"resolved": "http://npmprivate.quantgroups.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz",
......@@ -166,6 +172,11 @@
"resolved": "http://registry.npm.taobao.org/assert-plus/download/assert-plus-1.0.0.tgz",
"integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU="
},
"assertion-error": {
"version": "1.1.0",
"resolved": "http://npmprivate.quantgroups.com/assertion-error/-/assertion-error-1.1.0.tgz",
"integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw=="
},
"assign-symbols": {
"version": "1.0.0",
"resolved": "http://registry.npm.taobao.org/assign-symbols/download/assign-symbols-1.0.0.tgz",
......@@ -344,6 +355,12 @@
}
}
},
"browser-stdout": {
"version": "1.3.1",
"resolved": "http://npmprivate.quantgroups.com/browser-stdout/-/browser-stdout-1.3.1.tgz",
"integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
"dev": true
},
"bytes": {
"version": "3.0.0",
"resolved": "http://registry.npm.taobao.org/bytes/download/bytes-3.0.0.tgz",
......@@ -419,6 +436,19 @@
"resolved": "http://registry.npm.taobao.org/caseless/download/caseless-0.12.0.tgz",
"integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw="
},
"chai": {
"version": "4.2.0",
"resolved": "http://npmprivate.quantgroups.com/chai/-/chai-4.2.0.tgz",
"integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==",
"requires": {
"assertion-error": "^1.1.0",
"check-error": "^1.0.2",
"deep-eql": "^3.0.1",
"get-func-name": "^2.0.0",
"pathval": "^1.1.0",
"type-detect": "^4.0.5"
}
},
"chalk": {
"version": "2.4.2",
"resolved": "http://registry.npm.taobao.org/chalk/download/chalk-2.4.2.tgz",
......@@ -436,6 +466,11 @@
"integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
"dev": true
},
"check-error": {
"version": "1.0.2",
"resolved": "http://npmprivate.quantgroups.com/check-error/-/check-error-1.0.2.tgz",
"integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII="
},
"chokidar": {
"version": "2.0.4",
"resolved": "http://registry.npm.taobao.org/chokidar/download/chokidar-2.0.4.tgz",
......@@ -517,6 +552,17 @@
"integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=",
"dev": true
},
"cliui": {
"version": "4.1.0",
"resolved": "http://npmprivate.quantgroups.com/cliui/-/cliui-4.1.0.tgz",
"integrity": "sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ==",
"dev": true,
"requires": {
"string-width": "^2.1.1",
"strip-ansi": "^4.0.0",
"wrap-ansi": "^2.0.0"
}
},
"clone-response": {
"version": "1.0.2",
"resolved": "http://npmprivate.quantgroups.com/clone-response/-/clone-response-1.0.2.tgz",
......@@ -546,6 +592,12 @@
"type-is": "^1.6.16"
}
},
"code-point-at": {
"version": "1.1.0",
"resolved": "http://npmprivate.quantgroups.com/code-point-at/-/code-point-at-1.1.0.tgz",
"integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=",
"dev": true
},
"collection-visit": {
"version": "1.0.0",
"resolved": "http://registry.npm.taobao.org/collection-visit/download/collection-visit-1.0.0.tgz",
......@@ -718,6 +770,12 @@
"ms": "2.0.0"
}
},
"decamelize": {
"version": "1.2.0",
"resolved": "http://npmprivate.quantgroups.com/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=",
"dev": true
},
"decode-uri-component": {
"version": "0.2.0",
"resolved": "http://registry.npm.taobao.org/decode-uri-component/download/decode-uri-component-0.2.0.tgz",
......@@ -731,6 +789,14 @@
"mimic-response": "^1.0.0"
}
},
"deep-eql": {
"version": "3.0.1",
"resolved": "http://npmprivate.quantgroups.com/deep-eql/-/deep-eql-3.0.1.tgz",
"integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==",
"requires": {
"type-detect": "^4.0.0"
}
},
"deep-equal": {
"version": "1.0.1",
"resolved": "http://registry.npm.taobao.org/deep-equal/download/deep-equal-1.0.1.tgz",
......@@ -827,6 +893,12 @@
"resolved": "http://registry.npm.taobao.org/destroy/download/destroy-1.0.4.tgz",
"integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
},
"diff": {
"version": "3.5.0",
"resolved": "http://npmprivate.quantgroups.com/diff/-/diff-3.5.0.tgz",
"integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==",
"dev": true
},
"doctrine": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/doctrine/-/doctrine-3.0.0.tgz",
......@@ -870,6 +942,15 @@
"integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==",
"dev": true
},
"end-of-stream": {
"version": "1.4.1",
"resolved": "http://npmprivate.quantgroups.com/end-of-stream/-/end-of-stream-1.4.1.tgz",
"integrity": "sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==",
"dev": true,
"requires": {
"once": "^1.4.0"
}
},
"error-ex": {
"version": "1.3.2",
"resolved": "http://npmprivate.quantgroups.com/error-ex/-/error-ex-1.3.2.tgz",
......@@ -1415,6 +1496,23 @@
"locate-path": "^2.0.0"
}
},
"flat": {
"version": "4.1.0",
"resolved": "http://npmprivate.quantgroups.com/flat/-/flat-4.1.0.tgz",
"integrity": "sha512-Px/TiLIznH7gEDlPXcUD4KnBusa6kR6ayRUVcnEAbreRIuhkqow/mun59BuRXwoYk7ZQOLW1ZM05ilIvK38hFw==",
"dev": true,
"requires": {
"is-buffer": "~2.0.3"
},
"dependencies": {
"is-buffer": {
"version": "2.0.3",
"resolved": "http://npmprivate.quantgroups.com/is-buffer/-/is-buffer-2.0.3.tgz",
"integrity": "sha512-U15Q7MXTuZlrbymiz95PJpZxu8IlipAp4dtS3wOdgPXx3mqBnslrWU14kxfHB+Py/+2PVKSr37dMAgM2A4uArw==",
"dev": true
}
}
},
"flat-cache": {
"version": "2.0.1",
"resolved": "http://npmprivate.quantgroups.com/flat-cache/-/flat-cache-2.0.1.tgz",
......@@ -2031,6 +2129,17 @@
"integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=",
"dev": true
},
"get-caller-file": {
"version": "2.0.5",
"resolved": "http://npmprivate.quantgroups.com/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true
},
"get-func-name": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/get-func-name/-/get-func-name-2.0.0.tgz",
"integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE="
},
"get-stream": {
"version": "3.0.0",
"resolved": "http://registry.npm.taobao.org/get-stream/download/get-stream-3.0.0.tgz",
......@@ -2125,6 +2234,12 @@
"integrity": "sha1-/7cD4QZuig7qpMi4C6klPu77+wA=",
"dev": true
},
"growl": {
"version": "1.10.5",
"resolved": "http://npmprivate.quantgroups.com/growl/-/growl-1.10.5.tgz",
"integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
"dev": true
},
"har-schema": {
"version": "2.0.0",
"resolved": "http://registry.npm.taobao.org/har-schema/download/har-schema-2.0.0.tgz",
......@@ -2205,6 +2320,12 @@
}
}
},
"he": {
"version": "1.2.0",
"resolved": "http://npmprivate.quantgroups.com/he/-/he-1.2.0.tgz",
"integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
"dev": true
},
"hoek": {
"version": "6.1.3",
"resolved": "http://npmprivate.quantgroups.com/hoek/-/hoek-6.1.3.tgz",
......@@ -2373,6 +2494,12 @@
"p-is-promise": "^1.1.0"
}
},
"invert-kv": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/invert-kv/-/invert-kv-2.0.0.tgz",
"integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==",
"dev": true
},
"ioredis": {
"version": "4.10.0",
"resolved": "http://npmprivate.quantgroups.com/ioredis/-/ioredis-4.10.0.tgz",
......@@ -2930,6 +3057,15 @@
"package-json": "^4.0.0"
}
},
"lcid": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/lcid/-/lcid-2.0.0.tgz",
"integrity": "sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==",
"dev": true,
"requires": {
"invert-kv": "^2.0.0"
}
},
"levn": {
"version": "0.3.0",
"resolved": "http://npmprivate.quantgroups.com/levn/-/levn-0.3.0.tgz",
......@@ -2996,6 +3132,15 @@
"resolved": "http://npmprivate.quantgroups.com/lodash.merge/-/lodash.merge-4.6.1.tgz",
"integrity": "sha512-AOYza4+Hf5z1/0Hztxpm2/xiPZgi/cjMqdnKTUWTBSKchJlxXXuUSxCCl8rJlf4g6yww/j6mA8nC8Hw/EZWxKQ=="
},
"log-symbols": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/log-symbols/-/log-symbols-2.2.0.tgz",
"integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==",
"dev": true,
"requires": {
"chalk": "^2.0.1"
}
},
"long": {
"version": "4.0.0",
"resolved": "http://npmprivate.quantgroups.com/long/-/long-4.0.0.tgz",
......@@ -3028,6 +3173,15 @@
"pify": "^3.0.0"
}
},
"map-age-cleaner": {
"version": "0.1.3",
"resolved": "http://npmprivate.quantgroups.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz",
"integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==",
"dev": true,
"requires": {
"p-defer": "^1.0.0"
}
},
"map-cache": {
"version": "0.2.2",
"resolved": "http://registry.npm.taobao.org/map-cache/download/map-cache-0.2.2.tgz",
......@@ -3048,6 +3202,31 @@
"resolved": "http://registry.npm.taobao.org/media-typer/download/media-typer-0.3.0.tgz",
"integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
},
"mem": {
"version": "4.3.0",
"resolved": "http://npmprivate.quantgroups.com/mem/-/mem-4.3.0.tgz",
"integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==",
"dev": true,
"requires": {
"map-age-cleaner": "^0.1.1",
"mimic-fn": "^2.0.0",
"p-is-promise": "^2.0.0"
},
"dependencies": {
"mimic-fn": {
"version": "2.1.0",
"resolved": "http://npmprivate.quantgroups.com/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"dev": true
},
"p-is-promise": {
"version": "2.1.0",
"resolved": "http://npmprivate.quantgroups.com/p-is-promise/-/p-is-promise-2.1.0.tgz",
"integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==",
"dev": true
}
}
},
"methods": {
"version": "1.1.2",
"resolved": "http://registry.npm.taobao.org/methods/download/methods-1.1.2.tgz",
......@@ -3149,6 +3328,116 @@
}
}
},
"mocha": {
"version": "6.2.0",
"resolved": "http://npmprivate.quantgroups.com/mocha/-/mocha-6.2.0.tgz",
"integrity": "sha512-qwfFgY+7EKAAUAdv7VYMZQknI7YJSGesxHyhn6qD52DV8UcSZs5XwCifcZGMVIE4a5fbmhvbotxC0DLQ0oKohQ==",
"dev": true,
"requires": {
"ansi-colors": "3.2.3",
"browser-stdout": "1.3.1",
"debug": "3.2.6",
"diff": "3.5.0",
"escape-string-regexp": "1.0.5",
"find-up": "3.0.0",
"glob": "7.1.3",
"growl": "1.10.5",
"he": "1.2.0",
"js-yaml": "3.13.1",
"log-symbols": "2.2.0",
"minimatch": "3.0.4",
"mkdirp": "0.5.1",
"ms": "2.1.1",
"node-environment-flags": "1.0.5",
"object.assign": "4.1.0",
"strip-json-comments": "2.0.1",
"supports-color": "6.0.0",
"which": "1.3.1",
"wide-align": "1.1.3",
"yargs": "13.2.2",
"yargs-parser": "13.0.0",
"yargs-unparser": "1.5.0"
},
"dependencies": {
"debug": {
"version": "3.2.6",
"resolved": "http://npmprivate.quantgroups.com/debug/-/debug-3.2.6.tgz",
"integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
"dev": true,
"requires": {
"ms": "^2.1.1"
}
},
"find-up": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/find-up/-/find-up-3.0.0.tgz",
"integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
"dev": true,
"requires": {
"locate-path": "^3.0.0"
}
},
"js-yaml": {
"version": "3.13.1",
"resolved": "http://npmprivate.quantgroups.com/js-yaml/-/js-yaml-3.13.1.tgz",
"integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
"dev": true,
"requires": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
}
},
"locate-path": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/locate-path/-/locate-path-3.0.0.tgz",
"integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
"dev": true,
"requires": {
"p-locate": "^3.0.0",
"path-exists": "^3.0.0"
}
},
"ms": {
"version": "2.1.1",
"resolved": "http://npmprivate.quantgroups.com/ms/-/ms-2.1.1.tgz",
"integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==",
"dev": true
},
"p-limit": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-limit/-/p-limit-2.2.0.tgz",
"integrity": "sha512-pZbTJpoUsCzV48Mc9Nh51VbwO0X9cuPFE8gYwx9BTCt9SF8/b7Zljd2fVgOxhIF/HDTKgpVzs+GPhyKfjLLFRQ==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/p-locate/-/p-locate-3.0.0.tgz",
"integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
"dev": true,
"requires": {
"p-limit": "^2.0.0"
}
},
"p-try": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true
},
"supports-color": {
"version": "6.0.0",
"resolved": "http://npmprivate.quantgroups.com/supports-color/-/supports-color-6.0.0.tgz",
"integrity": "sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==",
"dev": true,
"requires": {
"has-flag": "^3.0.0"
}
}
}
},
"moment": {
"version": "2.24.0",
"resolved": "http://npmprivate.quantgroups.com/moment/-/moment-2.24.0.tgz",
......@@ -3216,6 +3505,24 @@
"integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==",
"dev": true
},
"node-environment-flags": {
"version": "1.0.5",
"resolved": "http://npmprivate.quantgroups.com/node-environment-flags/-/node-environment-flags-1.0.5.tgz",
"integrity": "sha512-VNYPRfGfmZLx0Ye20jWzHUjyTW/c+6Wq+iLhDzUI4XmhrDd9l/FozXV3F2xOaXjvp0co0+v1YSR3CMP6g+VvLQ==",
"dev": true,
"requires": {
"object.getownpropertydescriptors": "^2.0.3",
"semver": "^5.7.0"
},
"dependencies": {
"semver": {
"version": "5.7.1",
"resolved": "http://npmprivate.quantgroups.com/semver/-/semver-5.7.1.tgz",
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
"dev": true
}
}
},
"node-forge": {
"version": "0.8.2",
"resolved": "http://npmprivate.quantgroups.com/node-forge/-/node-forge-0.8.2.tgz",
......@@ -3318,6 +3625,12 @@
"path-key": "^2.0.0"
}
},
"number-is-nan": {
"version": "1.0.1",
"resolved": "http://npmprivate.quantgroups.com/number-is-nan/-/number-is-nan-1.0.1.tgz",
"integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=",
"dev": true
},
"oauth-sign": {
"version": "0.9.0",
"resolved": "http://registry.npm.taobao.org/oauth-sign/download/oauth-sign-0.9.0.tgz",
......@@ -3397,6 +3710,16 @@
"has": "^1.0.3"
}
},
"object.getownpropertydescriptors": {
"version": "2.0.3",
"resolved": "http://npmprivate.quantgroups.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz",
"integrity": "sha1-h1jIRvW0B62rDyNuCYbxSwUcqhY=",
"dev": true,
"requires": {
"define-properties": "^1.1.2",
"es-abstract": "^1.5.1"
}
},
"object.pick": {
"version": "1.3.0",
"resolved": "http://registry.npm.taobao.org/object.pick/download/object.pick-1.3.0.tgz",
......@@ -3523,6 +3846,56 @@
"wordwrap": "~1.0.0"
}
},
"os-locale": {
"version": "3.1.0",
"resolved": "http://npmprivate.quantgroups.com/os-locale/-/os-locale-3.1.0.tgz",
"integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==",
"dev": true,
"requires": {
"execa": "^1.0.0",
"lcid": "^2.0.0",
"mem": "^4.0.0"
},
"dependencies": {
"cross-spawn": {
"version": "6.0.5",
"resolved": "http://npmprivate.quantgroups.com/cross-spawn/-/cross-spawn-6.0.5.tgz",
"integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
"dev": true,
"requires": {
"nice-try": "^1.0.4",
"path-key": "^2.0.1",
"semver": "^5.5.0",
"shebang-command": "^1.2.0",
"which": "^1.2.9"
}
},
"execa": {
"version": "1.0.0",
"resolved": "http://npmprivate.quantgroups.com/execa/-/execa-1.0.0.tgz",
"integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
"dev": true,
"requires": {
"cross-spawn": "^6.0.0",
"get-stream": "^4.0.0",
"is-stream": "^1.1.0",
"npm-run-path": "^2.0.0",
"p-finally": "^1.0.0",
"signal-exit": "^3.0.0",
"strip-eof": "^1.0.0"
}
},
"get-stream": {
"version": "4.1.0",
"resolved": "http://npmprivate.quantgroups.com/get-stream/-/get-stream-4.1.0.tgz",
"integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
"dev": true,
"requires": {
"pump": "^3.0.0"
}
}
}
},
"os-tmpdir": {
"version": "1.0.2",
"resolved": "http://npmprivate.quantgroups.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
......@@ -3542,6 +3915,12 @@
"resolved": "http://npmprivate.quantgroups.com/p-cancelable/-/p-cancelable-0.4.1.tgz",
"integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ=="
},
"p-defer": {
"version": "1.0.0",
"resolved": "http://npmprivate.quantgroups.com/p-defer/-/p-defer-1.0.0.tgz",
"integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=",
"dev": true
},
"p-finally": {
"version": "1.0.0",
"resolved": "http://registry.npm.taobao.org/p-finally/download/p-finally-1.0.0.tgz",
......@@ -3694,6 +4073,11 @@
}
}
},
"pathval": {
"version": "1.1.0",
"resolved": "http://npmprivate.quantgroups.com/pathval/-/pathval-1.1.0.tgz",
"integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA="
},
"performance-now": {
"version": "2.1.0",
"resolved": "http://registry.npm.taobao.org/performance-now/download/performance-now-2.1.0.tgz",
......@@ -3758,6 +4142,16 @@
"integrity": "sha1-c6VarZ4tlYFJJxMfv03Bti0ln0c=",
"dev": true
},
"pump": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"dev": true,
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"punycode": {
"version": "2.1.1",
"resolved": "http://registry.npm.taobao.org/punycode/download/punycode-2.1.1.tgz",
......@@ -3973,6 +4367,18 @@
"uuid": "^3.3.2"
}
},
"require-directory": {
"version": "2.1.1",
"resolved": "http://npmprivate.quantgroups.com/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=",
"dev": true
},
"require-main-filename": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/require-main-filename/-/require-main-filename-2.0.0.tgz",
"integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==",
"dev": true
},
"resolve": {
"version": "1.10.0",
"resolved": "http://npmprivate.quantgroups.com/resolve/-/resolve-1.10.0.tgz",
......@@ -4084,6 +4490,12 @@
"semver": "^5.0.3"
}
},
"set-blocking": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/set-blocking/-/set-blocking-2.0.0.tgz",
"integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=",
"dev": true
},
"set-value": {
"version": "2.0.0",
"resolved": "http://registry.npm.taobao.org/set-value/download/set-value-2.0.0.tgz",
......@@ -4660,6 +5072,11 @@
"prelude-ls": "~1.1.2"
}
},
"type-detect": {
"version": "4.0.8",
"resolved": "http://npmprivate.quantgroups.com/type-detect/-/type-detect-4.0.8.tgz",
"integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g=="
},
"type-is": {
"version": "1.6.16",
"resolved": "http://registry.npm.taobao.org/type-is/download/type-is-1.6.16.tgz",
......@@ -4897,6 +5314,21 @@
"isexe": "^2.0.0"
}
},
"which-module": {
"version": "2.0.0",
"resolved": "http://npmprivate.quantgroups.com/which-module/-/which-module-2.0.0.tgz",
"integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=",
"dev": true
},
"wide-align": {
"version": "1.1.3",
"resolved": "http://npmprivate.quantgroups.com/wide-align/-/wide-align-1.1.3.tgz",
"integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==",
"dev": true,
"requires": {
"string-width": "^1.0.2 || 2"
}
},
"widest-line": {
"version": "2.0.1",
"resolved": "http://registry.npm.taobao.org/widest-line/download/widest-line-2.0.1.tgz",
......@@ -4912,6 +5344,53 @@
"integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=",
"dev": true
},
"wrap-ansi": {
"version": "2.1.0",
"resolved": "http://npmprivate.quantgroups.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz",
"integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=",
"dev": true,
"requires": {
"string-width": "^1.0.1",
"strip-ansi": "^3.0.1"
},
"dependencies": {
"ansi-regex": {
"version": "2.1.1",
"resolved": "http://npmprivate.quantgroups.com/ansi-regex/-/ansi-regex-2.1.1.tgz",
"integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
"dev": true
},
"is-fullwidth-code-point": {
"version": "1.0.0",
"resolved": "http://npmprivate.quantgroups.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz",
"integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=",
"dev": true,
"requires": {
"number-is-nan": "^1.0.0"
}
},
"string-width": {
"version": "1.0.2",
"resolved": "http://npmprivate.quantgroups.com/string-width/-/string-width-1.0.2.tgz",
"integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=",
"dev": true,
"requires": {
"code-point-at": "^1.0.0",
"is-fullwidth-code-point": "^1.0.0",
"strip-ansi": "^3.0.0"
}
},
"strip-ansi": {
"version": "3.0.1",
"resolved": "http://npmprivate.quantgroups.com/strip-ansi/-/strip-ansi-3.0.1.tgz",
"integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
"dev": true,
"requires": {
"ansi-regex": "^2.0.0"
}
}
}
},
"wrappy": {
"version": "1.0.2",
"resolved": "http://npmprivate.quantgroups.com/wrappy/-/wrappy-1.0.2.tgz",
......@@ -4952,11 +5431,229 @@
"integrity": "sha1-SWsswQnsqNus/i3HK2A8F8WHCtQ=",
"dev": true
},
"y18n": {
"version": "4.0.0",
"resolved": "http://npmprivate.quantgroups.com/y18n/-/y18n-4.0.0.tgz",
"integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==",
"dev": true
},
"yallist": {
"version": "3.0.3",
"resolved": "http://npmprivate.quantgroups.com/yallist/-/yallist-3.0.3.tgz",
"integrity": "sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A=="
},
"yargs": {
"version": "13.2.2",
"resolved": "http://npmprivate.quantgroups.com/yargs/-/yargs-13.2.2.tgz",
"integrity": "sha512-WyEoxgyTD3w5XRpAQNYUB9ycVH/PQrToaTXdYXRdOXvEy1l19br+VJsc0vcO8PTGg5ro/l/GY7F/JMEBmI0BxA==",
"dev": true,
"requires": {
"cliui": "^4.0.0",
"find-up": "^3.0.0",
"get-caller-file": "^2.0.1",
"os-locale": "^3.1.0",
"require-directory": "^2.1.1",
"require-main-filename": "^2.0.0",
"set-blocking": "^2.0.0",
"string-width": "^3.0.0",
"which-module": "^2.0.0",
"y18n": "^4.0.0",
"yargs-parser": "^13.0.0"
},
"dependencies": {
"ansi-regex": {
"version": "4.1.0",
"resolved": "http://npmprivate.quantgroups.com/ansi-regex/-/ansi-regex-4.1.0.tgz",
"integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
"dev": true
},
"find-up": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/find-up/-/find-up-3.0.0.tgz",
"integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
"dev": true,
"requires": {
"locate-path": "^3.0.0"
}
},
"locate-path": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/locate-path/-/locate-path-3.0.0.tgz",
"integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
"dev": true,
"requires": {
"p-locate": "^3.0.0",
"path-exists": "^3.0.0"
}
},
"p-limit": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-limit/-/p-limit-2.2.0.tgz",
"integrity": "sha512-pZbTJpoUsCzV48Mc9Nh51VbwO0X9cuPFE8gYwx9BTCt9SF8/b7Zljd2fVgOxhIF/HDTKgpVzs+GPhyKfjLLFRQ==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/p-locate/-/p-locate-3.0.0.tgz",
"integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
"dev": true,
"requires": {
"p-limit": "^2.0.0"
}
},
"p-try": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true
},
"string-width": {
"version": "3.1.0",
"resolved": "http://npmprivate.quantgroups.com/string-width/-/string-width-3.1.0.tgz",
"integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
"dev": true,
"requires": {
"emoji-regex": "^7.0.1",
"is-fullwidth-code-point": "^2.0.0",
"strip-ansi": "^5.1.0"
}
},
"strip-ansi": {
"version": "5.2.0",
"resolved": "http://npmprivate.quantgroups.com/strip-ansi/-/strip-ansi-5.2.0.tgz",
"integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
"dev": true,
"requires": {
"ansi-regex": "^4.1.0"
}
}
}
},
"yargs-parser": {
"version": "13.0.0",
"resolved": "http://npmprivate.quantgroups.com/yargs-parser/-/yargs-parser-13.0.0.tgz",
"integrity": "sha512-w2LXjoL8oRdRQN+hOyppuXs+V/fVAYtpcrRxZuF7Kt/Oc+Jr2uAcVntaUTNT6w5ihoWfFDpNY8CPx1QskxZ/pw==",
"dev": true,
"requires": {
"camelcase": "^5.0.0",
"decamelize": "^1.2.0"
},
"dependencies": {
"camelcase": {
"version": "5.3.1",
"resolved": "http://npmprivate.quantgroups.com/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true
}
}
},
"yargs-unparser": {
"version": "1.5.0",
"resolved": "http://npmprivate.quantgroups.com/yargs-unparser/-/yargs-unparser-1.5.0.tgz",
"integrity": "sha512-HK25qidFTCVuj/D1VfNiEndpLIeJN78aqgR23nL3y4N0U/91cOAzqfHlF8n2BvoNDcZmJKin3ddNSvOxSr8flw==",
"dev": true,
"requires": {
"flat": "^4.1.0",
"lodash": "^4.17.11",
"yargs": "^12.0.5"
},
"dependencies": {
"camelcase": {
"version": "5.3.1",
"resolved": "http://npmprivate.quantgroups.com/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true
},
"find-up": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/find-up/-/find-up-3.0.0.tgz",
"integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
"dev": true,
"requires": {
"locate-path": "^3.0.0"
}
},
"get-caller-file": {
"version": "1.0.3",
"resolved": "http://npmprivate.quantgroups.com/get-caller-file/-/get-caller-file-1.0.3.tgz",
"integrity": "sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==",
"dev": true
},
"locate-path": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/locate-path/-/locate-path-3.0.0.tgz",
"integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
"dev": true,
"requires": {
"p-locate": "^3.0.0",
"path-exists": "^3.0.0"
}
},
"p-limit": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-limit/-/p-limit-2.2.0.tgz",
"integrity": "sha512-pZbTJpoUsCzV48Mc9Nh51VbwO0X9cuPFE8gYwx9BTCt9SF8/b7Zljd2fVgOxhIF/HDTKgpVzs+GPhyKfjLLFRQ==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "3.0.0",
"resolved": "http://npmprivate.quantgroups.com/p-locate/-/p-locate-3.0.0.tgz",
"integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
"dev": true,
"requires": {
"p-limit": "^2.0.0"
}
},
"p-try": {
"version": "2.2.0",
"resolved": "http://npmprivate.quantgroups.com/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true
},
"require-main-filename": {
"version": "1.0.1",
"resolved": "http://npmprivate.quantgroups.com/require-main-filename/-/require-main-filename-1.0.1.tgz",
"integrity": "sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=",
"dev": true
},
"yargs": {
"version": "12.0.5",
"resolved": "http://npmprivate.quantgroups.com/yargs/-/yargs-12.0.5.tgz",
"integrity": "sha512-Lhz8TLaYnxq/2ObqHDql8dX8CJi97oHxrjUcYtzKbbykPtVW9WB+poxI+NM2UIzsMgNCZTIf0AQwsjK5yMAqZw==",
"dev": true,
"requires": {
"cliui": "^4.0.0",
"decamelize": "^1.2.0",
"find-up": "^3.0.0",
"get-caller-file": "^1.0.1",
"os-locale": "^3.0.0",
"require-directory": "^2.1.1",
"require-main-filename": "^1.0.1",
"set-blocking": "^2.0.0",
"string-width": "^2.0.0",
"which-module": "^2.0.0",
"y18n": "^3.2.1 || ^4.0.0",
"yargs-parser": "^11.1.1"
}
},
"yargs-parser": {
"version": "11.1.1",
"resolved": "http://npmprivate.quantgroups.com/yargs-parser/-/yargs-parser-11.1.1.tgz",
"integrity": "sha512-C6kB/WJDiaxONLJQnF8ccx9SEeoTTLek8RVbaOIsrAUS8VrBEXfmeSnCZxygc+XC2sNMBIwOOnfcxiynjHsVSQ==",
"dev": true,
"requires": {
"camelcase": "^5.0.0",
"decamelize": "^1.2.0"
}
}
}
},
"ylru": {
"version": "1.2.1",
"resolved": "http://registry.npm.taobao.org/ylru/download/ylru-1.2.1.tgz",
......
......@@ -4,6 +4,7 @@
"description": "",
"main": "bin/www.js",
"dependencies": {
"chai": "^4.2.0",
"ioredis": "^4.10.0",
"joi": "^14.3.1",
"js-yaml": "^3.12.1",
......@@ -23,17 +24,19 @@
"eslint": "^5.15.3",
"eslint-config-airbnb-base": "^13.1.0",
"eslint-plugin-import": "^2.16.0",
"mocha": "^6.2.0",
"nodemon": "^1.18.9"
},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"test": "mocha",
"start": "NODE_ENV=env nodemon",
"lint": "eslint .",
"fix": "eslint --fix ."
},
"pre-commit": [
"fix",
"lint"
"lint",
"test"
],
"repository": {
"type": "git",
......
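A minimal sketch of the kind of mocha/chai unit test this setup enables; the path test/namespace.test.js and the test body are assumptions (not part of this commit), and getAllNamespace here talks to the configured cluster rather than a stub:
// test/namespace.test.js (hypothetical example)
const { expect } = require('chai')
const { getAllNamespace } = require('../kubeService/service')

describe('getAllNamespace', () => {
  it('hides the protected namespaces', async () => {
    const { namespaces } = await getAllNamespace()
    const names = namespaces.map(ns => ns.name)
    // None of the keepNamespace entries should leak into the listing
    const protectedNs = ['default', 'kube-system', 'monitor', 'kube-public']
    protectedNs.forEach(name => expect(names).to.not.include(name))
  })
})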
const fs = require('fs')
const ui = fs.readFileSync('serviceTemplate/ui.template.txt', 'utf8')
const java = fs.readFileSync('serviceTemplate/java.template.txt', 'utf8')
const node = fs.readFileSync('serviceTemplate/node.template.txt', 'utf8')
const rabbitmq = fs.readFileSync('serviceTemplate/rabbitmq.template.txt', 'utf8')
const zookeeper = fs.readFileSync('serviceTemplate/zookeeper.template.txt', 'utf8')
const mysql = fs.readFileSync('serviceTemplate/mysql.template.txt', 'utf8')
const redis = fs.readFileSync('serviceTemplate/redis.template.txt', 'utf8')
module.exports = {
ui,
node,
java,
rabbitmq,
zookeeper,
mysql,
redis,
}
serviceName: {{serviceName}}
namespace: {{namespace}}
replicas: 1
accessType: ClusterIP
portMappings.0.protocol: TCP
portMappings.0.lbPort: 80
portMappings.0.containerPort: 80
containers.0.containerName: {{system_name}}
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.envs.0.name: SYSTEM_NAME
containers.0.envs.0.value: {{system_name}}
containers.0.envs.1.name: NAMESPACE
containers.0.envs.1.value: {{namespace}}
containers.0.envs.2.name: DEBUG
containers.0.envs.2.value: {{debug}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
containers.0.healthCheck.0.type: readyCheck
containers.0.healthCheck.0.checkMethod: methodCmd
containers.0.healthCheck.0.cmd: '/home/quant_group/readyCheck.sh'
containers.0.healthCheck.0.delayTime: 60
containers.0.healthCheck.0.intervalTime: 5
containers.0.healthCheck.0.timeOut: 2
containers.0.healthCheck.0.healthNum: 1
containers.0.healthCheck.0.unhealthNum: 20
serviceName: mysql
namespace: {{namespace}}
replicas: 1
accessType: NodePort
portMappings.0.protocol: TCP
portMappings.0.containerPort: 3306
containers.0.containerName: mysql
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.volumeMounts.0.volumeName: mysql-vol
containers.0.volumeMounts.0.mountPath: /var/lib/mysql
containers.0.volumeMounts.0.mode: rw
containers.0.envs.0.name: MYSQL_USER
containers.0.envs.0.value: qa
containers.0.envs.1.name: MYSQL_PASSWORD
containers.0.envs.1.value: qatest
containers.0.envs.2.name: MYSQL_ROOT_PASSWORD
containers.0.envs.2.value: qatest
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
volumes.0.name: mysql-vol
volumes.0.volumeType: hostPath
volumes.0.hostPath: /var/lib/data/mysql/{{namespace}}
serviceName: {{serviceName}}
namespace: {{namespace}}
replicas: 1
accessType: ClusterIP
portMappings.0.protocol: TCP
portMappings.0.lbPort: 80
portMappings.0.containerPort: 80
containers.0.containerName: {{serviceName}}
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.envs.0.name: SYSTEM_NAME
containers.0.envs.0.value: {{serviceName}}
containers.0.envs.1.name: NAMESPACE
containers.0.envs.1.value: {{namespace}}
containers.0.envs.2.name: DEBUG
containers.0.envs.2.value: {{debug}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
serviceName: rabbitmq
namespace: {{namespace}}
replicas: 1
accessType: NodePort
portMappings.0.protocol: TCP
portMappings.0.containerPort: 5672
portMappings.1.protocol: TCP
portMappings.1.containerPort: 15672
containers.0.containerName: rabbitmq
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.volumeMounts.0.volumeName: rabbitmq-vol
containers.0.volumeMounts.0.mountPath: /var/lib/rabbitmq
containers.0.volumeMounts.0.mode: rw
containers.0.envs.0.name: RABBITMQ_DEFAULT_USER
containers.0.envs.0.value: qa
containers.0.envs.1.name: RABBITMQ_DEFAULT_PASS
containers.0.envs.1.value: qatest
volumes.0.name: rabbitmq-vol
volumes.0.volumeType: hostPath
volumes.0.hostPath: /var/lib/data/rabbitmq/{{namespace}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
serviceName: redis
namespace: {{namespace}}
replicas: 1
accessType: NodePort
portMappings.0.protocol: TCP
portMappings.0.containerPort: 6379
portMappings.1.protocol: TCP
portMappings.1.containerPort: 6380
portMappings.2.protocol: TCP
portMappings.2.containerPort: 6381
portMappings.3.protocol: TCP
portMappings.3.containerPort: 6382
portMappings.4.protocol: TCP
portMappings.4.containerPort: 6383
containers.0.containerName: redis
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.volumeMounts.0.volumeName: redis-vol
containers.0.volumeMounts.0.mountPath: /var/lib/redis
containers.0.volumeMounts.0.mode: rw
volumes.0.name: redis-vol
volumes.0.volumeType: hostPath
volumes.0.hostPath: /var/lib/data/redis/{{namespace}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
serviceName: {{serviceName}}
namespace: {{namespace}}
replicas: 1
accessType: ClusterIP
portMappings.0.protocol: TCP
portMappings.0.lbPort: 80
portMappings.0.containerPort: 80
containers.0.containerName: {{system_name}}
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.envs.0.name: SYSTEM_NAME
containers.0.envs.0.value: {{system_name}}
containers.0.envs.1.name: NAMESPACE
containers.0.envs.1.value: {{namespace}}
containers.0.envs.2.name: DEBUG
containers.0.envs.2.value: {{debug}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
serviceName: zookeeper
namespace: {{namespace}}
replicas: 1
accessType: NodePort
portMappings.0.protocol: TCP
portMappings.0.containerPort: 2181
portMappings.1.protocol: TCP
portMappings.1.containerPort: 9090
containers.0.containerName: zookeeper
containers.0.image: ccr.ccs.tencentyun.com/{{image}}
containers.0.volumeMounts.0.volumeName: zookeeper-vol
containers.0.volumeMounts.0.mountPath: /var/lib/zookeeper
containers.0.volumeMounts.0.mode: rw
containers.0.envs.0.name: ZOO_USER
containers.0.envs.0.value: zookeeper
containers.0.envs.1.name: ZOO_PORT
containers.0.envs.1.value: 2181
volumes.0.name: zookeeper-vol
volumes.0.volumeType: hostPath
volumes.0.hostPath: /var/lib/data/zookeeper/{{namespace}}
containers.0.cpu: {{resources.cpu.request}}
containers.0.cpuLimits: {{resources.cpu.limit}}
containers.0.memory: {{resources.memory.request}}
containers.0.memoryLimits: {{resources.memory.limit}}
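// Rendering sketch (hypothetical): once the {{placeholders}} are substituted,
// a template parses as YAML whose dotted keys stay flat — one key per line —
// which matches the flat parameter style the TKE client uses (compare
// 'names.0' and 'labels.type' below).
const yaml = require('js-yaml')
const rendered = 'serviceName: mysql\nportMappings.0.protocol: TCP'
console.log(yaml.load(rendered))
// -> { serviceName: 'mysql', 'portMappings.0.protocol': 'TCP' }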
const logger = require('koa-log4').getLogger()
const Client = require('./tke.service')
const domainConfig = require('../config')
class Cluster extends Client {
constructor() {
super();
this.endpoint = domainConfig.clusterPoint
this.imageDomain = domainConfig.imageDomain
}
namespace_get() {
return this.post('DescribeClusterNameSpaces', { clusterId: this.clusterId })
}
namespace_create(name, description) {
return this.post('CreateClusterNamespace', { name, description, clusterId: this.clusterId })
}
namespace_delete(name) {
return this.post('DeleteClusterNamespace', { 'names.0': name, clusterId: this.clusterId })
}
service_list(namespace) {
return this.post('DescribeClusterService', { namespace, clusterId: this.clusterId, allnamespace: 0 })
}
service_get(serviceName, namespace) {
return this.post('DescribeClusterServiceInfo', { serviceName, namespace, clusterId: this.clusterId })
}
async service_create(params, label) {
logger.info('Service creation details:', params, label)
params.clusterId = this.clusterId
const str = JSON.stringify(params)
await this.post('CreateClusterService', params)
// The Tencent Cloud API currently only supports switching to the Recreate strategy by modifying the service after creation
const modifyParams = JSON.parse(str)
modifyParams.strategy = 'Recreate'
await this.post('ModifyClusterService', modifyParams)
const updateLabels = {
clusterId: params.clusterId,
serviceName: params.serviceName,
namespace: params.namespace,
'labels.type': label,
}
logger.info('Updating service labels:', updateLabels)
return this.post('ModifyServiceLabels', updateLabels)
}
service_delete(serviceName, namespace) {
return this.post('DeleteClusterService', { serviceName, namespace, clusterId: this.clusterId })
}
service_modifyImage(serviceName, image, namespace) {
return this.post('ModifyClusterServiceImage', {
serviceName,
namespace,
image: `${this.imageDomain}/${image}`,
clusterId: this.clusterId,
})
}
service_redeployment(serviceName, namespace) {
return this.post('RedeployClusterService', { serviceName, namespace, clusterId: this.clusterId })
}
instance_get(serviceName, namespace) {
return this.post('DescribeServiceInstance', { serviceName, namespace, clusterId: this.clusterId })
}
ingress_get(namespace) {
return this.post('DescribeIngress', { namespace, clusterId: this.clusterId })
}
ingress_create(namespace) {
logger.info('Creating ingress for namespace:', namespace)
return this.post('CreateIngress', {
ingressName: `qa-${namespace}`,
ingressDesc: '',
namespace,
clusterId: this.clusterId,
})
}
ingress_delete(ingressName, namespace) {
return this.post('DeleteIngress', { ingressName, namespace, clusterId: this.clusterId })
}
ingress_modify(ingressName, namespace, rules) {
rules.ingressName = ingressName
rules.namespace = namespace
rules.clusterId = this.clusterId
return this.post('ModifyIngress', rules)
}
node_list(limit = 1) {
return this.post('DescribeClusterInstances', { limit, clusterId: this.clusterId })
}
}
exports.create = function () {
return new Cluster()
}
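// Usage sketch (hypothetical, not part of this commit): assumes clusterId and
// API credentials are provided via ../config and the tke.service base class,
// and that DescribeClusterNameSpaces responds with a `namespaces` array.
const cluster = require('./tke.clusterService').create()
cluster.namespace_get()
.then(data => console.log('namespaces:', (data.namespaces || []).map(n => n.name)))
.catch(err => console.error('DescribeClusterNameSpaces failed:', err))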
const { describe, it } = require('mocha')
const { expect } = require('chai');
const request = require('request')
const app = require('./../app')
const awaitRequest = function (options) {
return new Promise((resolve, reject) => {
request(options, (error, res) => {
if (error) {
reject(error)
} else {
resolve(res)
}
})
})
}
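// Equivalent sketch: Node's util.promisify could replace the wrapper above,
// since `request` uses a standard (error, response, body) callback and
// promisify resolves with the first success argument (the response).
// const { promisify } = require('util')
// const awaitRequest = promisify(request)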
const namespace = 'monitor'
describe('test tke-eos api', () => {
app.start(4000) // boot the server under test on the port the requests below target
/**
* Fetch all namespaces
*/
describe('test get all namespace URL->http://localhost:4000/namespace', () => {
it('should return all namespaces', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/namespace',
method: 'GET',
})
expect(res.statusCode).to.equal(200)
});
})
/**
* Fetch all services under a given namespace
*/
describe(`test get service URL->http://localhost:4000/service?namespace=${namespace}`, () => {
it(`should return services belonging to namespace ${namespace}`, async () => {
const res = await awaitRequest({
url: `http://localhost:4000/service?namespace=${namespace}`,
method: 'GET',
})
expect(res.statusCode).to.equal(200)
});
})
/**
* Fetch the image list
*/
describe('test get images URL->http://localhost:4000/repository', () => {
const data = { namespace: 'qa-ui' }
it('should return the image list', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/repository',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
/**
* Fetch image tags
*/
describe('test get tag URL->http://localhost:4000/tag', () => {
const data = { reponame: 'qa-ui/new-op-ui' }
it('should return the tag list', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/tag',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
/**
* Create a service
*/
describe('test create service URL->http://localhost:4000/service/create', () => {
const data = {
debug: '"0"',
domain: 'new-op',
image: 'qa-ui/new-op-ui:latest',
label: 'ui',
mock: '0',
namespace,
serviceName: 'new-op-ui',
type: 'ui',
wechat: '"0"'
,
}
it('should create the service successfully', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/service/create',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
describe('test details of a service URL->http://localhost:4000/service/details', () => {
const data = {
namespace,
serviceName: 'new-op-ui',
type: 'ui',
}
it('should return details of a service', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/service/details',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
describe('test update a service URL->http://localhost:4000/service/modifyImage', () => {
const data = {
namespace,
serviceName: 'new-op-ui',
type: 'ui',
image: 'qa-ui/new-op-ui:latest',
}
it('should update a service', async () => {
const res = await awaitRequest({
url: 'http://localhost:4000/service/modifyImage',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
describe('test redeploy a service URL->http://localhost:4000/service/redeploy', () => {
it('should redeploy a service', async () => {
const data1 = {
namespace,
serviceName: 'new-op-ui',
type: 'ui',
}
const res1 = await awaitRequest({
url: 'http://localhost:4000/service/details',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data1),
})
const data = {
namespace,
podName: JSON.parse(res1.body).data.podName,
serviceName: 'new-op-ui',
}
const res = await awaitRequest({
url: 'http://localhost:4000/service/redeploy',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
describe('test delete a service URL->http://localhost:4000/service/delete', () => {
it('should delete a service', async () => {
const data1 = {
namespace,
serviceName: 'new-op-ui',
type: 'ui',
}
const res1 = await awaitRequest({
url: 'http://localhost:4000/service/details',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data1),
})
const data = {
namespace,
podName: JSON.parse(res1.body).data.podName,
serviceName: 'new-op-ui',
}
const res = await awaitRequest({
url: 'http://localhost:4000/service/delete',
method: 'POST',
headers: {
'Content-Type': 'application/json;charset=utf-8',
},
body: JSON.stringify(data),
})
expect(res.statusCode).to.equal(200)
});
})
})