Commit 63a33a67 authored by wentao.suo

Merge branch 'master' of http://git.quantgroup.cn/QA/qg-dockerfiles

# Conflicts:
#	baseimg/tools/db/my.cnf
parents acb65990 60efc2ce
FROM 192.168.4.4/baseimg/centos:20180426
MAINTAINER daidekun "dekun.dai@quantgroup.cn"
WORKDIR /usr/local
RUN wget http://mirrors.linuxeye.com/jdk/jdk-8u144-linux-x64.tar.gz
......
FROM centos
# install tools
RUN yum update \
&& yum install -y make gcc* \
&& yum install -y git \
&& yum install -y vim \
&& yum install -y wget \
&& yum install -y sysvinit-tools \
&& yum install -y telnet
# install node
RUN curl -sL https://rpm.nodesource.com/setup_8.x | bash -
@@ -21,7 +24,7 @@ RUN npm install gulp
# install the console component
WORKDIR /home
RUN git clone http://git.q-gp.com/QA/qg-xterm.git
WORKDIR /home/qg-xterm
RUN npm install
@@ -39,6 +42,9 @@ ENV LC_ALL en_US.UTF-8
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
# SimSun font
RUN curl http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/fonts/install.sh | sh
# alias
RUN echo "export PS1='\[\033[m\][\[\033[36m\]\u\[\033[m\]:\[\033[33;1m\]\w\[\033[m\]\[\033[m\]]\[\033[m\]# '" >> ~/.bash_profile
RUN echo "export LANG='en_US.UTF-8';alias 'la=ls -al';alias rm='rm -i';alias mv='mv -i';alias grep='grep --color';alias vi='vim';alias pg='ps -ef | grep -i'" >> ~/.bashrc
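A minimal build-and-run sketch for the console image above; the tag qg-xterm:dev and the container name are assumptions, and the exposed port falls outside this hunk, so no port mapping is shown.
# hypothetical tag; adjust registry/name to match your setup
docker build -t qg-xterm:dev .
docker run -d --name qg-xterm qg-xterm:dev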
@@ -24,19 +24,30 @@ pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
log-error = /var/log/mysql/error.log
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
max_connections = 1000
connect_timeout = 3600
wait_timeout = 86400
interactive_timeout = 86400
innodb_lock_wait_timeout = 10
explicit_defaults_for_timestamp = true
log-bin=/var/lib/mysql/mysql-bin
binlog-format=ROW
server_id=1
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 1
bulk_insert_buffer_size = 32M
query_cache_type = 0
query_cache_size = 0
max_connections = 1024
max_connect_errors = 10
table_open_cache = 4096
max_allowed_packet = 512M
binlog_cache_size = 1M
max_heap_table_size = 64M
read_buffer_size = 8M
read_rnd_buffer_size = 16M
sort_buffer_size = 32K
join_buffer_size = 32K
thread_cache_size = 64
default_storage_engine = InnoDB
explicit_defaults_for_timestamp = true
character_set_server = utf8
FROM mysql:5.7.22
# define the working directory
WORKDIR /var/lib/mysql
ENV AUTO_RUN_DIR /docker-entrypoint-initdb.d
COPY my.cnf /etc/mysql/mysql.conf.d/mysqld.cnf
COPY mysqld_charset.cnf /etc/mysql/conf.d/mysqld_charset.cnf
# fix the timezone
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
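A hedged usage sketch for the image above. Since it builds FROM the official mysql:5.7.22 image and only adds config files and the timezone, the standard entrypoint variables such as MYSQL_ROOT_PASSWORD should still apply; the tag qg-mysql57 and the container name are assumptions.
docker build -t qg-mysql57 .
# scripts placed in /docker-entrypoint-initdb.d run on first start (official image behaviour)
docker run -d --name mysql57 -p 3306:3306 -e MYSQL_ROOT_PASSWORD=changeme qg-mysql57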
# -------------------------------------------------------------------------------
# Filename: init_db_data.sh
# Revision: 1.0
# Date: 2017/08
# Author: dekun.dai
# Description: run all database scripts under the directory
# -------------------------------------------------------------------------------
# sql_file_folder=$WORK_PATH/sql_to_run
#
# echo "cd $sql_file_folder"
# cd $sql_file_folder
#
# for entry in ./*
# do
# echo "run sql file: $entry"
# mysql -uroot < $entry
# done
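For reference, an uncommented, runnable version of the loop sketched above; WORK_PATH, the sql_to_run directory and the passwordless root login are assumptions carried over from the comments.
#!/bin/bash
sql_file_folder=$WORK_PATH/sql_to_run
echo "cd $sql_file_folder"
cd "$sql_file_folder" || exit 1
for entry in ./*
do
    echo "run sql file: $entry"
    mysql -uroot < "$entry"
done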
#!/bin/bash
export NAMESPACE=`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace`
echo "start replace db domain"
DB_FILE=dump.sql
HOST=$DB_SERVICE_HOST
PORT='3306'
USER='qa'
PASSWORD='qatest'
HTTPS='true'
echo "HOST=$HOST
PORT=$PORT
USER=$USER
PASSWORD=$PASSWORD
HTTPS=$HTTPS
NAMESPACE=$NAMESPACE"
EXCLUDED_TABLES=(
clotho.JOB_EXECUTION_LOG
clotho.JOB_STATUS_TRACE_LOG
notify.JOB_EXECUTION_LOG
notify.JOB_STATUS_TRACE_LOG
xyqb.JOB_EXECUTION_LOG
xyqb.JOB_STATUS_TRACE_LOG
xyqb.channel
xyqb.t_phonenum
mall.jd_address
mall.jd_brand
mall.jd_product_sku_image
mall.jd_property
mall.jd_sku
mall.jd_sku_detail_image
mall.jd_sku_specs
mall.spu_image
mall.sale_product_detail
mall.product_nature_value_bak
mall.product_sku
mall.product_sku_image
mall.product_specification
mall.product_spu
)
IGNORED_TABLES_STRING=''
for TABLE in "${EXCLUDED_TABLES[@]}"
do :
IGNORED_TABLES_STRING+=" --ignore-table=${TABLE}"
done
echo "mysqldump --host=${HOST} --port=${PORT} --user=${USER} --password=${PASSWORD} --all-databases --no-create-info ${IGNORED_TABLES_STRING} >> ${DB_FILE}"
mysqldump --host=${HOST} --port=${PORT} --user=${USER} --password=${PASSWORD} --all-databases ${IGNORED_TABLES_STRING} >> ${DB_FILE}
echo "sed file"
if [[ $HTTPS = true ]];then
sed -i "s#\.xyqb\.com#-${NAMESPACE}\.q-gp\.com#g; s#\.quantgroup\.cn#-${NAMESPACE}\.q-gp\.com#g; s#-base\.q-gp\.com#-${NAMESPACE}\.q-gp\.com#g" ${DB_FILE}
else
sed -i "s#\.xyqb\.com#-${NAMESPACE}\.q-gp\.com#g; s#\.quantgroup\.cn#-${NAMESPACE}\.q-gp\.com#g; s#-base\.q-gp\.com#-${NAMESPACE}\.q-gp\.com#g; s#\https#http#g" ${DB_FILE}
fi
echo "mysql dump file"
mysql --host=${HOST} --port=${PORT} --user=${USER} --password=${PASSWORD} < ${DB_FILE}
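To illustrate what the sed rules above do (assuming NAMESPACE=test): every *.xyqb.com, *.quantgroup.cn and *-base.q-gp.com host in the dump is rewritten to the namespace-scoped *-test.q-gp.com domain, and the non-HTTPS branch additionally downgrades https URLs to http.
# illustration only, not part of the script
echo "https://api.xyqb.com/pay" | sed "s#\.xyqb\.com#-test\.q-gp\.com#g"
# -> https://api-test.q-gp.com/pay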
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The MySQL Server configuration file.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
log-error = /var/log/mysql/error.log
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
log-bin=/var/lib/mysql/mysql-bin
binlog-format=ROW
server_id=1
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 1
bulk_insert_buffer_size = 32M
query_cache_type = 0
query_cache_size = 0
max_connections = 1024
max_connect_errors = 10
table_open_cache = 4096
max_allowed_packet = 512M
binlog_cache_size = 1M
max_heap_table_size = 64M
read_buffer_size = 8M
read_rnd_buffer_size = 16M
sort_buffer_size = 32K
join_buffer_size = 32K
thread_cache_size = 64
[mysqld]
character_set_server=utf8
character_set_filesystem=utf8
collation-server=utf8_general_ci
init-connect='SET NAMES utf8'
init_connect='SET collation_connection = utf8_general_ci'
skip-character-set-client-handshake
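A quick way to confirm the charset overrides above took effect inside a running container; the container name mysql57 is an assumption.
docker exec -it mysql57 mysql -uroot -p -e "SHOW VARIABLES LIKE 'character_set_%'; SHOW VARIABLES LIKE 'collation%';"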
FROM 192.168.4.4/baseimg/centos:201803291115
ENV FASTDFS_PATH=/opt/fdfs \
FASTDFS_BASE_PATH=/var/fdfs \
PORT= \
GROUP_NAME= \
TRACKER_SERVER=
#create the dirs to store the files downloaded from internet
RUN mkdir -p ${FASTDFS_PATH}/libfastcommon \
&& mkdir -p ${FASTDFS_PATH}/fastdfs \
&& mkdir ${FASTDFS_BASE_PATH}
#compile the libfastcommon
WORKDIR ${FASTDFS_PATH}/libfastcommon
RUN git clone --branch V1.0.36 --depth 1 https://github.com/happyfish100/libfastcommon.git ${FASTDFS_PATH}/libfastcommon \
&& ./make.sh \
&& ./make.sh install \
&& rm -rf ${FASTDFS_PATH}/libfastcommon
#compile the fastdfs
WORKDIR ${FASTDFS_PATH}/fastdfs
RUN git clone --branch V5.11 --depth 1 https://github.com/happyfish100/fastdfs.git ${FASTDFS_PATH}/fastdfs \
&& ./make.sh \
&& ./make.sh install \
&& rm -rf ${FASTDFS_PATH}/fastdfs
EXPOSE 22122 23000 8080 8888
VOLUME ["$FASTDFS_BASE_PATH", "/etc/fdfs"]
COPY conf/*.* /etc/fdfs/
COPY start.sh /usr/bin/
#make the start.sh executable
RUN chmod 777 /usr/bin/start.sh
ENTRYPOINT ["/usr/bin/start.sh"]
CMD ["tracker"]
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
base_path=/var/fdfs
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=192.168.0.197:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename=mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token=false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl=900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key=FastDFS1234567890
# return the content of the file when check token fail
# default value is empty (no file specified)
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg
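With this client.conf in place, the stock FastDFS command-line tools can be used for a smoke test; the local file path below is a placeholder.
fdfs_monitor /etc/fdfs/client.conf                        # list tracker/storage status
fdfs_upload_file /etc/fdfs/client.conf /tmp/example.jpg   # prints the stored file id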
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must be set to true in tracker.conf,
# and storage_ids.conf must be configured correctly.
group_name=group1
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configured by the above parameter: "bind_addr"
# false for binding any address of this host
client_bind=true
# the storage server port
port=23000
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# heart beat interval in seconds
heart_beat_interval=30
# disk usage report interval in seconds
stat_report_interval=60
# the base path to store data and log files
base_path=/var/fdfs
# max concurrent connections the server supported
# default value is 256
# more max_connections means more memory will be used
max_connections=256
# the buff size to recv / send data
# this parameter must be more than 8KB
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# work thread deal network io
# default value is 4
# since V2.00
work_threads=4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec=50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval=0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time=00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time=23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq=500
# path(disk or mount point) count, default value is 1
store_path_count=1
# store_path#, based on 0; if store_path0 does not exist, its value is base_path
# the paths must exist
store_path0=/var/fdfs
#store_path1=/var/fdfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path=256
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=192.168.209.121:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# the mode of the files distributed to the data path
# 0: round robin(default)
# 1: random, distributed by hash code
file_distribute_path_mode=0
# valid when file_distribute_to_path is set to 0 (round robin),
# when the written file count reaches this number, then rotate to next path
# default value is 100
file_distribute_rotate_count=100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes=0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval=10
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval=10
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval=300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size=512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority=10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# default values is empty
if_alias_prefix=
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate=0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method=hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace=FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive=0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time=00:00
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record=false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# use the ip address of this storage server if domain_name is empty,
# else this domain name will occur in the url redirected by the tracker server
http.domain_name=
# the port of the web server on this storage server
http.server_port=8888
# <id> <group_name> <ip_or_hostname>
# 100001 group1 192.168.0.196
# 100002 group1 192.168.0.116
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# the tracker server port
port=22122
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store data and log files
base_path=/var/fdfs
# max concurrent connections this server supported
max_connections=256
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# default value is 4
# since V2.00
work_threads=4
# min buff size
# default value 8KB
min_buff_size = 8KB
# max buff size
# default value 128KB
max_buff_size = 128KB
# the method of selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup=2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group=group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
store_server=0
# which path(means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path=0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server=0
# reserved storage space for system or other applications.
# if the free (available) space of any storage server in
# a group <= reserved_storage_space,
# no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as reserved_storage_space = 10%
reserved_storage_space = 10%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 10
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 64KB
thread_stack_size = 64KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time for storage to sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the uploaded file to the trunk file when its size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 16MB
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk file in advance
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 38400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size is less than the threshold, trunk files
# will be created
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: setting this parameter to true will slow the loading of trunk spaces
# at startup. you should set this parameter to true when necessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version is less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second
# default value is 0, 0 means never compress
# FastDFS compresses the trunk binlog on trunk init and trunk destroy
# recommended to set this parameter to 86400 (one day)
# since V5.01
trunk_compress_binlog_min_interval = 0
# if use storage ID instead of IP address
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this parameter is valid only when use_storage_id is set to true
# default value is ip
# since V4.03
id_type_in_filename = ip
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port=8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval=30
# check storage HTTP server alive type, values are:
# tcp : connect to the storage server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type=tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri=/status.html
#!/bin/bash
#set -e
if [ "$1" = "monitor" ] ; then
if [ -n "$TRACKER_SERVER" ] ; then
sed -i "s|tracker_server=.*$|tracker_server=${TRACKER_SERVER}|g" /etc/fdfs/client.conf
fi
fdfs_monitor /etc/fdfs/client.conf
exit 0
elif [ "$1" = "storage" ] ; then
FASTDFS_MODE="storage"
else
FASTDFS_MODE="tracker"
fi
if [ -n "$PORT" ] ; then
sed -i "s|^port=.*$|port=${PORT}|g" /etc/fdfs/"$FASTDFS_MODE".conf
fi
if [ -n "$TRACKER_SERVER" ] ; then
sed -i "s|tracker_server=.*$|tracker_server=${TRACKER_SERVER}|g" /etc/fdfs/storage.conf
sed -i "s|tracker_server=.*$|tracker_server=${TRACKER_SERVER}|g" /etc/fdfs/client.conf
fi
if [ -n "$GROUP_NAME" ] ; then
sed -i "s|group_name=.*$|group_name=${GROUP_NAME}|g" /etc/fdfs/storage.conf
fi
FASTDFS_LOG_FILE="${FASTDFS_BASE_PATH}/logs/${FASTDFS_MODE}d.log"
PID_NUMBER="${FASTDFS_BASE_PATH}/data/fdfs_${FASTDFS_MODE}d.pid"
echo "try to start the $FASTDFS_MODE node..."
if [ -f "$FASTDFS_LOG_FILE" ]; then
rm "$FASTDFS_LOG_FILE"
fi
# start the fastdfs node.
fdfs_${FASTDFS_MODE}d /etc/fdfs/${FASTDFS_MODE}.conf start
# wait for the pid file (important!): the max start time is 5 seconds; if the pid file does not appear within 5 seconds, the start failed.
TIMES=5
while [ ! -f "$PID_NUMBER" -a $TIMES -gt 0 ]
do
sleep 1s
TIMES=`expr $TIMES - 1`
done
# if the storage node starts successfully, print the start time.
# if [ $TIMES -gt 0 ]; then
# echo "the ${FASTDFS_MODE} node started successfully at $(date +%Y-%m-%d_%H:%M)"
# # give the detail log address
# echo "please have a look at the log detail at $FASTDFS_LOG_FILE"
# # leave blank lines to separate from the next log.
# echo
# echo
# # make the container have a foreground process (primary command!)
# tail -F --pid=`cat $PID_NUMBER` /dev/null
# # else print the error.
# else
# echo "the ${FASTDFS_MODE} node started failed at $(date +%Y-%m-%d_%H:%M)"
# echo "please have a look at the log detail at $FASTDFS_LOG_FILE"
# echo
# echo
# fi
tail -f "$FASTDFS_LOG_FILE"
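Given the argument handling above, the same image can also be used as a one-off monitor; the image tag fastdfs and the tracker address are placeholders.
docker run --rm -e TRACKER_SERVER=192.168.0.197:22122 fastdfs monitor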
FROM 192.168.4.36/baseimg/centos:201803291115
ENV KONG_VERSION 0.12.3
RUN yum install -y wget https://bintray.com/kong/kong-community-edition-rpm/download_file?file_path=centos/7/kong-community-edition-$KONG_VERSION.el7.noarch.rpm && \
yum clean all
COPY docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 8000 8443 8001 8444
STOPSIGNAL SIGTERM
CMD ["/usr/local/openresty/nginx/sbin/nginx", "-c", "/usr/local/kong/nginx.conf", "-p", "/usr/local/kong/"]
#!/bin/sh
set -e
# Disabling nginx daemon mode
export KONG_NGINX_DAEMON="off"
# Setting default prefix (override any existing variable)
export KONG_PREFIX="/usr/local/kong"
# Prepare Kong prefix
if [ "$1" = "/usr/local/openresty/nginx/sbin/nginx" ]; then
kong prepare -p "/usr/local/kong"
fi
exec "$@"
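A hedged start-up sketch for the Kong image; the KONG_DATABASE/KONG_PG_HOST variables come from Kong's standard environment-based configuration and are not defined in this Dockerfile, so treat them and the tag/hostname as assumptions.
docker build -t qg-kong:0.12.3 .
docker run -d --name kong \
    -e KONG_DATABASE=postgres -e KONG_PG_HOST=postgres.example \
    -p 8000:8000 -p 8443:8443 -p 8001:8001 -p 8444:8444 \
    qg-kong:0.12.3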
FROM 192.168.4.4/baseimg/centos:201803291115
ENV YUM_REPO_URL="https://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm"
USER root
RUN \
yum -y install epel-release && \
rpm -ivh ${YUM_REPO_URL} && \
yum-config-manager --disable mysql55-community && \
yum-config-manager --enable mysql56-community && \
yum-config-manager --disable mysql57-community && \
yum-config-manager --disable mysql80-community && \
yum clean all
RUN yum -y update && yum -y install \
mysql-community-server
RUN \
yum -y autoremove && \
yum clean metadata && \
yum clean all && \
yum -y install hostname && \
yum clean all
COPY ./docker-entrypoint.sh /
EXPOSE 3306
VOLUME /var/lib/mysql
VOLUME /var/log/mysql
ENTRYPOINT ["/docker-entrypoint.sh"]
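A build sketch for this MySQL 5.6 base image; the tag, container name and host data path are placeholders, and the copied docker-entrypoint.sh (next file) delegates start-up to a remote run.sh, so the actual runtime variables depend on that script.
docker build -t baseimg/mysql56 .
docker run -d --name mysql56 -p 3306:3306 -v /data/mysql:/var/lib/mysql baseimg/mysql56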
#!/bin/sh -eu
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/common/mysql/run.sh | sh
##
## MySQL 5.7
##
FROM 192.168.4.4/baseimg/centos:201803291115
###
@@ -15,6 +18,19 @@ ENV MY_GROUP="mysql"
ENV MY_UID="48"
ENV MY_GID="48"
# Files
ENV MYSQL_BASE_INCL="/etc/my.cnf.d"
ENV MYSQL_CUST_INCL1="/etc/mysql/conf.d"
ENV MYSQL_CUST_INCL2="/etc/mysql/docker-default.d"
ENV MYSQL_DEF_DAT="/var/lib/mysql"
ENV MYSQL_DEF_LOG="/var/log/mysql"
ENV MYSQL_DEF_PID="/var/run/mysqld"
ENV MYSQL_DEF_SCK="/var/sock/mysqld"
ENV MYSQL_LOG_SLOW="${MYSQL_DEF_LOG}/slow.log"
ENV MYSQL_LOG_ERROR="${MYSQL_DEF_LOG}/error.log"
ENV MYSQL_LOG_QUERY="${MYSQL_DEF_LOG}/query.log"
###
### Install
###
@@ -40,18 +56,6 @@ RUN \
yum -y install hostname && \
yum clean all
# Files
ENV MYSQL_BASE_INCL="/etc/my.cnf.d"
ENV MYSQL_CUST_INCL1="/etc/mysql/conf.d"
ENV MYSQL_CUST_INCL2="/etc/mysql/docker-default.d"
ENV MYSQL_DEF_DAT="/var/lib/mysql"
ENV MYSQL_DEF_LOG="/var/log/mysql"
ENV MYSQL_DEF_PID="/var/run/mysqld"
ENV MYSQL_DEF_SCK="/var/sock/mysqld"
ENV MYSQL_LOG_SLOW="${MYSQL_DEF_LOG}/slow.log"
ENV MYSQL_LOG_ERROR="${MYSQL_DEF_LOG}/error.log"
ENV MYSQL_LOG_QUERY="${MYSQL_DEF_LOG}/query.log"
##
## Configure
@@ -89,11 +93,10 @@ RUN \
chmod 0775 ${MYSQL_DEF_PID} && \
chmod 0775 ${MYSQL_DEF_LOG}
##
## Bootstrap Scripts
##
COPY ./docker-entrypoint.sh /
##
......
#!/bin/sh -eu
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/common/mysql/run.sh | sh
# Pull base image
FROM 192.168.4.36/baseimg/centos:201803291115
# Postgresql version
ENV PG_VERSION 9.4
ENV PGVERSION 94
# Set the environment variables
ENV HOME /var/lib/pgsql
ENV PGDATA /var/lib/pgsql/9.4/data
# Install postgresql and run InitDB
RUN rpm -vih https://download.postgresql.org/pub/repos/yum/$PG_VERSION/redhat/rhel-7-x86_64/pgdg-centos$PGVERSION-$PG_VERSION-2.noarch.rpm && \
yum update -y && \
yum install -y sudo \
pwgen \
postgresql$PGVERSION \
postgresql$PGVERSION-server \
postgresql$PGVERSION-contrib && \
yum clean all
# Copy
COPY data/postgresql-setup /usr/pgsql-$PG_VERSION/bin/postgresql$PGVERSION-setup
# Working directory
WORKDIR /var/lib/pgsql
# Run initdb
RUN /usr/pgsql-$PG_VERSION/bin/postgresql$PGVERSION-setup initdb
# Copy config file
COPY data/postgresql.conf /var/lib/pgsql/$PG_VERSION/data/postgresql.conf
COPY data/pg_hba.conf /var/lib/pgsql/$PG_VERSION/data/pg_hba.conf
COPY data/postgresql.sh /usr/local/bin/postgresql.sh
# Change own user
RUN chown -R postgres:postgres /var/lib/pgsql/$PG_VERSION/data/* && \
usermod -G wheel postgres && \
sed -i 's/.*requiretty$/#Defaults requiretty/' /etc/sudoers && \
chmod +x /usr/local/bin/postgresql.sh
# Set volume
VOLUME ["/var/lib/pgsql"]
# Set username
USER postgres
# Run PostgreSQL Server
CMD ["/bin/bash", "/usr/local/bin/postgresql.sh"]
# Expose ports.
EXPOSE 5432
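A hedged run sketch; DB_NAME, DB_USER and DB_PASS are the variables read by postgresql.sh further down, while the image tag, container name and credential values are placeholders.
docker build -t baseimg/postgres94 .
docker run -d --name pg94 -p 5432:5432 \
    -e DB_NAME=appdb -e DB_USER=app -e DB_PASS=secret \
    baseimg/postgres94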
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file. A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access. Records take one of these forms:
#
# local DATABASE USER METHOD [OPTIONS]
# host DATABASE USER ADDRESS METHOD [OPTIONS]
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type: "local" is a Unix-domain
# socket, "host" is either a plain or SSL-encrypted TCP/IP socket,
# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a
# plain TCP/IP socket.
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof. In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches. It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask. A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts. Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi",
# "ident", "peer", "pam", "ldap", "radius" or "cert". Note that
# "password" sends passwords in clear text; "md5" is preferred since
# it sends encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE. The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted. Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the postmaster receives
# a SIGHUP signal. If you edit the file on a running system, you have
# to SIGHUP the postmaster for the changes to take effect. You can
# use "pg_ctl reload" to do that.
# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records. In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.
# TYPE DATABASE USER ADDRESS METHOD
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
#!/bin/sh
#
# postgresql-setup Initialization and upgrade operations for PostgreSQL
# PGVERSION is the full package version, e.g., 9.4.0
# Note: the specfile inserts the correct value during package build
PGVERSION=9.4.8
# PGMAJORVERSION is major version, e.g., 9.4 (this should match PG_VERSION)
PGMAJORVERSION=`echo "$PGVERSION" | sed 's/^\([0-9]*\.[0-9]*\).*$/\1/'`
# PGENGINE is the directory containing the postmaster executable
# Note: the specfile inserts the correct value during package build
PGENGINE=/usr/pgsql-$PGMAJORVERSION/bin
# The second parameter is the new database version, i.e. $PGMAJORVERSION in this case.
# Use "postgresql-$PGMAJORVERSION" service, if not specified.
SERVICE_NAME="$2"
if [ x"$SERVICE_NAME" = x ]
then
SERVICE_NAME=postgresql-$PGMAJORVERSION
fi
# note that these options are useful at least for help2man processing
case "$1" in
--version)
echo "postgresql-setup $PGVERSION"
exit 0
;;
esac
# this parsing technique fails for PGDATA pathnames containing spaces,
# but there's not much I can do about it given systemctl's output format...
PGDATA=/var/lib/pgsql/$PGMAJORVERSION/data/
# Log file for initdb
PGLOG=/var/lib/pgsql/$PGMAJORVERSION/initdb.log
export PGDATA
SU=su
script_result=0
# code shared between initdb and upgrade actions
perform_initdb(){
if [ ! -e "$PGDATA" ]; then
mkdir "$PGDATA" || return 1
chown postgres:postgres "$PGDATA"
chmod go-rwx "$PGDATA"
fi
# Clean up SELinux tagging for PGDATA
[ -x /sbin/restorecon ] && /sbin/restorecon "$PGDATA"
# Create the initdb log file if needed
if [ ! -e "$PGLOG" -a ! -h "$PGLOG" ]; then
touch "$PGLOG" || return 1
chown postgres:postgres "$PGLOG"
chmod go-rwx "$PGLOG"
[ -x /sbin/restorecon ] && /sbin/restorecon "$PGLOG"
fi
# Initialize the database
initdbcmd="$PGENGINE/initdb --pgdata='$PGDATA' --auth='ident'"
initdbcmd+=" $PGSETUP_INITDB_OPTIONS"
$SU -l postgres -c "$initdbcmd" >> "$PGLOG" 2>&1 < /dev/null
# Create directory for postmaster log files
mkdir "$PGDATA/pg_log"
chown postgres:postgres "$PGDATA/pg_log"
chmod go-rwx "$PGDATA/pg_log"
[ -x /sbin/restorecon ] && /sbin/restorecon "$PGDATA/pg_log"
if [ -f "$PGDATA/PG_VERSION" ]; then
return 0
fi
return 1
}
initdb(){
if [ -f "$PGDATA/PG_VERSION" ]; then
echo $"Data directory is not empty!"
echo
script_result=1
else
echo -n $"Initializing database ... "
if perform_initdb; then
echo $"OK"
else
echo $"failed, see $PGLOG"
script_result=1
fi
echo
fi
}
# See how we were called.
case "$1" in
initdb)
initdb
;;
*)
echo >&2 "$USAGE_STRING"
exit 2
esac
exit $script_result
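Usage matching the case statement above, with the paths as installed by the Dockerfile earlier in this commit:
/usr/pgsql-9.4/bin/postgresql94-setup --version   # prints "postgresql-setup 9.4.8"
/usr/pgsql-9.4/bin/postgresql94-setup initdb      # creates $PGDATA and logs to initdb.log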
#!/bin/bash
#Version
PG_VERSION="9.4"
#Settings
DB_NAME=${DB_NAME:-}
DB_USER=${DB_USER:-}
DB_PASS=${DB_PASS:-}
PG_PORT=5432
PG_CONFDIR="/var/lib/pgsql/$PG_VERSION/data"
PG_CTL="/usr/pgsql-$PG_VERSION/bin/pg_ctl"
PG_USER="postgres"
PSQL="/bin/psql"
create_dbuser() {
## Extracted from https://github.com/CentOS/CentOS-Dockerfiles/blob/master/postgres/centos7/
## and modified by me
##
## Check to see if we have pre-defined credentials to use
if [ -n "${DB_USER}" ]; then
# run postgresql server
cd /var/lib/pgsql && sudo -u $PG_USER bash -c "$PG_CTL -D $PG_CONFDIR -o \"-c listen_addresses='*'\" -w start"
# generate password
if [ -z "${DB_PASS}" ]; then
echo "WARNING: "
echo "No password specified for \"${DB_USER}\". Generating one"
DB_PASS=$(pwgen -c -n -1 12)
echo "Password for \"${DB_USER}\" created as: \"${DB_PASS}\""
fi
# create user
echo "Creating user \"${DB_USER}\"..."
$PSQL -U $PG_USER -c "CREATE ROLE ${DB_USER} with CREATEROLE login superuser PASSWORD '${DB_PASS}';"
# the user has been created, so set authentication method to md5
sudo -u $PG_USER bash -c "echo \"host all all 0.0.0.0/0 md5\" >> $PG_CONFDIR/pg_hba.conf"
else
# the user is not created, so set authentication method to trust
sudo -u $PG_USER bash -c "echo \"host all all 0.0.0.0/0 trust\" >> $PG_CONFDIR/pg_hba.conf"
fi
if [ -n "${DB_NAME}" ]; then
# create database
echo "Creating database \"${DB_NAME}\"..."
echo "CREATE DATABASE ${DB_NAME};"
$PSQL -U $PG_USER -c "CREATE DATABASE ${DB_NAME}"
# grant permission
if [ -n "${DB_USER}" ]; then
echo "Granting access to database \"${DB_NAME}\" for user \"${DB_USER}\"..."
$PSQL -U $PG_USER -c "GRANT ALL PRIVILEGES ON DATABASE ${DB_NAME} to ${DB_USER};"
fi
# stop postgresql server
sudo -u $PG_USER bash -c "$PG_CTL -D $PG_CONFDIR -m fast -w stop"
fi
}
postgresql_server () {
/usr/pgsql-$PG_VERSION/bin/postgres -D /var/lib/pgsql/$PG_VERSION/data -p $PG_PORT
}
####
####
create_dbuser
echo "Starting PostgreSQL $PG_VERSION server..."
postgresql_server
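Once the server is up, a client can connect with the credentials created above; host, port mapping and credential values are placeholders matching the earlier run example.
PGPASSWORD=secret psql -h 127.0.0.1 -p 5432 -U app appdb -c 'select version();'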
@@ -39,6 +39,8 @@ ENV PATH=$PATH:/$DISTRO_NAME/bin \
ZOOCFGDIR=$ZOO_CONF_DIR
COPY ./scripts/docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"] ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["zkServer.sh", "start-foreground"] CMD ["zkServer.sh", "start-foreground"]
......
FROM 192.168.4.4/baseimg/jdk8:20180426
USER root
WORKDIR /home/quant_group
......
#!/bin/sh
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/java/run.sh | sh
#!/bin/sh
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/lua-ui/run.sh | sh
#!/bin/sh
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/nodejs/run.sh | sh
#!/bin/sh
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/python/run.sh | sh
#!/bin/sh
curl -sSL http://git.q-gp.com/QA/qg-docker-entrypoints/raw/master/dev/tools/eos/run.sh | sh