DevOps / enoch · Commits

Commit 561ac413
Merge branch 'fileCache'
Authored Jul 25, 2019 by fengjunkai
Parents: 53f8b9ed, 893ad7e8

Showing 13 changed files with 239 additions and 114 deletions (+239 / -114)
go.mod                                        +1   -0
main.go                                       +4   -3
service/Interface.go                          +3   -2
service/alarm/operator.go                     +28  -0
service/consumer/brave_message_handler.go     +11  -87
service/consumer/constant.go                  +3   -0
service/consumer/health_message_handler.go    +37  -11
service/consumer/kafka_agent_sarama.go        +16  -5
service/consumer/message_handler.go           +2   -1
service/consumer/util.go                      +87  -0
service/end_points/chunk_msg.go               +5   -4
service/end_points/trace_msg.go               +1   -1
service/file_cache/switcher.go                +41  -0
go.mod

@@ -18,6 +18,7 @@ require (
     github.com/onsi/gomega v1.4.3 // indirect
     github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect
     github.com/robfig/cron v0.0.0-20180505203441-b41be1df6967
+    github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a
     gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
     gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
 )
main.go

@@ -4,6 +4,7 @@ import (
     "flag"
     "git.quantgroup.cn/DevOps/enoch/service"
     "git.quantgroup.cn/DevOps/enoch/service/conf"
+    "git.quantgroup.cn/DevOps/enoch/service/consumer"
     "git.quantgroup.cn/DevOps/enoch/service/continuous_queries"
     "git.quantgroup.cn/DevOps/enoch/service/data"
     "git.quantgroup.cn/DevOps/enoch/service/file_cache"

@@ -31,7 +32,7 @@ func main() {
         log.Fatalln("create file error", err)
     }
     file_cache.Load(conf.GlobalConfig.FileCachePath)
-    file_cache.RegisterJob(service.ReSubmit)
+    file_cache.RegisterJob(consumer.ReSubmit)
     go file_cache.Delete()
     port := conf.GlobalConfig.Port

@@ -42,8 +43,8 @@ func main() {
     //初始化redis连接池 (initialize the Redis connection pool)
     data.RedisPoolInit()
-    go service.AgentClusterConsumer(conf.HealthTopic(), service.HealthMessageHandler{})
-    go service.AgentClusterConsumer(conf.BraveTopic(), service.BraveMessageHandler{})
+    go consumer.AgentClusterConsumer(conf.HealthTopic(), consumer.HealthMessageHandler{})
+    go consumer.AgentClusterConsumer(conf.BraveTopic(), consumer.BraveMessageHandler{})
     intPort, _ := strconv.Atoi(port)
     if quartz {
service/Interface.go

@@ -2,6 +2,7 @@ package service
 import (
     "fmt"
+    "git.quantgroup.cn/DevOps/enoch/service/consumer"
     "git.quantgroup.cn/DevOps/enoch/service/util"
     "net/http"
     "strconv"

@@ -16,7 +17,7 @@ func DurationInterface(w http.ResponseWriter, r *http.Request) {
 func DurationCalcAndSendEmail(day string) {
     info := ""
-    Duration(day, func(sysName string, durations map[string]string) {
+    consumer.Duration(day, func(sysName string, durations map[string]string) {
         info = info + "\n" + "系统名称 : " + strings.Split(sysName, ":")[1] + "\n"
         for k, v := range durations {
             i, err := strconv.Atoi(v)

@@ -38,7 +39,7 @@ func CounterInterface(w http.ResponseWriter, r *http.Request) {
 func CounterCalcAndSendEmail(day string) {
     info := ""
-    Counter(day, func(sysName string, durations map[string]string) {
+    consumer.Counter(day, func(sysName string, durations map[string]string) {
         info = info + "\n" + "系统名称 : " + strings.Split(sysName, ":")[1] + "\n"
         for k, v := range durations {
             info = info + k + " , 次数:" + v + "\n"
service/alarm/operator.go

@@ -49,6 +49,19 @@ func (Compare) Equal(alter string, real []string) bool {
     return rs
 }

+// 限制同比 (limit-style year-over-year comparison)
+func (Compare) LimitComparedWithSame(alter string, old []string, current []string) bool {
+    logger.Info.Println("old:", strings.Join(old, ","), "new: ", strings.Join(current, ","))
+    rs := true
+    lastIndex := len(current) - 1
+    for i, r := range current {
+        if i != 0 || i != lastIndex {
+            rs = rs && limitCompareSame(alter, old[i], r)
+        }
+    }
+    return rs
+}
+
 /**
 同比超过alter (year-over-year growth exceeds alter)
 */

@@ -65,6 +78,15 @@ func (Compare) ComparedWithSame(alter string, old []string, current []string) bo
 }

+func limitCompareSame(alter string, old string, current string) bool {
+    cf := parseToFloat(current)
+    of := parseToFloat(old)
+    if cf < 200 && of < 200 {
+        return false
+    }
+    return (cf-of)/of > parseToFloat(alter)
+}
+
 func compareSame(alter string, old string, current string) bool {
     cf := parseToFloat(current)
     of := parseToFloat(old)

@@ -106,3 +128,9 @@ func (MsgBuilder) Equal(alter string) string {
 func (MsgBuilder) ComparedWithSame(alter string) string {
     return "同比超过" + alter
 }
+
+// 限制同比 (limit-style year-over-year comparison)
+func (MsgBuilder) LimitComparedWithSame(alter string) string {
+    return "同比超过" + alter
+}
\ No newline at end of file
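Note on the comparison added above: limitCompareSame only fires when at least one of the two values reaches 200, and it then compares the relative growth (cf - of) / of against the alter threshold. The stand-alone sketch below (not part of this commit) illustrates the rule with made-up numbers; parseToFloat is reimplemented here under the assumption that it simply parses the metric string.

package main

import (
    "fmt"
    "strconv"
)

// Assumed behaviour of the project's parseToFloat helper: parse a metric string.
func parseToFloat(s string) float64 {
    f, _ := strconv.ParseFloat(s, 64)
    return f
}

// Same logic as the limitCompareSame added in operator.go: skip small values,
// otherwise alarm when relative growth exceeds the alter threshold.
func limitCompareSame(alter, old, current string) bool {
    cf := parseToFloat(current)
    of := parseToFloat(old)
    if cf < 200 && of < 200 {
        return false
    }
    return (cf-of)/of > parseToFloat(alter)
}

func main() {
    fmt.Println(limitCompareSame("0.5", "300", "900")) // true: (900-300)/300 = 2.0 > 0.5
    fmt.Println(limitCompareSame("0.5", "10", "100"))  // false: both values below the 200 floor
}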
service/msg_process.go → service/consumer/brave_message_handler.go

-package service
+package consumer

 import (
     "encoding/json"
     "fmt"
     "git.quantgroup.cn/DevOps/enoch/service/data"
-    "git.quantgroup.cn/DevOps/enoch/service/file_cache"
+    "git.quantgroup.cn/DevOps/enoch/service/end_points"
     "git.quantgroup.cn/DevOps/enoch/service/log"
     "github.com/gomodule/redigo/redis"
     "github.com/influxdata/influxdb/client/v2"

@@ -21,20 +21,23 @@ var httpMethod = map[string]string{
 func (BraveMessageHandler) MsgProcess(msg string) {
-    traceMsg := make([]TraceMsg, 3) //[]TraceMsg{}
+    traceMsg := make([]end_points.TraceMsg, 3) //[]TraceMsg{}
     err := json.Unmarshal([]byte(msg), &traceMsg)
     if err != nil {
         fmt.Println(err)
+        logger.Error.Println("brave 解析msg失败:", err)
     }
     //msgRedisProcess(traceMsg)
     msgInfluxProcess(traceMsg)
 }

+func (BraveMessageHandler) Destroy() {
+    if len(pointSlice) > 0 {
+        logger.Info.Println("braveMessageHandler 提交本地缓存数据:", len(pointSlice))
+        batchWrite(pointSlice)
+    }
+}
+
-var batchSize = 5000
 var pointSlice = make([]*client.Point, 0, batchSize)

-func msgInfluxProcess(traceMsgs []TraceMsg) {
+func msgInfluxProcess(traceMsgs []end_points.TraceMsg) {
     for _, traceMsg := range traceMsgs {

@@ -83,84 +86,6 @@ func msgInfluxProcess(traceMsgs []TraceMsg) {
 }

-func batchWrite(pointArray []*client.Point) {
-    if file_cache.Enabled() {
-        logger.Info.Println("写入缓存")
-        fileWrite(pointArray)
-    } else {
-        err := httpWrite(pointArray)
-        if err != nil {
-            file_cache.OpenCache()
-            fileWrite(pointArray)
-        }
-        logger.Info.Println("写入influx", len(pointArray))
-    }
-}
-
-func httpWrite(pointArray []*client.Point) error {
-    c := data.NewClient()
-    defer func() { _ = c.Close() }()
-    points, err := client.NewBatchPoints(client.BatchPointsConfig{
-        Database: "monitor",
-        //Precision : "ms",
-    })
-    if err != nil {
-        return err
-    }
-    points.AddPoints(pointArray)
-    err = c.Write(points)
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-func ReSubmit(data []string) error {
-    pointSlice := make([]*client.Point, 0)
-    for _, v := range data {
-        cp := file_cache.CreateCachePoint(v)
-        point, err := client.NewPoint(cp.Name, cp.Tags, cp.Fields, cp.Time)
-        if err != nil {
-            logger.Error.Println("构造client.point异常", err)
-        }
-        pointSlice = append(pointSlice, point)
-        if len(pointSlice) > 1000 {
-            err := httpWrite(pointSlice)
-            if err != nil {
-                return err
-            }
-            logger.Info.Println("缓存重新提交:1000")
-            pointSlice = make([]*client.Point, 0)
-        }
-    }
-    if len(pointSlice) > 0 {
-        err := httpWrite(pointSlice)
-        if err != nil {
-            logger.Info.Println(pointSlice)
-            return err
-        }
-        logger.Info.Println("缓存重新提交:", len(pointSlice))
-    }
-    logger.Info.Println("重新提交")
-    return nil
-}
-
-func fileWrite(pointArray []*client.Point) {
-    for _, p := range pointArray {
-        if p != nil {
-            current := file_cache.NewPoint(p)
-            data, err := json.Marshal(current)
-            if err != nil {
-                fmt.Println(err)
-            }
-            file_cache.Write(string(data))
-        }
-    }
-}
-
 func Duration(day string, fun func(sysName string, durations map[string]string)) {
     conn := data.Pool.Get()

@@ -200,5 +125,4 @@ func Counter(day string, fun func(sysName string, durations map[string]string))
         }
         fun(string(redisKey.([]uint8)), reply2)
     }
 }
service/consumer/constant.go (new file, 0 → 100644)

+package consumer
+
+var batchSize = 5000
service/agent_msg_process.go → service/consumer/health_message_handler.go

-package service
+package consumer

 import (
     "encoding/json"
     "fmt"
     "git.quantgroup.cn/DevOps/enoch/service/end_points"
     "git.quantgroup.cn/DevOps/enoch/service/log"
     "github.com/influxdata/influxdb/client/v2"
     "math/big"
     "net"

@@ -21,21 +21,44 @@ func (HealthMessageHandler) MsgProcess(msg string) {
     chunkMsg := end_points.ChunkMsg{}
     err := json.Unmarshal([]byte(msg), &chunkMsg)
     if err != nil {
         fmt.Println(err)
+        logger.Error.Println("healthMessageHandler解析json失败:", err)
+        logger.Error.Println(msg)
     }
     buildMsg(chunkMsg)
 }

+func (HealthMessageHandler) Destroy() {
+    if len(metricsPointSlice) > 0 {
+        logger.Info.Println("metricsMessageHandler 提交本地缓存数据:", len(metricsPointSlice))
+        batchWrite(metricsPointSlice)
+    }
+    if len(healthPointSlice) > 0 {
+        logger.Info.Println("HealthMessageHandler 提交本地缓存数据:", len(healthPointSlice))
+        batchWrite(healthPointSlice)
+    }
+}
+
-func buildHealthInfluxMsg(appName string, ip string, timestamp time.Time, submitLimit int, db map[string]end_points.DBDetail) {
+func buildHealthInfluxMsg(appName string, ip string, timestamp time.Time, submitLimit int, db *[]byte) {
     tags := make(map[string]string)
     tags["sys_name"] = appName
     tags["host"] = ip
     fields := make(map[string]interface{})
-    for k, v := range db {
-        var fieldName = v.Details.Database + "—" + k
-        fields[fieldName] = isOK(v.Status.Code)
-    }
+    dbInfo := end_points.DBInfo{}
+    err := json.Unmarshal(*db, &dbInfo)
+    if err != nil {
+        dbDetails := end_points.DBDetail{}
+        err = json.Unmarshal(*db, &dbDetails)
+        if err == nil {
+            fields[dbDetails.Details.Database] = isOK(dbDetails.Status.Code)
+        }
+    } else {
+        for k, v := range dbInfo.Details {
+            var fieldName = v.Details.Database + "—" + k
+            fields[fieldName] = isOK(v.Status.Code)
+        }
+    }
     if len(healthPointSlice) >= submitLimit {
         go batchWrite(healthPointSlice)
         healthPointSlice = make([]*client.Point, 0, batchSize)

@@ -50,10 +73,12 @@ func buildMetricsInfluxMsg(appName string, ip string, timestamp time.Time, submi
     tags["sys_name"] = appName
     tags["host"] = ip
-    var status = health.Status
+    status := health.Status
     fields["sever_status"] = isOK(status.Code)
-    var diskSpace = health.Details.DiskSpace.Details
+    redis := health.Details.Redis
+    fields["redis_status"] = isOK(redis.Status.Code)
+    diskSpace := health.Details.DiskSpace.Details
     fields["disk_tol"] = diskSpace.Total
     fields["disk_free"] = diskSpace.Free

@@ -111,7 +136,8 @@ func buildMsg(chunkMsg end_points.ChunkMsg) {
         buildMetricsInfluxMsg(appName, ip, unix, sysNameCount, p.Health, p.Metrics)
         //health_info
-        buildHealthInfluxMsg(appName, ip, unix, sysNameCount, p.Health.Details.Db.Details)
+        dbByte, _ := json.Marshal(p.Health.Details.Db)
+        buildHealthInfluxMsg(appName, ip, unix, sysNameCount, &dbByte)
     }
 }
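The reworked buildHealthInfluxMsg above receives the db payload as raw JSON (buildMsg re-marshals p.Health.Details.Db) and tries two shapes in turn: first the multi-database end_points.DBInfo, then a single end_points.DBDetail. The stand-alone sketch below shows that try-one-shape-then-fall-back pattern; the structs and JSON samples are stand-ins, not the project's real type definitions.

package main

import (
    "encoding/json"
    "fmt"
)

// Stand-in shapes; the real end_points.DBInfo / DBDetail definitions may differ.
type dbDetail struct {
    Details struct {
        Database string `json:"database"`
    } `json:"details"`
    Status struct {
        Code string `json:"code"`
    } `json:"status"`
}

type dbInfo struct {
    Details map[string]dbDetail `json:"details"`
}

// decodeDb mirrors the fallback in buildHealthInfluxMsg: try the multi-database
// shape first and, if that fails to unmarshal, treat the payload as a single detail.
func decodeDb(raw []byte) map[string]string {
    fields := map[string]string{}
    info := dbInfo{}
    if err := json.Unmarshal(raw, &info); err != nil {
        detail := dbDetail{}
        if err := json.Unmarshal(raw, &detail); err == nil {
            fields[detail.Details.Database] = detail.Status.Code
        }
        return fields
    }
    for name, d := range info.Details {
        fields[d.Details.Database+"-"+name] = d.Status.Code
    }
    return fields
}

func main() {
    multi := []byte(`{"details":{"primary":{"details":{"database":"mysql"},"status":{"code":"UP"}}}}`)
    single := []byte(`{"details":{"database":"mysql"},"status":{"code":"UP"}}`)
    fmt.Println(decodeDb(multi))  // map[mysql-primary:UP]
    fmt.Println(decodeDb(single)) // map[mysql:UP]
}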
service/kafka_agent_sarama.go → service/consumer/kafka_agent_sarama.go

-package service
+package consumer

 import (
     "git.quantgroup.cn/DevOps/enoch/service/conf"

@@ -7,12 +7,14 @@ import (
     "github.com/bsm/sarama-cluster"
     "os"
     "os/signal"
+    "sync/atomic"
+    "syscall"
 )

+var consumerCount int32
+
 func AgentClusterConsumer(kafkaConf conf.KafkaConf, messageHandle MessageHandler) {
     config := cluster.NewConfig()
     config.Consumer.Return.Errors = true
     config.Consumer.Offsets.Initial = sarama.OffsetOldest
     config.Group.Return.Notifications = true

@@ -21,10 +23,20 @@ func AgentClusterConsumer(kafkaConf conf.KafkaConf, messageHandle MessageHandler
     if err != nil {
         panic(err)
     }
-    defer func() { _ = consumer.Close() }()
+    atomic.AddInt32(&consumerCount, 1)

     // trap SIGINT to trigger a shutdown.
     signals := make(chan os.Signal, 1)
-    signal.Notify(signals, os.Interrupt)
+    signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT, os.Interrupt)
+    defer func() {
+        _ = consumer.Close()
+        messageHandle.Destroy()
+        atomic.AddInt32(&consumerCount, -1)
+        logger.Info.Println("consumer结束")
+        if consumerCount == 0 {
+            os.Exit(0)
+        }
+    }()

     // consume errors
     go func() {

@@ -52,5 +64,4 @@ func AgentClusterConsumer(kafkaConf conf.KafkaConf, messageHandle MessageHandler
             return
         }
     }
 }
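The consumer above now also listens for SIGTERM/SIGINT, keeps an atomic count of live consumers, and on shutdown closes the Kafka consumer, calls the handler's new Destroy() to flush buffered points, and exits once the last consumer has finished. Below is a stripped-down, stand-alone sketch of that sequencing, with no Kafka and a stub flush function; it only illustrates the pattern, not the project's actual wiring.

package main

import (
    "fmt"
    "os"
    "os/signal"
    "sync/atomic"
    "syscall"
)

var consumerCount int32

// runConsumer mimics the shutdown path in kafka_agent_sarama.go: register for
// signals, and on exit flush the handler and decrement the live-consumer count.
func runConsumer(name string, destroy func()) {
    atomic.AddInt32(&consumerCount, 1)

    signals := make(chan os.Signal, 1)
    signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT, os.Interrupt)

    defer func() {
        destroy() // the real code also closes the sarama-cluster consumer here
        if atomic.AddInt32(&consumerCount, -1) == 0 {
            fmt.Println(name, "last consumer done, exiting")
        }
    }()

    <-signals // block until a shutdown signal arrives
}

func main() {
    runConsumer("health", func() { fmt.Println("flush buffered health points") })
}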
service/messageHandler.go → service/consumer/message_handler.go

-package service
+package consumer

 type MessageHandler interface {
     MsgProcess(msg string)
+    Destroy()
 }
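MessageHandler gains a Destroy() method so the consumer can ask each handler to flush its buffered points on shutdown; BraveMessageHandler and HealthMessageHandler above implement it by handing their local slices to batchWrite. A stand-alone sketch of a hypothetical handler satisfying the updated interface:

package sketch

// Copied from service/consumer/message_handler.go.
type MessageHandler interface {
    MsgProcess(msg string)
    Destroy()
}

// bufferedHandler is hypothetical; it only illustrates the contract:
// MsgProcess accumulates, Destroy flushes whatever is still buffered.
type bufferedHandler struct {
    buffer []string
}

func (h *bufferedHandler) MsgProcess(msg string) {
    h.buffer = append(h.buffer, msg)
}

func (h *bufferedHandler) Destroy() {
    if len(h.buffer) > 0 {
        // The real handlers hand their buffered points to batchWrite here.
        h.buffer = h.buffer[:0]
    }
}

// Compile-time check that the sketch satisfies the interface.
var _ MessageHandler = (*bufferedHandler)(nil)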
service/consumer/util.go (new file, 0 → 100644)

package consumer

import (
    "encoding/json"
    "fmt"
    "git.quantgroup.cn/DevOps/enoch/service/data"
    "git.quantgroup.cn/DevOps/enoch/service/file_cache"
    "git.quantgroup.cn/DevOps/enoch/service/log"
    "github.com/influxdata/influxdb/client/v2"
)

func batchWrite(pointArray []*client.Point) {
    if file_cache.Enabled() {
        logger.Info.Println("写入缓存")
        fileWrite(pointArray)
    } else {
        err := httpWrite(pointArray)
        if err != nil {
            file_cache.OpenCache()
            fileWrite(pointArray)
        }
        logger.Info.Println("写入influx", len(pointArray))
    }
}

func httpWrite(pointArray []*client.Point) error {
    c := data.NewClient()
    defer func() { _ = c.Close() }()
    points, err := client.NewBatchPoints(client.BatchPointsConfig{
        Database: "monitor",
        //Precision : "ms",
    })
    if err != nil {
        return err
    }
    points.AddPoints(pointArray)
    err = c.Write(points)
    if err != nil {
        return err
    }
    return nil
}

func ReSubmit(data []string) error {
    pointSlice := make([]*client.Point, 0)
    for _, v := range data {
        cp := file_cache.CreateCachePoint(v)
        point, err := client.NewPoint(cp.Name, cp.Tags, cp.Fields, cp.Time)
        if err != nil {
            logger.Error.Println("构造client.point异常", err)
        }
        pointSlice = append(pointSlice, point)
        if len(pointSlice) > 1000 {
            err := httpWrite(pointSlice)
            if err != nil {
                return err
            }
            logger.Info.Println("缓存重新提交:1000")
            pointSlice = make([]*client.Point, 0)
        }
    }
    if len(pointSlice) > 0 {
        err := httpWrite(pointSlice)
        if err != nil {
            logger.Info.Println(pointSlice)
            return err
        }
        logger.Info.Println("缓存重新提交:", len(pointSlice))
    }
    logger.Info.Println("重新提交")
    return nil
}

func fileWrite(pointArray []*client.Point) {
    for _, p := range pointArray {
        if p != nil {
            current := file_cache.NewPoint(p)
            data, err := json.Marshal(current)
            if err != nil {
                fmt.Println(err)
            }
            file_cache.Write(string(data))
        }
    }
}
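The new util.go centralizes the write path shared by both handlers: while the file cache is enabled, points go to disk; otherwise they are written to InfluxDB, a failed write turns the cache on and diverts the same batch to disk, and ReSubmit later replays cached points in chunks of at most 1000. The stand-alone sketch below shows just that write-or-cache decision, with stub functions in place of file_cache and the InfluxDB client.

package main

import "fmt"

// Stubs standing in for file_cache and the InfluxDB client used by util.go.
var cacheEnabled bool

func openCache()            { cacheEnabled = true }
func fileWrite(batch []int) { fmt.Println("cached", len(batch), "points") }

func httpWrite(batch []int) error {
    // Pretend the InfluxDB write fails once the batch gets large.
    if len(batch) > 3 {
        return fmt.Errorf("write timeout")
    }
    fmt.Println("wrote", len(batch), "points to influx")
    return nil
}

// batchWrite mirrors the control flow in service/consumer/util.go: prefer the
// cache while it is on, otherwise try InfluxDB and fall back to the cache on error.
func batchWrite(batch []int) {
    if cacheEnabled {
        fileWrite(batch)
        return
    }
    if err := httpWrite(batch); err != nil {
        openCache()
        fileWrite(batch)
    }
}

func main() {
    batchWrite([]int{1, 2})          // small batch: stub InfluxDB write succeeds
    batchWrite([]int{1, 2, 3, 4, 5}) // stub write fails: cache is opened, batch goes to disk
    batchWrite([]int{6})             // cache now enabled: goes straight to disk
}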
service/end_points/chunk_msg.go

@@ -19,7 +19,8 @@ type Health struct {
 type Detail struct {
     DiskSpace DiskInfo    `json:"diskSpace"`
-    Db        DBInfo      `json:"db"`
+    Redis     RedisInfo   `json:"redis"`
+    Db        interface{} `json:"db"`
 }

 type DBInfo struct {
service/trace_msg.go → service/end_points/trace_msg.go

-package service
+package end_points

 type TraceMsg struct {
     TraceId string `json:"traceId"`
service/file_cache/switcher.go

 package file_cache

 import (
     "encoding/json"
     "git.quantgroup.cn/DevOps/enoch/service/log"
     "net/http"
     "strings"
     "sync"
     "time"
 )

@@ -22,6 +25,7 @@ func (s *switcher) turnOn() {
         s.state = true
         s.origin = time.Now().Unix()
         create()
+        senderDingDing()
     }
 }

@@ -48,9 +52,16 @@ func (s *switcher) status() bool {
 }

 var cacheSwitcher *switcher
+var alterMsg string
+
+const (
+    url         = "https://oapi.dingtalk.com/robot/send?access_token=9ffab8e4ae5f94e0fbf84aa91c9cb474d9e3d5bd0bb3c2daffe4cdfe0c2cbbc7"
+    contentType = "application/json;charset=utf-8"
+)

 func init() {
     cacheSwitcher = &switcher{}
+    alterMsg = buildDingDingMsg()
 }

 func Enabled() bool {

@@ -60,3 +71,33 @@ func Enabled() bool {
 func OpenCache() {
     cacheSwitcher.turnOn()
 }
+
+func senderDingDing() {
+    _, err := http.Post(url, contentType, strings.NewReader(alterMsg))
+    if err != nil {
+        logger.Error.Println(err)
+    }
+}
+
+func buildDingDingMsg() string {
+    msg := dingDingMsg{
+        MsgType: "text",
+        Text: text{
+            Content: "influxdb 写超时,已启用文件缓存",
+        },
+    }
+    msgStr, err := json.Marshal(msg)
+    if nil != err {
+        logger.Error.Println("无法序列化ding ding msg", err)
+    }
+    return string(msgStr)
+}
+
+type dingDingMsg struct {
+    MsgType string
+    Text    text
+}
+
+type text struct {
+    Content string
+}
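One detail of the alert added above: dingDingMsg and text carry no JSON struct tags, so buildDingDingMsg serializes them with capitalized keys. The stand-alone sketch below prints the exact payload the switcher would post; if the webhook expects lowercase keys (msgtype / text / content), struct tags would be needed, but that is an assumption about the endpoint, not something shown in this commit.

package main

import (
    "encoding/json"
    "fmt"
)

// Same shape as in service/file_cache/switcher.go: no json struct tags.
type dingDingMsg struct {
    MsgType string
    Text    text
}

type text struct {
    Content string
}

func main() {
    msg := dingDingMsg{
        MsgType: "text",
        Text:    text{Content: "influxdb 写超时,已启用文件缓存"},
    }
    out, _ := json.Marshal(msg)
    fmt.Println(string(out))
    // Prints: {"MsgType":"text","Text":{"Content":"influxdb 写超时,已启用文件缓存"}}
    // If the webhook requires lowercase keys, add `json:"..."` tags to the structs above.
}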