
first commit in gogs.

dean committed 6 years ago
commit 0c1c82e67d
100 changed files with 4248 additions and 0 deletions
  1. Doc/saltstack/index.md (+546 -0)
  2. Doc/saltstack/room_service.md (+248 -0)
  3. Doc/saltstack/systembase.md (+57 -0)
  4. ansible/README.md (+24 -0)
  5. ansible/hosts (+55 -0)
  6. ansible/hosts_liamo (+2 -0)
  7. ansible/hosts_nginx (+5 -0)
  8. ansible/roles/cassandra/README.md (+38 -0)
  9. ansible/roles/cassandra/defaults/main.yml (+2 -0)
  10. ansible/roles/cassandra/files/cassandra_daemon.sh (+15 -0)
  11. ansible/roles/cassandra/handlers/main.yml (+2 -0)
  12. ansible/roles/cassandra/meta/main.yml (+57 -0)
  13. ansible/roles/cassandra/tasks/cassandra.yml (+12 -0)
  14. ansible/roles/cassandra/tasks/main.yml (+3 -0)
  15. ansible/roles/cassandra/tests/inventory (+2 -0)
  16. ansible/roles/cassandra/tests/test.yml (+5 -0)
  17. ansible/roles/cassandra/vars/main.yml (+2 -0)
  18. ansible/roles/flume/README.md (+38 -0)
  19. ansible/roles/flume/defaults/main.yml (+2 -0)
  20. ansible/roles/flume/files/conf/backend.log.conf (+53 -0)
  21. ansible/roles/flume/files/conf/flume-env.sh (+32 -0)
  22. ansible/roles/flume/files/conf/log4j.properties (+68 -0)
  23. ansible/roles/flume/files/conf/nginx.access.conf (+322 -0)
  24. ansible/roles/flume/handlers/main.yml (+2 -0)
  25. ansible/roles/flume/meta/main.yml (+57 -0)
  26. ansible/roles/flume/tasks/flume_conf.yml (+35 -0)
  27. ansible/roles/flume/tasks/main.yml (+3 -0)
  28. ansible/roles/flume/tests/inventory (+2 -0)
  29. ansible/roles/flume/tests/test.yml (+5 -0)
  30. ansible/roles/flume/vars/main.yml (+2 -0)
  31. ansible/roles/limao/README.md (+38 -0)
  32. ansible/roles/limao/defaults/main.yml (+2 -0)
  33. ansible/roles/limao/files/docker.deb (BIN)
  34. ansible/roles/limao/files/getmyip (+11 -0)
  35. ansible/roles/limao/handlers/main.yml (+2 -0)
  36. ansible/roles/limao/meta/main.yml (+57 -0)
  37. ansible/roles/limao/tasks/docker_init.yml (+53 -0)
  38. ansible/roles/limao/tasks/main.yml (+4 -0)
  39. ansible/roles/limao/tasks/run_container.yml (+38 -0)
  40. ansible/roles/limao/templates/server_config.json (+119 -0)
  41. ansible/roles/limao/tests/inventory (+2 -0)
  42. ansible/roles/limao/tests/test.yml (+5 -0)
  43. ansible/roles/limao/vars/main.yml (+2 -0)
  44. ansible/roles/logstash/README.md (+38 -0)
  45. ansible/roles/logstash/defaults/main.yml (+2 -0)
  46. ansible/roles/logstash/files/config/jvm.options (+80 -0)
  47. ansible/roles/logstash/files/config/log4j2.properties (+83 -0)
  48. ansible/roles/logstash/files/config/logstash.yml (+214 -0)
  49. ansible/roles/logstash/files/config/nginx_access.conf (+97 -0)
  50. ansible/roles/logstash/files/config/pipelines.yml (+79 -0)
  51. ansible/roles/logstash/files/config/startup.options (+53 -0)
  52. ansible/roles/logstash/files/patterns/aws (+14 -0)
  53. ansible/roles/logstash/files/patterns/backend.conf (+1 -0)
  54. ansible/roles/logstash/files/patterns/bacula (+50 -0)
  55. ansible/roles/logstash/files/patterns/bind (+3 -0)
  56. ansible/roles/logstash/files/patterns/bro (+13 -0)
  57. ansible/roles/logstash/files/patterns/exim (+13 -0)
  58. ansible/roles/logstash/files/patterns/firewalls (+91 -0)
  59. ansible/roles/logstash/files/patterns/grok-patterns (+97 -0)
  60. ansible/roles/logstash/files/patterns/haproxy (+39 -0)
  61. ansible/roles/logstash/files/patterns/httpd (+15 -0)
  62. ansible/roles/logstash/files/patterns/java (+19 -0)
  63. ansible/roles/logstash/files/patterns/junos (+9 -0)
  64. ansible/roles/logstash/files/patterns/linux-syslog (+16 -0)
  65. ansible/roles/logstash/files/patterns/maven (+1 -0)
  66. ansible/roles/logstash/files/patterns/mcollective (+1 -0)
  67. ansible/roles/logstash/files/patterns/mcollective-patterns (+4 -0)
  68. ansible/roles/logstash/files/patterns/mongodb (+7 -0)
  69. ansible/roles/logstash/files/patterns/nagios (+124 -0)
  70. ansible/roles/logstash/files/patterns/nginx (+1 -0)
  71. ansible/roles/logstash/files/patterns/postgresql (+3 -0)
  72. ansible/roles/logstash/files/patterns/rails (+13 -0)
  73. ansible/roles/logstash/files/patterns/redis (+3 -0)
  74. ansible/roles/logstash/files/patterns/ruby (+2 -0)
  75. ansible/roles/logstash/files/patterns/squid (+4 -0)
  76. ansible/roles/logstash/handlers/main.yml (+2 -0)
  77. ansible/roles/logstash/meta/main.yml (+57 -0)
  78. ansible/roles/logstash/tasks/logstash_conf.yml (+38 -0)
  79. ansible/roles/logstash/tasks/main.yml (+3 -0)
  80. ansible/roles/logstash/tests/inventory (+2 -0)
  81. ansible/roles/logstash/tests/test.yml (+5 -0)
  82. ansible/roles/logstash/vars/main.yml (+2 -0)
  83. ansible/roles/nginx/README.md (+38 -0)
  84. ansible/roles/nginx/defaults/main.yml (+2 -0)
  85. ansible/roles/nginx/files/nginxd (+129 -0)
  86. ansible/roles/nginx/files/openresty-1.11.2.4.tar.gz (BIN)
  87. ansible/roles/nginx/files/pcre-8.41.zip (BIN)
  88. ansible/roles/nginx/files/sslkey/sslkey.key (+1 -0)
  89. ansible/roles/nginx/files/vhost/some vhost file over here (+0 -0)
  90. ansible/roles/nginx/handlers/main.yml (+2 -0)
  91. ansible/roles/nginx/meta/main.yml (+57 -0)
  92. ansible/roles/nginx/tasks/installer.yml (+152 -0)
  93. ansible/roles/nginx/tasks/main.yml (+5 -0)
  94. ansible/roles/nginx/tasks/upstream.yml (+119 -0)
  95. ansible/roles/nginx/tasks/vhost.yml (+35 -0)
  96. ansible/roles/nginx/tasks/vhost.yml.bk (+35 -0)
  97. ansible/roles/nginx/templates/nginx.conf (+316 -0)
  98. ansible/roles/nginx/templates/openresty.sh (+28 -0)
  99. ansible/roles/nginx/tests/inventory (+2 -0)
  100. ansible/roles/nginx/tests/test.yml (+0 -0)

+ 546 - 0
Doc/saltstack/index.md

@@ -0,0 +1,546 @@
+Saltstack
+======
+
+[toc]
+
+Introduction
+------
+All unified configuration management in production is done with SaltStack.
+Production tasks it handles:
+
+1. System initialization
+
+2. Configuration management of open-source software, including:
+	* nginx
+	* phpfpm
+	* redis
+	* mongodb
+	* memcache
+	* ...
+
+3. Managing the configuration and updates of every component developed in C++:
+	* roommaster
+	* chatroom
+	* gateserver
+	* secgateserver
+	* rediswriter
+	* ...
+
+## About SaltStack
+SaltStack is a fresh approach to infrastructure management: deployment is easy and it is up and running within minutes, it scales well enough to manage tens of thousands of servers with ease, and it is fast, with servers communicating in seconds.
+
+### Features
+1. Built-in configuration management
+2. Remote execution
+3. Implemented in Python
+4. Simple, convenient deployment
+5. Provides an API and custom modules; easily extended through Python
+6. Certificate-based authentication between master and minions, secure and reliable
+7. Message transport over ZeroMQ
+
+### How it works
+SaltStack uses a client/server model: the server side is Salt's salt-master, the client side is salt-minion, and minions communicate with the master through ZeroMQ message queues.
+
+When a minion comes online it first contacts the master and sends over its public key. At that point the master can see the minion's key with salt-key -L; once the minion key is accepted, the master and the minion trust each other.
+
+From then on the master can send the minion any command to execute. Salt has many execution modules, such as the cmd module, which ship with the minion installation; they normally live in your Python library path, and locate salt | grep /usr/ shows everything Salt ships with.
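+
+A minimal key-acceptance session might look like this (hypothetical minion ID `Dy-JXQ-101`):
+```bash
+# On the master: list keys in every state (accepted / pending / rejected)
+salt-key -L
+# Accept the pending key for one minion
+salt-key -a Dy-JXQ-101
+# Confirm the minion now responds
+salt 'Dy-JXQ-101' test.ping
+```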
+
+The steps in detail:
+
+1. The Salt master and minions pass messages over ZeroMQ, using its publish-subscribe pattern; supported transports include tcp and ipc.
+
+2. The salt command publishes, e.g., the cmd.run ls command from salt.client.LocalClient.cmd\_cli to the master, obtains a jid, and uses that jid to fetch the command's result.
+
+3. On receiving the command, the master sends it out to the target minions.
+
+4. A minion picks the command up off the message bus and hands it to minion.\_handle\_aes.
+
+5. minion.\_handle\_aes starts a local thread that calls cmdmod to run ls. When the thread finishes, it calls minion.\_return\_pub, which sends the result back to the master over the message bus.
+
+6. The master receives the returned result and calls master.\_handle\_aes, which writes the result to a file.
+
+7. salt.client.LocalClient.cmd\_cli polls for the job result and prints it to the terminal.
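+
+The jid flow above can be watched from the CLI; a sketch (hypothetical minion ID and jid):
+```bash
+# Verbose output prints the jid as the job is published
+salt -v 'Dy-JXQ-101' cmd.run 'uptime'
+# A finished job's result can be fetched again by its jid
+salt-run jobs.lookup_jid 20151103093928123456
+```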
+
+### Installation
+SaltStack packages are in the EPEL repository, so they can be installed directly with yum.
+
+1. Installing salt-master
+	```bash
+    yum install salt-master
+    ```
+
+2. Installing salt-minion
+	```bash
+    wget -O install_salt.sh https://bootstrap.saltstack.com --no-check-certificate && sh install_salt.sh
+    ```
+
+### Provided features
+
+#### Target
+Targets specify which minions a command or module applies to.
+
+1. Globbing
+	```bash
+    salt 'Dy-JXQ-101' test.ping
+    ```
+2. Regular expressions
+	```bash
+    salt -E 'Dy-JXQ-9[1-2]$' test.ping
+    ```
+3. grains
+	```bash
+    salt -G "os:CentOS" test.ping
+
+    # List all grains key/value pairs
+    salt 'test' grains.items
+    # List all grain names
+    salt 'test' grains.ls
+    # Show the value of a single grain
+    salt 'test' grains.item num_cpus
+    ```
+
+    Matching grains in the top file:
+    ```bash
+    'node_type:webserver':
+    - match: grain
+    - nginx
+    - phpfpm
+    ```
+
+4. nodegroups, which put minions into groups
+	```bash
+    # /etc/salt/master.d/nodegroup.conf
+    nodegroups:
+      test1: 'L@test1, test2 or test3'
+      test2: 'G@os:CentOS or test2'
+
+    salt -N test1 test.ping
+    ```
+
+#### Batch execution
+Much like the Target examples above, all kinds of commands can be run in batch:
+```bash
+	salt -E 'Dy-JXQ-10[1-9]$' cmd.run "uptime"
+```
+
+#### Multiple masters
+Multiple masters are not yet in formal use in our production environment.
+Points to note with multiple masters:
+
+1. Two masters do not share minion keys; they are independent of each other.
+2. file\_roots is not synced automatically; it must be maintained by hand, e.g. with git.
+3. pillar\_roots is not synced automatically; it must be maintained by hand, e.g. with git.
+4. Each master's configuration file is independent.
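+
+On the minion side, multi-master is just a list in the minion config; a sketch with placeholder hostnames:
+```bash
+# Point the minion at both masters (hostnames are placeholders)
+cat >> /etc/salt/minion <<'EOF'
+master:
+  - salt-master-bj.example.com
+  - salt-master-ext.example.com
+EOF
+service salt-minion restart
+```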
+
+#### Pillar
+Pillar is a very important part of Salt: it enables dynamic data adjustments and can be written in sls files.
+Typical use cases:
+
+1. Sensitive data such as passwords and keys
+2. Data specific to particular minions
+3. Dynamic content
+
+##### Inspecting a minion's pillar data
+```bash
+	salt '*' pillar.items
+	salt '*' pillar.item <key>
+	salt '*' pillar.get <key>:<key>
+```
+
+##### Writing pillar data
+
+1. Set pillar\_roots; the default is /srv/pillar
+
+2. Use pillar in the top file
+```bash
+'*':
+    - {{ environment }}.epel
+    - {{ environment }}.ntp
+    - {{ environment }}.systembase
+    - {{ environment }}.openssh
+    - {{ environment }}.zabbix_agent
+    - {{ environment }}.douyu_sudo
+```
+
+3. Use pillar data in states through Jinja
+```python
+{% set zabbix_conf = salt['pillar.get']('zabbix_agent:config_settings', config_map, merge=True) %}
+```
+
+#### Grains
+Grains describe a server's static information; you can add your own descriptions of a server node to them.
+Grains are parsed with the YAML module, so alignment is mandatory; otherwise unpredictable errors occur.
+
+Grains key/value pairs usually come from three sources:
+
+1. For common key/value pairs such as the OS version, NIC addresses, and MAC addresses, Salt has a set of native grain definitions, produced by Python scripts that upstream Salt provides and builds in.
+
+2. Extra grain definitions can be added by writing /etc/salt/grains on the minion:
+```bash
+	environment: live
+	node_type:
+	  - webserver
+	  - memcache
+	  - room_service
+	  - redis
+	  - nginx_lua
+```
+
+3. Custom grain values can be defined in Python scripts under the \_grains directory of the master's file\_roots (salt/\_grains here):
+```python
+def hello():
+    agrain = {}
+    agrain['hello'] = 'saltstack'
+    return agrain
+```
+Returning a dict is all Salt requires of a custom grain.
+The py scripts under _grains are executed in three situations:
+	* when salt-minion restarts
+	* when salt 'test' saltutil.sync_all is run on the master
+	* when salt 'test' state.highstate is run on the master
+Note: the room\_services state module uses the dy\_extends.py script to apply per-server customizations.
+
+
+#### State
+Its core is the SLS (salt state file); the files default to YAML format and use the Jinja template engine by default.
+State is the heart of Salt's configuration management: every configuration definition is parsed by Salt from state files.
+
+##### Example file
+Taken from init.sls in the ntp module:
+```python
+# Include :download:`map file <map.jinja>` of OS-specific package names and
+# file paths. Values can be overridden using Pillar.
+{% from "ntp/map.jinja" import ntp with context %}
+{% set service = {True: 'running', False: 'dead'} %}
+
+ntp:
+  pkg.installed:
+    - name: {{ ntp.lookup.package }}
+
+{% if 'ntp_conf' in ntp.lookup %}
+ntpd_conf:
+  file.managed:
+    - name: {{ ntp.lookup.ntp_conf }}
+    - source: salt://ntp/files/ntp.conf
+    - template: jinja
+    - context:
+      config: {{ ntp.settings.ntp_conf }}
+    - require:
+      - pkg: ntp
+{% endif %}
+
+{% if salt['service.status']('ntpd') != True %}
+correct_time:
+  cmd.run:
+    - name: ntpdate 2.asia.pool.ntp.org
+{% endif %}
+
+{% if 'ntpd' in ntp.settings %}
+ntpd:
+  service.{{ service.get(ntp.settings.ntpd) }}:
+    - name: {{ ntp.lookup.service }}
+    - enable: {{ ntp.settings.ntpd }}
+    - require:
+      - pkg: ntp
+      {% if salt['service.status']('ntpd') != True %}
+      - cmd: correct_time
+      {% endif %}
+    - watch:
+      - file: ntpd_conf
+{% endif %}
+```
+
+With proper configuration, its configuration-management power and convenience go well beyond Puppet's.
+
+##### Salt's render\_system
+Salt's default renderer is yaml\_jinja. When Salt processes our sls files it first runs them through Jinja2, then hands the result to the YAML processor, which produces the Python data structures Salt needs. Besides yaml\_jinja there are also yaml\_mako, yaml\_wempy, py, and pydsl.
+yaml\_jinja is the default; py states are written in pure Python.
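+
+The render pipeline's output can be inspected without applying anything; for example, for the ntp state above (assuming it is present in the local file_roots):
+```bash
+# Render ntp/init.sls through jinja, then yaml, and print the resulting data structure
+salt-call --local state.show_sls ntp
+```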
+
+
+## Production use of SaltStack
+
+### Conventions
+Driven by our business needs and certain security considerations,
+production SaltStack is split into:
+	* the Beijing datacenter, with an independent salt-master
+	* the external datacenters, with an independent salt-master
+
+The code for the two salt-masters is essentially the same; the room\_service and nginx configurations differ slightly.
+For the nginx module the difference is mainly in the external proxy vhosts;
+room\_service on the external side mainly uses the MsgRepeater configuration.
+
+### Organization of the configuration directories
+Salt state configuration is kept uniform, while the variables that differ per environment are separated out using pillar's dynamic features.
+Concretely, the following directory structure is used:
+```bash
+├── pillar
+│   ├── bj-test
+│   │   ├── douyu_sudo.sls
+│   │   ├── epel.sls
+│   │   ├── init.sls
+│   │   ├── memcache.sls
+│   │   ├── mongodb.sls
+│   │   ├── nginx_lua.sls
+│   │   ├── nginx.sls
+│   │   ├── ntp.sls
+│   │   ├── openssh.sls
+│   │   ├── phpfpm.sls
+│   │   ├── redis.sls
+│   │   ├── room_service.sls
+│   │   ├── systembase.sls
+│   │   └── zabbix_agent.sls
+│   ├── dev
+│   │   ├── douyu_sudo.sls
+│   │   ├── epel.sls
+│   │   ├── init.sls
+│   │   ├── memcache.sls
+│   │   ├── mongodb.sls
+│   │   ├── nginx.sls
+│   │   ├── ntp.sls
+│   │   ├── openssh.sls
+│   │   ├── phpfpm.sls
+│   │   ├── redis.sls
+│   │   ├── room_service.sls
+│   │   ├── systembase.sls
+│   │   └── zabbix_agent.sls
+│   ├── live
+│   │   ├── douyu_sudo.sls
+│   │   ├── epel.sls
+│   │   ├── init.sls
+│   │   ├── memcache.sls
+│   │   ├── mongodb.sls
+│   │   ├── nginx_lua.sls
+│   │   ├── nginx.sls
+│   │   ├── ntp.sls
+│   │   ├── openssh.sls
+│   │   ├── phpfpm.sls
+│   │   ├── redis.sls
+│   │   ├── room_service.sls
+│   │   ├── systembase.sls
+│   │   └── zabbix_agent.sls
+│   ├── pressure
+│   │   ├── douyu_sudo.sls
+│   │   ├── epel.sls
+│   │   ├── init.sls
+│   │   ├── memcache.sls
+│   │   ├── mongodb.sls
+│   │   ├── nginx.sls
+│   │   ├── ntp.sls
+│   │   ├── openssh.sls
+│   │   ├── phpfpm.sls
+│   │   ├── redis.sls
+│   │   ├── room_service.sls
+│   │   ├── systembase.sls
+│   │   └── zabbix_agent.sls
+│   ├── prod
+│   │   ├── douyu_sudo.sls
+│   │   ├── epel.sls
+│   │   ├── init.sls
+│   │   ├── memcache.sls
+│   │   ├── mongodb.sls
+│   │   ├── nginx.sls
+│   │   ├── ntp.sls
+│   │   ├── openssh.sls
+│   │   ├── phpfpm.sls
+│   │   ├── redis.sls
+│   │   ├── room_service.sls
+│   │   ├── systembase.sls
+│   │   └── zabbix_agent.sls
+│   └── top.sls
+└── salt
+    ├── douyu_sudo
+    ├── epel
+    ├── _grains
+    ├── memcache
+    ├── mongodb
+    ├── mysql
+    ├── nginx
+    ├── nginx_lua
+    ├── ntp
+    ├── openssh
+    ├── phpfpm
+    ├── redis
+    ├── room_service
+    ├── systembase
+    ├── zabbix_agent
+    └── top.sls
+```
+
+#### Distinguishing environments
+The pillar top.sls entry file assigns matching hosts to their environment.
+
+1. Every host must set 'environment: prod/live/pressure/dev/bj-test' in /etc/salt/grains,
+	which marks whether the host belongs to a production or a test environment.
+
+2. Example top.sls files
+```python
+# top.sls
+{% set environment = salt['grains.get']('environment', '') %}
+include:
+  - {{ environment }}
+```
+```python
+# init.sls
+{% set environment = salt['grains.get']('environment', '') %}
+{{ saltenv if saltenv != None else env }}:
+  '*':
+    - {{ environment }}.epel
+    - {{ environment }}.ntp
+    - {{ environment }}.systembase
+    - {{ environment }}.openssh
+    - {{ environment }}.zabbix_agent
+    - {{ environment }}.douyu_sudo
+
+  'node_type:webserver':
+    - match: grain
+    - {{ environment }}.nginx
+    - {{ environment }}.phpfpm
+
+  'node_type:nginx':
+    - match: grain
+    - {{ environment }}.nginx
+
+  'node_type:nginx_lua':
+    - match: grain
+    - {{ environment }}.nginx_lua
+
+  'node_type:phpfpm':
+    - match: grain
+    - {{ environment }}.phpfpm
+
+  'node_type:memcache':
+    - match: grain
+    - {{ environment }}.memcache
+
+  'node_type:redis':
+    - match: grain
+    - {{ environment }}.redis
+
+  'node_type:mongodb':
+    - match: grain
+    - {{ environment }}.mongodb
+
+  'node_type:room_service':
+    - match: grain
+    - {{ environment }}.room_service
+```
+
+Here node_type is a custom grain used to distinguish which service types a host should have installed.
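+
+As a sketch (hypothetical minion ID), assigning a host an environment and roles and then applying state could look like:
+```bash
+# Set the environment and role grains on the minion
+salt 'Dy-JXQ-101' grains.setval environment prod
+salt 'Dy-JXQ-101' grains.setval node_type '[webserver, redis]'
+# Apply whatever top.sls now matches for this host
+salt 'Dy-JXQ-101' state.highstate
+```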
+
+#### Typical layout of a salt module
+Essentially all the modules in use are our own; the available open-source modules could not meet our business needs.
+Take the nginx directory structure as an example:
+```nginx
+nginx
+├── files
+│   ├── conf
+│   │   ├── mime.types
+│   │   └── nginx.conf
+│   ├── initd
+│   │   └── nginxd
+│   ├── logrotate
+│   │   ├── nginxd
+│   │   └── nginx_logfile_logrotate.py
+│   ├── monitor
+│   │   ├── nginx.conf
+│   │   ├── nginx_monitor.conf
+│   │   └── nginx_status.sh
+│   ├── nginx_install.sh
+│   ├── ssl_conf
+│   │   ├── server.key
+│   │   ├── server.pem
+│   │   ├── ybadmin.crt
+│   │   └── ybadminv1.key
+│   ├── tar_package
+│   │   ├── nginx-1.8.0.tar.gz
+│   │   ├── Nginx-accesskey-2.0.3.tar.gz
+│   │   └── ngx_cache_purge-master.zip
+│   ├── vhost
+│   │   ├── cooperate
+│   │   │   ├── backend
+│   │   │   │   ├── cooperate.backend.036yx.com.conf
+│   │   │   │   ├── cooperate.backend.2144.com.conf
+│   │   │   │   └── cooperate.backend.ilongyuan.com.cn.conf
+│   │   │   └── edge
+│   │   │       ├── cooperate.proxy.036yx.com.conf
+│   │   │       ├── cooperate.proxy.2144.com.conf
+│   │   │       └── cooperate.proxy.ilongyuan.com.cn.conf
+│   │   └── core
+│   │       ├── backend
+│   │       │   ├── adsys.douyutv.com.conf
+│   │       │   ├── api.douyutv.com.conf
+│   │       │   ├── file.douyutv.com.conf
+│   │       │   ├── ssl.douyutv.com.conf
+│   │       │   ├── static2.douyutv.com.conf
+│   │       │   ├── staticdn.douyutv.com.conf
+│   │       │   ├── staticlive.douyutv.com.conf
+│   │       │   ├── uc.douyutv.com.conf
+│   │       │   ├── upload.douyutv.com.conf
+│   │       │   └── www.douyutv.com.conf
+│   │       └── edge
+│   │           ├── proxy_adsys.douyutv.com.conf
+│   │           ├── proxy_api.douyutv.com.conf
+│   │           ├── proxy_ssl.douyutv.com.conf
+│   │           ├── proxy_static2.douyutv.com.conf
+│   │           ├── proxy_staticlive.douyutv.com.conf
+│   │           ├── proxy_uc.douyutv.com.conf
+│   │           ├── proxy_www.douyutv.com.conf
+│   │           └── proxy_ybadmin.douyutv.com.conf
+│   └── web_rsync
+│       ├── exclude.txt
+│       ├── rsync.pwd
+│       └── webrsync.sh
+├── init.sls
+├── map.jinja
+└── states
+    ├── config.sls
+    ├── install.sls
+    ├── logrotate.sls
+    ├── monitor.sls
+    ├── service.sls
+    ├── user.sls
+    └── vhost_conf
+        ├── cooperate_backend_vhost_group.sls
+        ├── cooperate_proxy_vhost_group.sls
+        ├── core_backend_vhost_adsys.sls
+        ├── core_backend_vhost_fileupload.sls
+        ├── core_backend_vhost_group.sls
+        ├── core_backend_vhost_ssl.sls
+        ├── core_backend_vhost_staticdn.sls
+        ├── core_backend_vhost_uc.sls
+        ├── core_proxy_vhost_group.sls
+        ├── core_proxy_vhost_ssl.sls
+        ├── douyu.sls
+        └── yuba_proxy_vhost_ybadmin.sls
+```
+
+1. init.sls is the entry point for the whole module; it pulls in the state definitions under the states directory.
+2. map.jinja holds the module's default variable settings, which can be overridden with pillar; this file is also the source of the variables used by the other sls definition files.
+3. The files directory holds every file to be shipped, including tarballs and the various configuration files.
+4. The states directory holds all the configuration sls files.
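+
+A single module can also be applied on its own, targeted by the node_type grain (a sketch):
+```bash
+# Apply only the nginx module to hosts carrying the nginx role
+salt -G 'node_type:nginx' state.sls nginx
+```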
+
+
+
+
+### Modules in use
+The modules currently in use:
+```bash
+salt
+    ├── douyu_sudo		# controls sudo privileges on the servers
+    ├── epel			# manages the EPEL repo on all CentOS hosts
+    ├── _grains			# grains extensions
+    ├── memcache		# memcache configuration management
+    ├── mongodb			# mongodb configuration management, not yet live
+    ├── mysql			# mysql configuration management, not yet live
+    ├── nginx			# nginx configuration management, covering every webserver in production
+    ├── nginx_lua		# nginx_lua environment module, added at PHP's request
+    ├── ntp				# ntp configuration management; keeps every server's ntp client in sync
+    ├── openssh			# ssh service management for the servers
+    ├── phpfpm			# phpfpm process configuration management; production config is uniform, with a few special phpfpm cases
+    ├── redis			# redis configuration management; controls all redis master/slave relationships
+    ├── room_service	# configuration management for the C++ components; some features not yet enabled
+    ├── systembase		# system initialization configuration management, including packages and kernel parameter tuning
+    ├── zabbix_agent	# zabbix_agent configuration management, the monitoring client
+    └── top.sls			# entry file
+```

+ 248 - 0
Doc/saltstack/room_service.md

@@ -0,0 +1,248 @@
+SaltStack room\_service module
+======
+
+Introduction
+------
+The room\_service module handles configuration management for the C++ components,
+including:
+
+1. Directory layout and deployment of the C++ components
+2. Configuration management of the C++ components
+3. Updating a specified component through this module
+
+## Module directory structure
+The room\_service directory layout:
+
+* Under the pillar directory
+	```bash
+	pillar/
+	├── prod
+	│   ├── room_service.sls
+	└── top.sls
+	```
+
+* Under the salt directory
+	```bash
+	salt/_grains
+	└── dy_extend.py		# custom grains extension; fetches each server's roles from mysql
+	salt/room_service
+	├── files
+	│   ├── AdManagerServer
+	│   │   ├── AdManagerServer
+	│   │   ├── AdManagerServer0907-01r.tar.gz
+	│   │   ├── config.json
+	│   │   └── TaskPerfomance.txt
+	│   ├── AuthServer
+	│   │   ├── AuthServer
+	│   │   ├── config.json
+	│   │   └── TaskPerfomance.txt
+	│   ├── ChatRoom
+	│   │   ├── ChatRoom
+	│   │   ├── ChatRoom-20151010104032.tar.gz
+	│   │   ├── ChatRoom-20151023095908.tar.gz
+	│   │   ├── ChatRoom-20151028111604.tar.gz
+	│   │   ├── ChatRoom-20151028180908.tar.gz
+	│   │   ├── ChatRoom-20151029153541.tar.gz
+	│   │   ├── config.json
+	│   │   └── TaskPerfomance.txt
+	│   ├── EmailServer
+	│   │   ├── config.json
+	│   │   ├── EmailServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── GambleServer
+	│   │   ├── config.json
+	│   │   ├── GambleServer
+	│   │   ├── GambleServer-20150921181341.tar.gz
+	│   │   ├── GambleServer-20151019174029.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── GateServer
+	│   │   ├── config.json
+	│   │   ├── config.json_bak
+	│   │   ├── GateServer
+	│   │   ├── GateServer-20151012141957.tar.gz
+	│   │   ├── GateServer-20151022091720.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── libexec
+	│   │   ├── component_controler.py
+	│   │   └── msg_server_info.sql
+	│   ├── LocationServer
+	│   │   ├── config.txt
+	│   │   ├── LocationServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── LogServer
+	│   │   ├── config.json
+	│   │   ├── config.txt
+	│   │   ├── LogServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── MobileGPServer
+	│   │   ├── config.json
+	│   │   ├── MobileGPServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── MsgRepeater
+	│   │   ├── config.json
+	│   │   └── MsgRepeater
+	│   ├── MsgServer
+	│   │   ├── config.json
+	│   │   ├── MsgServer
+	│   │   ├── MsgServer1028r-4.tar.gz
+	│   │   ├── msgserver_bak.tar.gz
+	│   │   ├── server_list_inter.json
+	│   │   ├── server_list_online.json
+	│   │   └── TaskPerfomance.txt
+	│   ├── RankListServer
+	│   │   ├── config.json
+	│   │   ├── RanklistServer
+	│   │   ├── RanklistServer-201510291946.tar.gz
+	│   │   ├── RanklistServer-20151030131505.tar.gz
+	│   │   ├── RanklistServer-20151030163459.tar.gz
+	│   │   ├── RanklistServer-20151101013014.tar.gz
+	│   │   ├── RanklistServer-20151102142846.tar.gz
+	│   │   ├── RanklistServer-20151103093928.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── RedisWriter
+	│   │   ├── config.json
+	│   │   ├── config.json_bak
+	│   │   ├── RedisWriter
+	│   │   ├── RedisWriter14
+	│   │   └── TaskPerfomance.txt
+	│   ├── RedPacketServer
+	│   │   ├── config.json
+	│   │   ├── RedPacketServer
+	│   │   ├── RedPacketServer-20150922124719.tar.gz
+	│   │   ├── RedPacketServer-20150924101646.tar.gz
+	│   │   ├── RedPacketServer-20150928104251.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── room_log_clear.sh
+	│   ├── RoomMaster
+	│   │   ├── config.json
+	│   │   ├── RoomMaster
+	│   │   ├── RoomMaster-20150915185258.tar.gz
+	│   │   ├── RoomMaster-20151028180908.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── room_rsync
+	│   │   ├── room.pwd
+	│   │   └── roomrsync.sh
+	│   ├── SecGateServer
+	│   │   ├── config.json
+	│   │   ├── SecGateServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── SecurityServer
+	│   │   ├── config.txt
+	│   │   ├── SecurityServer
+	│   │   ├── TaskPerfomance.txt
+	│   │   └── verify_keys.txt
+	│   ├── SockPolicy
+	│   │   ├── cross-domain-policy.xml
+	│   │   └── sockpolicy
+	│   ├── StatusServer
+	│   │   ├── config.json
+	│   │   ├── StatusServer
+	│   │   ├── StatusServer-20151023091143.tar.gz
+	│   │   ├── StatusServer-20151026140832.tar.gz
+	│   │   └── TaskPerfomance.txt
+	│   ├── SyslogServer
+	│   │   ├── config.json
+	│   │   ├── DBCmd.txt
+	│   │   ├── Syslog-djw-20151016-183332.tar.gz
+	│   │   ├── SyslogServer
+	│   │   └── TaskPerfomance.txt
+	│   ├── SystemMaster
+	│   │   ├── config.txt
+	│   │   ├── SystemMaster
+	│   │   └── TaskPerfomance.txt
+	│   ├── TaskServer
+	│   │   ├── TaskPerfomance.txt
+	│   │   ├── TaskServer_business
+	│   │   │   ├── config.json
+	│   │   │   ├── TaskServer
+	│   │   │   ├── TaskServer-20151021142549.tar.gz
+	│   │   │   ├── TaskServer-20151022101310.tar.gz
+	│   │   │   └── TaskServer-20151102164213.tar.gz
+	│   │   ├── TaskServer_common
+	│   │   │   ├── config.json
+	│   │   │   └── TaskServer_common
+	│   │   └── TaskServerProxy
+	│   │       ├── config.json
+	│   │       └── TaskServerProxy
+	│   ├── tools
+	│   │   ├── addMsgServer.py
+	│   │   ├── component_controler.py
+	│   │   ├── DyComponentSoketAgent.py
+	│   │   ├── DYProcessManager.py
+	│   │   └── ProcessCpuUseage.py
+	│   ├── TransactServer
+	│   │   ├── config.json
+	│   │   ├── TaskPerfomance.txt
+	│   │   └── TransactServer
+	│   ├── WeightConvertServer
+	│   │   ├── config.txt
+	│   │   ├── TaskPerfomance.txt
+	│   │   └── WeightConvertServer
+	│   ├── YuBaUserServer
+	│   │   ├── config.json
+	│   │   ├── TaskPerfomance.txt
+	│   │   └── YuBaUserServer
+	│   ├── YuChiSecServer
+	│   │   ├── config.json
+	│   │   ├── TaskPerfomance.txt
+	│   │   └── YuChiSecondServer
+	│   └── YuChiTransact
+	│       ├── config.json
+	│       ├── TaskPerfomance.txt
+	│       └── YuChiTransactServer
+	├── init.sls
+	├── map.jinja
+	└── states
+		├── config.sls					# defines the run directories for the C++ components and the Python libraries their scripts need
+		├── logrotate.sls				# defines rotation: each server keeps 3 days of logs; the rest is moved to the mfs cluster
+		├── server
+		│   ├── AdManagerServer.sls
+		│   ├── AuthServer.sls
+		│   ├── ChatRoom.sls
+		│   ├── EmailServer.sls
+		│   ├── GambleServer.sls
+		│   ├── GateServer.sls
+		│   ├── LocationServer.sls
+		│   ├── LogServer.sls
+		│   ├── MobileGPServer.sls
+		│   ├── MsgRepeater.sls
+		│   ├── MsgServer.sls
+		│   ├── RankListServer.sls
+		│   ├── RedisWriter.sls
+		│   ├── RedPacketServer.sls
+		│   ├── RoomMaster.sls
+		│   ├── SecGateServer.sls
+		│   ├── SecurityServer.sls
+		│   ├── sockpolicy.sls
+		│   ├── StatusServer.sls
+		│   ├── SyslogServer.sls
+		│   ├── SystemMaster.sls
+		│   ├── TaskServer.sls
+		│   ├── TransactServer.sls
+		│   ├── WeightConvertServer.sls
+		│   ├── YuBaUserServer.sls
+		│   ├── YuChiSecServer.sls
+		│   └── YuChiTransact.sls
+		└── user.sls					# defines the user and group the C++ components run as
+	```
+
+### The custom grains script dy\_extend.py
+dy\_extend.py underpins this module: it is distributed to the agents and reads the local machine's role definitions from mysql.
+Notes:
+
+1. Whenever a server's roles change, or a new server is added, the script must be redistributed and re-run with a command (see the sketch below).
+2. dy\_extend.py needs to read stt\_config.web\_server\_info and stt\_config.msg\_server\_info.
+3. The script requires the MySQLdb module, which config.sls already defines (on a server's first deployment this is not yet in place, so a second run is needed).
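+
+A minimal redistribution run might look like this (hypothetical minion ID): saltutil.sync_grains pushes the _grains scripts, and highstate re-applies state with the fresh roles:
+```bash
+# Push the updated _grains scripts (including dy_extend.py) to the minion
+salt 'Dy-JXQ-101' saltutil.sync_grains
+# Re-apply state so the new role definitions take effect
+salt 'Dy-JXQ-101' state.highstate
+```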
+
+### The server directory
+Everything under the server directory is per-component configuration management; most services are configured alike:
+
+1. Create a directory named after the server\_name in mysql; the binary takes the same name as the directory.
+2. Create the TaskPerformance.txt file.
+3. Create the log directory.
+4. Create the config.json or config.txt file.
+5. Individual components have a restart state, so a restart happens whenever the configuration file changes or the binary is updated.
+
+### The map.jinja file
+This file defines a large set of defaults, plus the host-specific configuration read from mysql.
+All service variables are pulled from this file.

+ 57 - 0
Doc/saltstack/systembase.md

@@ -0,0 +1,57 @@
+SaltStack systembase module
+======
+
+Introduction
+------
+The systembase module handles a server's initial configuration,
+including:
+
+1. Installing the server's base rpm packages
+2. Tuning the server's kernel parameters
+3. Deploying our own logrotate rotation directory
+
+## Module directory structure
+The systembase directory layout:
+
+* Under the pillar directory
+	```bash
+	pillar/
+	├── prod
+	│   ├── systembase.sls
+	└── top.sls
+	```
+
+* Under the salt directory
+	```bash
+	salt/systembase
+	├── files
+	│   ├── 90-nproc.conf			# process count limits
+	│   ├── clock					# timezone setting
+	│   ├── douyu_logrotate			# the douyu_logrotate directory, for routine log rotation
+	│   │   ├── cron.daily
+	│   │   │   └── logrotate
+	│   │   ├── libexec
+	│   │   ├── logrotate.conf
+	│   │   └── logrotate.d
+	│   ├── limits.conf				# file descriptor limits
+	│   ├── localtime				# Shanghai timezone
+	│   ├── motd					# login banner (the default string is douyutv)
+	│   ├── profile					# server environment variables, including history length and record format
+	│   ├── selinux					# disables selinux by default
+	│   └── sysctl_config.sh		# kernel initialization script
+	├── init.sls
+	├── map.jinja
+	└── states
+		├── common_conf.sls
+		├── douyu_logrotate.sls
+		└── pkgs.sls
+	```
+
+### The states files
+1. pkgs.sls reads the list of packages the server needs from map.jinja and installs them.
+2. common\_conf.sls defines where each configuration file goes.
+3. douyu\_logrotate.sls defines where the rotation directory goes.
+
+### The sysctl\_config.sh script
+This script looks at the server's memory size and CPU core count and tunes kernel parameters accordingly.
+See the script itself for the specific tuning; it is annotated throughout.
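+
+The sizing facts that tuning is based on can be gathered along these lines (a sketch, not the actual script):
+```bash
+# Read total memory and CPU core count, the inputs to the kernel tuning
+mem_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
+cpu_cores=$(grep -c ^processor /proc/cpuinfo)
+echo "memory: ${mem_kb} kB, cores: ${cpu_cores}"
+```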

+ 24 - 0
ansible/README.md

@@ -0,0 +1,24 @@
+
+#### Disable the 243 upstream
+  ansible-playbook -i hosts site.yml  --tags java_upstream  -e "close_243_javabackend=True"
+- - -
+#### Enable the 243 upstream
+  ansible-playbook -i hosts site.yml  --tags java_upstream  -e "open_243_javabackend=True"
+- - -
+#### Sync the templated nginx.conf directly
+   ansible-playbook -i hosts site.yml  --tags  nginx_vhost
+- - -
+#### Install nginx
+  ansible-playbook -i hosts site.yml  --tags  nginx_installer
+- - -
+#### Sync the flume configuration
+  ansible-playbook -i hosts site.yml  --tags  flume_conf
+- - -
+#### Sync the hosts file
+  ansible-playbook -i hosts site.yml --tags systembase_hosts
+- - -
+#### Generate the configuration and start the container
+  ansible-playbook -i hosts_liamo site.yml   -e "region=SG" -e "node_id=101" -e "opposite_ip=1.2.3.4" -e "sec_region=sz" -e "sec_opposite_ip=5.6.7.8" --tags limao_proxy,docker_init
+- - -
+#### When proxy_list is empty, omit sec_opposite_ip to generate an empty list
+  ansible-playbook -i hosts_liamo site.yml   -e "region=SG" -e "node_id=101" -e "opposite_ip=1.2.3.4" -e "sec_region=sz" --tags limao_proxy

+ 55 - 0
ansible/hosts

@@ -0,0 +1,55 @@
+[all]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[php]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[phptest]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[cassandra]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[pay]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 248
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 249
+
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[nginx]
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106 
+
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+#111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[init]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password"
+
+[flume]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106
+
+[logstash]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106
+
+[java]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106
+#111.111.111.111  ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 134
+

+ 2 - 0
ansible/hosts_liamo

@@ -0,0 +1,2 @@
+[limao]
+111.111.111.111 ansible_python_interpreter=/usr/bin/python3 ansible_ssh_user=root ansible_ssh_pass="ssh_password"

+ 5 - 0
ansible/hosts_nginx

@@ -0,0 +1,5 @@
+[nginx]
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 165
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 243
+111.111.111.111 ansible_ssh_user=root ansible_ssh_pass="ssh_password" # 106 
+

+ 38 - 0
ansible/roles/cassandra/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
ansible/roles/cassandra/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for cassandra

+ 15 - 0
ansible/roles/cassandra/files/cassandra_daemon.sh

@@ -0,0 +1,15 @@
+#!/bin/bash
+# Watchdog: keep exactly one Cassandra process running on this host.
+while true
+do
+    # Count running copies of this watchdog and of CassandraDaemon.
+    pid_myself_count=`ps -ef |grep [/]opt/tools/cassandra_daemon.sh  |wc -l`
+    pid_cassandra_count=`ps -ef |grep org.apache.cassandra.service.CassandraDaemon |grep -v grep |wc -l`
+    # Bail out if another copy of this watchdog is already running.
+    if [ $pid_myself_count -gt 2 ]
+    then
+        exit 1
+    fi
+    # (Re)start Cassandra in the foreground if it is not running;
+    # this blocks until Cassandra exits, then the loop restarts it.
+    if [ $pid_cassandra_count -lt 1 ]
+    then
+        cassandra -f >/dev/null 2>&1
+    fi
+    sleep 2
+done

+ 2 - 0
ansible/roles/cassandra/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for cassandra

+ 57 - 0
ansible/roles/cassandra/meta/main.yml

@@ -0,0 +1,57 @@
+galaxy_info:
+  author: your name
+  description: your description
+  company: your company (optional)
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+
+  min_ansible_version: 1.2
+
+  # If this a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If Travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: Fedora
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is a keyword that describes
+    # and categorizes the role. Users find roles by searching for tags. Be sure to
+    # remove the '[]' above, if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+    #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.

+ 12 - 0
ansible/roles/cassandra/tasks/cassandra.yml

@@ -0,0 +1,12 @@
+---
+- name: create dir /opt/tools
+  file:
+    path: /opt/tools
+    state: directory
+    mode: 0755
+
+- name: copy cassandra_daemon.sh to /opt/tools/
+  copy: src=cassandra_daemon.sh dest=/opt/tools/cassandra_daemon.sh owner=root group=root mode=0755
+
+- name: run daemon in the background
+  shell: nohup bash /opt/tools/cassandra_daemon.sh >/dev/null 2>&1 &

+ 3 - 0
ansible/roles/cassandra/tasks/main.yml

@@ -0,0 +1,3 @@
+---
+# tasks file for cassandra
+- include: cassandra.yml

+ 2 - 0
ansible/roles/cassandra/tests/inventory

@@ -0,0 +1,2 @@
+localhost
+

+ 5 - 0
ansible/roles/cassandra/tests/test.yml

@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - cassandra

+ 2 - 0
ansible/roles/cassandra/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for cassandra

+ 38 - 0
ansible/roles/flume/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
ansible/roles/flume/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for flume

+ 53 - 0
ansible/roles/flume/files/conf/backend.log.conf

@@ -0,0 +1,53 @@
+agent05.sources = source1 
+agent05.channels = sink01 sink02
+agent05.sinks = elasticSearch
+
+####source1######
+agent05.sources.source1.type = exec
+agent05.sources.source1.command = tail -F /var/prog/backend/logs/backend.log
+agent05.sources.source1.restart = true
+agent05.sources.source1.logStdErr = true
+agent05.sources.source1.batchSize = 500
+agent05.sources.source1.channels = sink02
+
+
+agent05.sources.source1.interceptors = interceptor1 interceptor2 interceptor3
+agent05.sources.source1.interceptors.interceptor1.type = host
+agent05.sources.source1.interceptors.interceptor1.hostHeader = host
+
+agent05.sources.source1.interceptors.interceptor2.type = regex_extractor
+agent05.sources.source1.interceptors.interceptor2.regex = ^[^\\s]*\\s(\\d{0,4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2},\\d{1,3})\\s(.*)$
+agent05.sources.source1.interceptors.interceptor2.serializers = s1 s2 
+agent05.sources.source1.interceptors.interceptor2.serializers.s1.name = datetime
+agent05.sources.source1.interceptors.interceptor2.serializers.s2.name = content
+
+agent05.sources.source1.interceptors.interceptor3.type = timestamp
+
+agent05.sinks.elasticSearch.type = org.apache.flume.sink.elasticsearch.ElasticSearchSink
+agent05.sinks.elasticSearch.timeZone=Asia/Shanghai
+agent05.sinks.elasticSearch.channel = sink02
+agent05.sinks.elasticSearch.batchSize = 2000
+agent05.sinks.elasticSearch.hostNames = 10.31.88.120:9300
+agent05.sinks.elasticSearch.indexName = backend_log
+agent05.sinks.elasticSearch.indexType = static
+agent05.sinks.elasticSearch.clusterName = elasticsearch-zzb1
+agent05.sinks.elasticSearch.client = transport
+agent05.sinks.elasticSearch.serializer = org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer
+
+# Each sink's type must be defined
+agent05.sinks.loggerSink.type = logger
+
+#Specify the channel the sink should use
+agent05.sinks.loggerSink.channel = sink01
+
+# Each channel's type is defined.
+agent05.channels.sink01.type = memory
+agent05.channels.sink01.capacity = 10000
+agent05.channels.sink01.transactionCapacity = 10000
+agent05.channels.sink01.byteCapacityBufferPercentage = 20
+agent05.channels.sink01.keep-alive = 30
+
+agent05.channels.sink02.type = file
+agent05.channels.sink02.checkpointDir = /data/flume/data/checkpointDir1
+agent05.channels.sink02.dataDirs = /data/flume/data/dataDirs1
+

+ 32 - 0
ansible/roles/flume/files/conf/flume-env.sh

@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Environment variables can be set here.
+
+#JAVA_HOME=/usr/lib/jvm/java-6-sun
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+#JAVA_OPTS="-Xms8192m -Xmx8192m -Dcom.sun.management.jmxremote"
+JAVA_OPTS="-Xms2048m -Xmx2048m -XX:NewSize=1500m  -XX:SurvivorRatio=4 -XX:MaxDirectMemorySize=128m \
+-XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=90 -XX:+CMSParallelRemarkEnabled \
+-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:/home/flume/logs/gc.log"
+
+# Note that the Flume conf directory is always included in the classpath.
+#FLUME_CLASSPATH=""
+

+ 68 - 0
ansible/roles/flume/files/conf/log4j.properties

@@ -0,0 +1,68 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
+#
+# For testing, it may also be convenient to specify
+# -Dflume.root.logger=DEBUG,console when launching flume.
+
+#flume.root.logger=DEBUG,console
+flume.root.logger=INFO,LOGFILE
+flume.log.dir=./logs
+flume.log.file=flume.log
+
+log4j.logger.org.apache.flume.lifecycle = INFO
+log4j.logger.org.jboss = WARN
+log4j.logger.org.mortbay = INFO
+log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
+log4j.logger.org.apache.hadoop = INFO
+log4j.logger.org.apache.hadoop.hive = ERROR
+
+# Define the root logger to the system property "flume.root.logger".
+log4j.rootLogger=${flume.root.logger}
+
+
+# Stock log4j rolling file appender
+# Default log rotation configuration
+log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.LOGFILE.MaxFileSize=100MB
+log4j.appender.LOGFILE.MaxBackupIndex=10
+log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
+log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
+# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
+# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
+# Add "DAILY" to flume.root.logger above if you want to use this
+log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
+log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
+log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
+log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# console
+# Add "console" to flume.root.logger above if you want to use this
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

+ 322 - 0
ansible/roles/flume/files/conf/nginx.access.conf

@@ -0,0 +1,322 @@
+agent01.sources = source1 source2 source3 source4 source5 source6 source7 source8
+agent01.channels = sink01 sink02
+agent01.sinks = elasticSearch
+
+#agent01.sources.source1.type = exec 
+#agent01.sources.source1.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+#agent01.sources.source1.restart = true
+#agent01.sources.source1.logStdErr = true
+#agent01.sources.source1.batchSize = 500
+#agent01.sources.source1.channels = sink02
+
+
+####source1######
+agent01.sources.source1.type = exec
+agent01.sources.source1.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source1.restart = true
+agent01.sources.source1.logStdErr = true
+agent01.sources.source1.channels = sink02
+
+agent01.sources.source1.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source1.interceptors.interceptor1.type = host
+agent01.sources.source1.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source1.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source1.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source1.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source1.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source1.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source1.interceptors.interceptor2.serializers.s3.name = datetime
+# The time here is already in ISO8601 format, which kibana recognizes as a timestamp directly, so the 3 lines below are not needed
+#agent01.sources.source1.interceptors.interceptor2.serializers.s3.type = org.apache.flume.interceptor.RegexExtractorInterceptorMillisSerializer
+#agent01.sources.source1.interceptors.interceptor2.serializers.s3.name = timestamp
+#agent01.sources.source1.interceptors.interceptor2.serializers.s3.pattern  = yyyy-MM-dd'T'HH:mm:ssZ
+agent01.sources.source1.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source1.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source1.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source1.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source1.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source1.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source1.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source1.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source1.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source1.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source1.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source1.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source1.interceptors.interceptor3.type = timestamp
+
+
+####source2######
+agent01.sources.source2.type = exec
+agent01.sources.source2.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source2.channels = sink02
+
+agent01.sources.source2.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source2.interceptors.interceptor1.type = host
+agent01.sources.source2.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source2.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source2.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source2.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source2.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source2.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source2.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source2.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source2.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source2.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source2.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source2.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source2.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source2.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source2.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source2.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source2.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source2.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source2.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source2.interceptors.interceptor3.type = timestamp
+
+####source3######
+agent01.sources.source3.type = exec
+agent01.sources.source3.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source3.channels = sink02
+
+agent01.sources.source3.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source3.interceptors.interceptor1.type = host
+agent01.sources.source3.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source3.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source3.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source3.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source3.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source3.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source3.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source3.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source3.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source3.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source3.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source3.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source3.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source3.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source3.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source3.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source3.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source3.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source3.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source3.interceptors.interceptor3.type = timestamp
+
+
+####source4######
+agent01.sources.source4.type = exec
+agent01.sources.source4.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source4.channels = sink02
+
+agent01.sources.source4.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source4.interceptors.interceptor1.type = host
+agent01.sources.source4.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source4.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source4.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source4.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source4.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source4.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source4.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source4.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source4.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source4.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source4.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source4.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source4.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source4.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source4.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source4.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source4.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source4.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source4.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source4.interceptors.interceptor3.type = timestamp
+
+###source5######
+agent01.sources.source5.type = exec
+agent01.sources.source5.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source5.channels = sink02
+
+agent01.sources.source5.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source5.interceptors.interceptor1.type = host
+agent01.sources.source5.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source5.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source5.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source5.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source5.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source5.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source5.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source5.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source5.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source5.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source5.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source5.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source5.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source5.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source5.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source5.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source5.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source5.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source5.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source5.interceptors.interceptor3.type = timestamp
+
+
+######source6######
+agent01.sources.source6.type = exec
+agent01.sources.source6.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source6.channels = sink02
+
+agent01.sources.source6.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source6.interceptors.interceptor1.type = host
+agent01.sources.source6.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source6.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source6.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source6.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source6.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source6.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source6.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source6.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source6.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source6.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source6.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source6.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source6.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source6.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source6.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source6.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source6.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source6.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source6.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source6.interceptors.interceptor3.type = timestamp
+
+
+######source7######
+agent01.sources.source7.type = exec
+agent01.sources.source7.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source7.channels = sink02
+
+agent01.sources.source7.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source7.interceptors.interceptor1.type = host
+agent01.sources.source7.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source7.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source7.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source7.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source7.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source7.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source7.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source7.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source7.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source7.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source7.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source7.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source7.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source7.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source7.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source7.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source7.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source7.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source7.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source7.interceptors.interceptor3.type = timestamp
+
+
+######source8######
+agent01.sources.source8.type = exec
+agent01.sources.source8.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source8.channels = sink02
+
+agent01.sources.source8.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source8.interceptors.interceptor1.type = host
+agent01.sources.source8.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source8.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source8.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source8.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source8.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source8.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source8.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source8.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source8.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source8.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source8.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source8.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source8.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source8.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source8.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source8.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source8.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source8.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source8.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source8.interceptors.interceptor3.type = timestamp
+
+######source9######
+agent01.sources.source9.type = exec
+agent01.sources.source9.command = tail -F /usr/local/openresty/nginx/logs/some_access.log
+agent01.sources.source9.channels = sink02
+
+agent01.sources.source9.interceptors = interceptor1 interceptor2 interceptor3
+agent01.sources.source9.interceptors.interceptor1.type = host
+agent01.sources.source9.interceptors.interceptor1.hostHeader = host
+
+agent01.sources.source9.interceptors.interceptor2.type = regex_extractor
+agent01.sources.source9.interceptors.interceptor2.regex = ([^\\s]*)\\s-\\s([^\\s]*)\\s\\[(.*)\\]\\s+\\"([\\S]*)\\s+([\\S]*)\\s+[\\S]*\\"\\s+(\\d+)\\s+(\\d+)\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"([^\\"]*)\\"\\s+\\"(.*?)\\"\\s+\\"([^\\"]*)\\"
+agent01.sources.source9.interceptors.interceptor2.serializers = s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15
+agent01.sources.source9.interceptors.interceptor2.serializers.s1.name = remote_addr
+agent01.sources.source9.interceptors.interceptor2.serializers.s2.name = remote_user
+agent01.sources.source9.interceptors.interceptor2.serializers.s3.name = datetime
+agent01.sources.source9.interceptors.interceptor2.serializers.s4.name = http_method
+agent01.sources.source9.interceptors.interceptor2.serializers.s5.name = uri
+agent01.sources.source9.interceptors.interceptor2.serializers.s6.name = status
+agent01.sources.source9.interceptors.interceptor2.serializers.s7.name = body_length
+agent01.sources.source9.interceptors.interceptor2.serializers.s8.name = http_referer
+agent01.sources.source9.interceptors.interceptor2.serializers.s9.name = user_agent
+agent01.sources.source9.interceptors.interceptor2.serializers.s10.name = http_x_forwarded_for
+agent01.sources.source9.interceptors.interceptor2.serializers.s11.name = request_time
+agent01.sources.source9.interceptors.interceptor2.serializers.s12.name = upstream_addr
+agent01.sources.source9.interceptors.interceptor2.serializers.s13.name = upstream_response_time
+agent01.sources.source9.interceptors.interceptor2.serializers.s14.name = post_body
+agent01.sources.source9.interceptors.interceptor2.serializers.s15.name = domain_url
+
+agent01.sources.source9.interceptors.interceptor3.type = timestamp
+
+
+
+agent01.sinks.elasticSearch.type = org.apache.flume.sink.elasticsearch.ElasticSearchSink
+agent01.sinks.elasticSearch.timeZone=Asia/Shanghai
+agent01.sinks.elasticSearch.channel = sink02
+agent01.sinks.elasticSearch.batchSize = 2000
+agent01.sinks.elasticSearch.hostNames = 10.31.88.120:9300 
+agent01.sinks.elasticSearch.indexName = nginx_access_log
+agent01.sinks.elasticSearch.indexType = static
+agent01.sinks.elasticSearch.clusterName = elasticsearch-zzb1
+agent01.sinks.elasticSearch.client = transport
+agent01.sinks.elasticSearch.serializer = org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer
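+# Note: client = transport speaks the native Elasticsearch transport protocol,
+# hence port 9300 above; the Logstash pipeline added in this same commit writes
+# over HTTP to port 9200 on the same host.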
+
+# Each sink's type must be defined
+agent01.sinks.loggerSink.type = logger
+
+#Specify the channel the sink should use
+agent01.sinks.loggerSink.channel = sink01
+
+# Each channel's type is defined.
+agent01.channels.sink01.type = memory
+agent01.channels.sink01.capacity = 10000
+agent01.channels.sink01.transactionCapacity = 10000
+agent01.channels.sink01.byteCapacityBufferPercentage = 20
+agent01.channels.sink01.keep-alive = 30
+
+agent01.channels.sink02.type = file
+agent01.channels.sink02.checkpointDir = /data/flume/data/checkpointDir 
+agent01.channels.sink02.dataDirs = /data/flume/data/dataDirs
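
For a quick offline sanity check of the regex_extractor pattern above, the doubled backslashes can be undone (the Java properties loader halves them) and the pattern run against a sample line. The snippet below is an illustrative Python sketch: the log line is made up, and the field names are taken from the serializer list in the config.

    import re

    # The Flume value with properties-file escaping removed.
    NGINX_RE = re.compile(
        r'([^\s]*)\s-\s([^\s]*)\s\[(.*)\]\s+"([\S]*)\s+([\S]*)\s+[\S]*"\s+'
        r'(\d+)\s+(\d+)\s+"([^"]*)"\s+"([^"]*)"\s+"([^"]*)"\s+"([^"]*)"\s+'
        r'"([^"]*)"\s+"([^"]*)"\s+"(.*?)"\s+"([^"]*)"')

    FIELDS = ["remote_addr", "remote_user", "datetime", "http_method", "uri",
              "status", "body_length", "http_referer", "user_agent",
              "http_x_forwarded_for", "request_time", "upstream_addr",
              "upstream_response_time", "post_body", "domain_url"]

    sample = ('203.0.113.7 - - [21/Nov/2017:10:00:00 +0800] '
              '"GET /api/v1/ping HTTP/1.1" 200 512 "-" "curl/7.47.0" "-" '
              '"0.003" "10.0.0.2:8080" "0.002" "-" "example.com"')

    m = NGINX_RE.match(sample)
    assert m is not None, "sample line does not match the extractor pattern"
    print(dict(zip(FIELDS, m.groups())))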

+ 2 - 0
ansible/roles/flume/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for flume

+ 57 - 0
ansible/roles/flume/meta/main.yml

@@ -0,0 +1,57 @@
+galaxy_info:
+  author: your name
+  description: your description
+  company: your company (optional)
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+
+  min_ansible_version: 1.2
+
+  # If this a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If Travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: Fedora
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is a keyword that describes
+    # and categorizes the role. Users find roles by searching for tags. Be sure to
+    # remove the '[]' above, if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+    #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.

+ 35 - 0
ansible/roles/flume/tasks/flume_conf.yml

@@ -0,0 +1,35 @@
+---
+- name: Add SSH authorized key for the synchronize module
+  authorized_key:
+    user: root
+    state: present
+    key: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"
+  tags: flume_conf
+
+- name: Sync Flume Config Files
+  synchronize:  # prompts for a password on the command line if no SSH key is set up.
+    src: conf
+    dest: /opt/flume/apache-flume-1.8.0-bin
+    delete: yes
+  tags: flume_conf, flume_stop
+
+- name: Stop All Flume Agents
+  # the [f]lume pattern keeps grep's own command line out of the kill list
+  shell: ps -ef |grep [f]lume |awk '{print $2}'|xargs kill; echo "Stop All Agents Done."
+  tags: flume_conf, flume_stop
+
+
+- name: Clean Local Data
+  file: 
+    path: /data/flume/data
+    state: absent
+  tags: flume_stop, flume_conf
+
+- name: Start All Flume Agents
+  shell: "{{ item }}"
+  with_items:
+    - "nohup bin/flume-ng agent --conf conf --conf-file conf/nginx.access.conf --name agent01 -Dflume.root.logger=INFO,console >/dev/null 2>&1 &"
+    - "nohup bin/flume-ng agent --conf conf --conf-file conf/backend.log.conf --name agent05 -Dflume.root.logger=INFO,console >/dev/null 2>&1 &"
+  args:
+    chdir: /opt/flume/apache-flume-1.8.0-bin
+  tags: flume_conf
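+# Usage sketch (assuming the role is wired into a playbook such as site.yml):
+#   ansible-playbook site.yml --tags flume_conf  # sync config, stop, clean, restart
+#   ansible-playbook site.yml --tags flume_stop  # sync config, stop agents, clean data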
+

+ 3 - 0
ansible/roles/flume/tasks/main.yml

@@ -0,0 +1,3 @@
+---
+# tasks file for flume
+- include: flume_conf.yml

+ 2 - 0
ansible/roles/flume/tests/inventory

@@ -0,0 +1,2 @@
+localhost
+

+ 5 - 0
ansible/roles/flume/tests/test.yml

@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - flume

+ 2 - 0
ansible/roles/flume/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for flume

+ 38 - 0
ansible/roles/limao/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
ansible/roles/limao/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for limao

BIN
ansible/roles/limao/files/docker.deb


+ 11 - 0
ansible/roles/limao/files/getmyip

@@ -0,0 +1,11 @@
+from socket import socket, SOCK_DGRAM, AF_INET
+
+
+def localip():
+    # "Connecting" a UDP socket sends no packets; it only makes the kernel
+    # pick a route, so getsockname() returns the outbound interface address.
+    # 114.114.114.114 is a public DNS resolver; the destination port is
+    # arbitrary (port 0 is rejected on some platforms, so 80 is used here).
+    s = socket(AF_INET, SOCK_DGRAM)
+    s.connect(('114.114.114.114', 80))
+    return s.getsockname()[0]
+
+
+if __name__ == "__main__":
+    print(localip())

+ 2 - 0
ansible/roles/limao/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for limao

+ 57 - 0
ansible/roles/limao/meta/main.yml

@@ -0,0 +1,57 @@
+galaxy_info:
+  author: your name
+  description: your description
+  company: your company (optional)
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+
+  min_ansible_version: 1.2
+
+  # If this a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If Travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: Fedora
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is a keyword that describes
+    # and categorizes the role. Users find roles by searching for tags. Be sure to
+    # remove the '[]' above, if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+    #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.

+ 53 - 0
ansible/roles/limao/tasks/docker_init.yml

@@ -0,0 +1,53 @@
+---
+- name: Install Libraries
+  apt: "name={{ item }} state=latest update_cache=yes"
+  with_items:
+    - libltdl7
+    - python
+    - python3-pip
+  tags: docker_init
+
+- name: Pip Install docker-py Module
+  pip:
+    name: docker-py
+  tags: docker_init
+
+
+- name: Copy Install File
+  copy: src=docker.deb dest=/tmp/docker.deb owner=root group=root mode=0744
+  tags: docker_init
+
+- name: Install docker.deb package
+  apt:
+    deb: /tmp/docker.deb
+  tags: docker_init
+
+- name: Restart Docker Service
+  systemd:
+    state: restarted
+    daemon_reload: yes
+    enabled: yes
+    name: docker.service
+  tags: docker_init
+
+
+- name: Log into registry.myregistry.com:5000
+  docker_login:
+    registry: registry.myregistry.com:5000
+    username: username
+    password: password
+    reauthorize: yes
+  tags: docker_init
+
+
+- name: Pull app Image
+  docker_image:
+    name: registry.myregistry.com:5000/app:1.0
+  tags: docker_init
+
+- name: Retag image
+  shell: docker tag registry.myregistry.com:5000/app:1.0  app:1.0
+  tags: docker_init
+
+
+

+ 4 - 0
ansible/roles/limao/tasks/main.yml

@@ -0,0 +1,4 @@
+---
+# tasks file for limao
+- include: docker_init.yml
+- include: run_container.yml

+ 38 - 0
ansible/roles/limao/tasks/run_container.yml

@@ -0,0 +1,38 @@
+---
+- name: Create Directory If It Doesn't Exist
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0755
+  with_items:
+    - /root/oopp_level_1
+    - /root/oopp_level_2
+  tags: limao_proxy
+
+- name: Define Server_config 1 
+  template: src=server_config.json dest=/root/oopp_level_1/server_config.json  owner=root group=root mode=0644
+  tags: limao_proxy
+
+- name: Define Server_config 2
+  vars:
+    sec: True
+  template: src=server_config.json dest=/root/oopp_level_2/server_config.json  owner=root group=root mode=0644 
+  tags: limao_proxy 
+
+- name: Stop And Remove All Containers
+  docker_container:
+    name: "{{ item }}"
+    state: absent
+  with_items:
+    - oopp-file-1
+    - oopp-file-2
+  ignore_errors: True
+  tags: limao_proxy
+
+
+- name: Start All Container
+  shell: "{{ item }}"
+  with_items:
+    - docker run --name oopp-file-1 -d  --restart "always" --log-opt max-size=3m --log-opt max-file=50  -v /root/oopp_level_1:/oopp_data -p 8388:8388 -p 8389:8389 oopp:1.0 oopp --config_file=/oopp_data/server_config.json
+    - docker run --name oopp-file-2 -d  --restart "always" --log-opt max-size=3m --log-opt max-file=50  -v /root/oopp_level_2:/oopp_data -p 8488:8488 -p 8489:8489 oopp:1.0 oopp --config_file=/oopp_data/server_config.json
+  tags: limao_proxy
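
Once the play has run, a quick reachability probe confirms the published ports are accepting connections. A minimal sketch: localhost and the port list are taken from the docker run lines above, and it assumes the services listen on TCP.

    import socket

    def port_open(host, port, timeout=3):
        """True if a TCP connect to (host, port) succeeds within the timeout."""
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False

    for port in (8388, 8389, 8488, 8489):
        print(port, "open" if port_open("127.0.0.1", port) else "closed")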

+ 119 - 0
ansible/roles/limao/templates/server_config.json

@@ -0,0 +1,119 @@
+{       
+{% if sec_region is defined %}
+        "region": "{{ sec_region }}",
+{% endif %}
+{% if sec_region is not defined %}
+	"region": "{{ region }}",
+{% endif %}
+{% if sec is defined %}
+        "node": "{{ node_id|int + 1  }}",
+{% else %}
+	"node": "{{ node_id }}",
+{% endif %}
+	"server": "0.0.0.0",
+	"server_port": 8388,
+	"password": "58Ssd2nn95",
+	"method": "aes-256-cfb",
+	"timeout": 600,
+	"type":"1",
+	"proxy_config": {
+		"proxy_list": [
+{% if sec is defined and sec_opposite_ip is defined %}
+                        "{{ sec_opposite_ip }}|443|chacha20-ietf-poly1305|a564FDSOisdf"
+{% elif opposite_ip and sec is not defined %}
+                        "{{ opposite_ip }}|443|chacha20-ietf-poly1305|a564FDSOisdf"
+{% endif %}
+		],
+		"retry_interval": 60,
+		"retry_url": "https://twitter.com",
+		"failed_threshold": 0.6,
+		"failed_least_count": 5
+	},
+	"account_check": true,
+	"api": {
+		"addr": ":8389",
+		"https": false,
+		"cert_file": "cert.pem",
+		"key_file": "key.pem",
+		"auth": "asdf1234",
+		"routers": {
+			"/statistics": {
+				"handler": "statistics",
+				"option": ""
+			},
+			"/acc": {
+				"handler": "acc",
+				"option": ""
+			},
+			"/dnsrequest": {
+				"handler": "dnsrequest",
+				"option": ""
+			},
+			"/proxy_status": {
+				"handler": "proxy_status",
+				"option": ""
+			}
+		},
+		"client_ip": [
+			"127.0.0.1"
+		]
+	},
+	"log": {
+		"level": 5,
+		"o_file": "",
+		"o_std": false,
+		"hide_category": {
+			"remoteapi": true,
+			"ss-stream-detail": false,
+			"ulitily": false,
+			"auth": true
+		}
+	},
+	"stream": {
+		"record_dns_request": true
+	},
+	"capture": {
+		"dnsquery": true,
+		"socks5domain": true
+	},
+	"remote_api":{
+ "token":"EYRG87P6LjZe7Q9nf9X9",
+    "host_addr":{
+      "region":"https://wongderf.xyz",
+      "cache_gate":"https://cache-pool.qhisec.xyz:7788"
+    },
+    "router":{
+      "api_auth":{
+        "host":"region",
+        "path":"/api/proxy/v1/authenticate"
+      },
+      "upload_account_connection_info": {
+        "host": "cache_gate",
+        "path": "/putdata"
+      },
+      "upload_online_count": {
+        "host": "region",
+        "path": "/api/proxy/v1/nodes/record_online_users"
+      },
+      "upload_user_flow_bytes": {
+        "host": "region",
+        "path": "/api/proxy/v1/users/inspect_expiration"
+      },
+      "query_ip": {
+        "host": "region",
+        "path": "/api/proxy/v1/remote_ip"
+      },
+	  "limit_speed":{
+        "host": "region",
+        "path": "/api/proxy/v1/nodes/limit_speed"
+      }
+    }
+  },
+	"parameters": {
+		"user_maintaince_interval_second": "600s",
+		"online_user_timeout_second": "300s",
+		"registry_interval_second": "300s",
+		"registry_ttl": "350"
+	}
+}
+
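
To see what the two container instances actually receive, the template can be rendered locally with plain Jinja2 (pip install jinja2). A sketch with illustrative variable values, run from ansible/roles/limao/templates:

    from jinja2 import Template

    with open("server_config.json") as f:
        tpl = Template(f.read())

    # Level-1 instance: plain node id and upstream.
    print(tpl.render(region="sg", node_id=7, opposite_ip="203.0.113.10"))
    # Level-2 instance: "sec" bumps the node id to node_id+1 and switches
    # the upstream to sec_opposite_ip, matching run_container.yml.
    print(tpl.render(region="sg", node_id=7, sec=True,
                     sec_opposite_ip="203.0.113.11"))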

+ 2 - 0
ansible/roles/limao/tests/inventory

@@ -0,0 +1,2 @@
+localhost
+

+ 5 - 0
ansible/roles/limao/tests/test.yml

@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - limao

+ 2 - 0
ansible/roles/limao/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for limao

+ 38 - 0
ansible/roles/logstash/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
ansible/roles/logstash/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for logstash

+ 80 - 0
ansible/roles/logstash/files/config/jvm.options

@@ -0,0 +1,80 @@
+## JVM configuration
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
+-Xms1g
+-Xmx1g
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+## GC configuration
+-XX:+UseParNewGC
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+
+## optimizations
+
+# disable calls to System#gc
+-XX:+DisableExplicitGC
+
+## Locale
+# Set the locale language
+#-Duser.language=en
+
+# Set the locale country
+#-Duser.country=US
+
+# Set the locale variant, if any
+#-Duser.variant=
+
+## basic
+
+# set the I/O temp directory
+#-Djava.io.tmpdir=$HOME
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g. filenames)
+-Dfile.encoding=UTF-8
+
+# use our provided JNA always versus the system one
+#-Djna.nosys=true
+
+# Turn on JRuby invokedynamic
+-Djruby.compile.invokedynamic=true
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails
+# heap dumps are created in the working directory of the JVM
+-XX:+HeapDumpOnOutOfMemoryError
+
+# specify an alternative path for heap dumps
+# ensure the directory exists and has sufficient space
+#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof
+
+## GC logging
+#-XX:+PrintGCDetails
+#-XX:+PrintGCTimeStamps
+#-XX:+PrintGCDateStamps
+#-XX:+PrintClassHistogram
+#-XX:+PrintTenuringDistribution
+#-XX:+PrintGCApplicationStoppedTime
+
+# log GC status to a file with time stamps
+# ensure the directory exists
+#-Xloggc:${LS_GC_LOG_FILE}
+
+# Entropy source for randomness
+-Djava.security.egd=file:/dev/urandom

+ 83 - 0
ansible/roles/logstash/files/config/log4j2.properties

@@ -0,0 +1,83 @@
+status = error
+name = LogstashPropertiesConfig
+
+appender.console.type = Console
+appender.console.name = plain_console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
+
+appender.json_console.type = Console
+appender.json_console.name = json_console
+appender.json_console.layout.type = JSONLayout
+appender.json_console.layout.compact = true
+appender.json_console.layout.eventEol = true
+
+appender.rolling.type = RollingFile
+appender.rolling.name = plain_rolling
+appender.rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log
+appender.rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}.log
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %-.10000m%n
+
+appender.json_rolling.type = RollingFile
+appender.json_rolling.name = json_rolling
+appender.json_rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log
+appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}.log
+appender.json_rolling.policies.type = Policies
+appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.json_rolling.policies.time.interval = 1
+appender.json_rolling.policies.time.modulate = true
+appender.json_rolling.layout.type = JSONLayout
+appender.json_rolling.layout.compact = true
+appender.json_rolling.layout.eventEol = true
+
+
+rootLogger.level = ${sys:ls.log.level}
+rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
+rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling
+
+# Slowlog
+
+appender.console_slowlog.type = Console
+appender.console_slowlog.name = plain_console_slowlog
+appender.console_slowlog.layout.type = PatternLayout
+appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
+
+appender.json_console_slowlog.type = Console
+appender.json_console_slowlog.name = json_console_slowlog
+appender.json_console_slowlog.layout.type = JSONLayout
+appender.json_console_slowlog.layout.compact = true
+appender.json_console_slowlog.layout.eventEol = true
+
+appender.rolling_slowlog.type = RollingFile
+appender.rolling_slowlog.name = plain_rolling_slowlog
+appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log
+appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}.log
+appender.rolling_slowlog.policies.type = Policies
+appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_slowlog.policies.time.interval = 1
+appender.rolling_slowlog.policies.time.modulate = true
+appender.rolling_slowlog.layout.type = PatternLayout
+appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
+
+appender.json_rolling_slowlog.type = RollingFile
+appender.json_rolling_slowlog.name = json_rolling_slowlog
+appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log
+appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}.log
+appender.json_rolling_slowlog.policies.type = Policies
+appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
+appender.json_rolling_slowlog.policies.time.interval = 1
+appender.json_rolling_slowlog.policies.time.modulate = true
+appender.json_rolling_slowlog.layout.type = JSONLayout
+appender.json_rolling_slowlog.layout.compact = true
+appender.json_rolling_slowlog.layout.eventEol = true
+
+logger.slowlog.name = slowlog
+logger.slowlog.level = trace
+logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog
+logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog
+logger.slowlog.additivity = false

+ 214 - 0
ansible/roles/logstash/files/config/logstash.yml

@@ -0,0 +1,214 @@
+# Settings file in YAML
+#
+# Settings can be specified either in hierarchical form, e.g.:
+#
+#   pipeline:
+#     batch:
+#       size: 125
+#       delay: 5
+#
+# Or as flat keys:
+#
+#   pipeline.batch.size: 125
+#   pipeline.batch.delay: 5
+#
+# ------------  Node identity ------------
+#
+# Use a descriptive name for the node:
+#
+# node.name: test
+#
+# If omitted the node name will default to the machine's host name
+#
+# ------------ Data path ------------------
+#
+# Which directory should be used by logstash and its plugins
+# for any persistent needs. Defaults to LOGSTASH_HOME/data
+#
+# path.data:
+#
+# ------------ Pipeline Settings --------------
+#
+# Set the number of workers that will, in parallel, execute the filters+outputs
+# stage of the pipeline.
+#
+# This defaults to the number of the host's CPU cores.
+#
+# pipeline.workers: 2
+#
+# How many workers should be used per output plugin instance
+#
+# pipeline.output.workers: 1
+#
+# How many events to retrieve from inputs before sending to filters+workers
+#
+# pipeline.batch.size: 125
+#
+# How long to wait before dispatching an undersized batch to filters+workers
+# Value is in milliseconds.
+#
+# pipeline.batch.delay: 5
+#
+# Force Logstash to exit during shutdown even if there are still inflight
+# events in memory. By default, logstash will refuse to quit until all
+# received events have been pushed to the outputs.
+#
+# WARNING: enabling this can lead to data loss during shutdown
+#
+# pipeline.unsafe_shutdown: false
+#
+# ------------ Pipeline Configuration Settings --------------
+#
+# Where to fetch the pipeline configuration for the main pipeline
+#
+# path.config:
+#
+# Pipeline configuration string for the main pipeline
+#
+# config.string:
+#
+# At startup, test if the configuration is valid and exit (dry run)
+#
+# config.test_and_exit: false
+#
+# Periodically check if the configuration has changed and reload the pipeline
+# This can also be triggered manually through the SIGHUP signal
+#
+# config.reload.automatic: false
+#
+# How often to check if the pipeline configuration has changed (in seconds)
+#
+# config.reload.interval: 3s
+#
+# Show fully compiled configuration as debug log message
+# NOTE: --log.level must be 'debug'
+#
+# config.debug: false
+#
+# When enabled, process escaped characters such as \n and \" in strings in the
+# pipeline configuration files.
+#
+# config.support_escapes: false
+#
+# ------------ Module Settings ---------------
+# Define modules here.  Modules definitions must be defined as an array.
+# The simple way to see this is to prepend each `name` with a `-`, and keep
+# all associated variables under the `name` they are associated with, and 
+# above the next, like this:
+#
+# modules:
+#   - name: MODULE_NAME
+#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
+#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
+#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
+#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
+#
+# Module variable names must be in the format of 
+#
+# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
+#
+# modules:
+#
+# ------------ Cloud Settings ---------------
+# Define Elastic Cloud settings here.
+# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
+# and it may have a label prefix e.g. staging:dXMtZ...
+# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
+# cloud.id: <identifier>
+#
+# Format of cloud.auth is: <user>:<pass>
+# This is optional
+# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
+# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
+# cloud.auth: elastic:<password>
+#
+# ------------ Queuing Settings --------------
+#
+# Internal queuing model, "memory" for legacy in-memory based queuing and
+# "persisted" for disk-based acked queueing. Defaults is memory
+#
+# queue.type: memory
+#
+# If using queue.type: persisted, the directory path where the data files will be stored.
+# Default is path.data/queue
+#
+# path.queue:
+#
+# If using queue.type: persisted, the page data files size. The queue data consists of
+# append-only data files separated into pages. Default is 250mb
+#
+# queue.page_capacity: 250mb
+#
+# If using queue.type: persisted, the maximum number of unread events in the queue.
+# Default is 0 (unlimited)
+#
+# queue.max_events: 0
+#
+# If using queue.type: persisted, the total capacity of the queue in number of bytes.
+# If you would like more unacked events to be buffered in Logstash, you can increase the
+# capacity using this setting. Please make sure your disk drive has capacity greater than
+# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
+# whichever criterion is reached first
+# Default is 1024mb or 1gb
+#
+# queue.max_bytes: 1024mb
+#
+# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
+# Default is 1024, 0 for unlimited
+#
+# queue.checkpoint.acks: 1024
+#
+# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
+# Default is 1024, 0 for unlimited
+#
+# queue.checkpoint.writes: 1024
+#
+# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
+# Default is 1000, 0 for no periodic checkpoint.
+#
+# queue.checkpoint.interval: 1000
+#
+# ------------ Dead-Letter Queue Settings --------------
+# Flag to turn on dead-letter queue.
+#
+# dead_letter_queue.enable: false
+
+# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
+# will be dropped if they would increase the size of the dead letter queue beyond this setting.
+# Default is 1024mb
+# dead_letter_queue.max_bytes: 1024mb
+
+# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
+# Default is path.data/dead_letter_queue
+#
+# path.dead_letter_queue:
+#
+# ------------ Metrics Settings --------------
+#
+# Bind address for the metrics REST endpoint
+#
+# http.host: "127.0.0.1"
+#
+# Bind port for the metrics REST endpoint, this option also accept a range
+# (9600-9700) and logstash will pick up the first available ports.
+#
+# http.port: 9600-9700
+#
+# ------------ Debugging Settings --------------
+#
+# Options for log.level:
+#   * fatal
+#   * error
+#   * warn
+#   * info (default)
+#   * debug
+#   * trace
+#
+# log.level: info
+# path.logs:
+#
+# ------------ Other Settings --------------
+#
+# Where to find custom plugins
+# path.plugins: []
+

+ 97 - 0
ansible/roles/logstash/files/config/nginx_access.conf

@@ -0,0 +1,97 @@
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/dispatch.comsunny.com*.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/www.comsunny.com_*.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/www.*.com.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/api.comsunny.com_443.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/internal.comsunny.com_80.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "nginx_access_log"
+    path => "/usr/local/openresty/nginx/logs/uu.taschy.com.access.log"
+    start_position => "beginning"
+  }
+}
+
+input {
+  file {
+    type => "backend_log"
+    path => "/var/prog/backend/logs/backend.log"
+    start_position => "beginning"
+  }
+}
+
+filter {
+  if [type] == "nginx_access_log" {
+    grok {
+      patterns_dir => "/opt/logstash/logstash-6.0.0/patterns/nginx"
+      match => { "message" => "%{NGINX_ACCESS}" }
+    }
+  }
+}
+
+filter {
+  if [type] == "backend_log" {
+    grok {
+      patterns_dir => "/opt/logstash/logstash-6.0.0/patterns/backend.conf"
+      match => { "message" => "%{BACKEND_LOG}" }
+    }
+  }
+}
+
+output {
+  if [type] == "nginx_access_log" {
+    elasticsearch {
+      hosts => ["10.31.88.120:9200"]
+      index => "nginx_access_log-%{+YYYY-MM-dd}"
+    }
+  }
+}
+
+output {
+  if [type] == "backend_log" {
+    elasticsearch {
+      hosts => ["10.31.88.120:9200"]
+      index => "backend_log-%{+YYYY-MM-dd}"
+    }
+  }
+}
+
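
Aside: the file input's path setting accepts an array of globs, so the six nginx inputs above could be collapsed into a single input block listing all six patterns; the duplicated filter and output sections could likewise be folded into one conditional pipeline.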

+ 79 - 0
ansible/roles/logstash/files/config/pipelines.yml

@@ -0,0 +1,79 @@
+# List of pipelines to be loaded by Logstash
+#
+# This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings.
+# Default values for omitted settings are read from the `logstash.yml` file.
+# When declaring multiple pipelines, each MUST have its own `pipeline.id`.
+#
+# Example of two pipelines:
+#
+# - pipeline.id: test
+#   pipeline.workers: 1
+#   pipeline.batch.size: 1
+#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
+# - pipeline.id: another_test
+#   queue.type: persisted
+#   path.config: "/tmp/logstash/*.config"
+#
+# Available options:
+#
+#   # name of the pipeline
+#   pipeline.id: mylogs
+#
+#   # The configuration string to be used by this pipeline
+#   config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
+#
+#   # The path from where to read the configuration text
+#   path.config: "/etc/conf.d/logstash/myconfig.cfg"
+#
+#   # How many worker threads execute the Filters+Outputs stage of the pipeline
+#   pipeline.workers: 1 (actually defaults to number of CPUs)
+#
+#   # How many events to retrieve from inputs before sending to filters+workers
+#   pipeline.batch.size: 125
+#
+#   # How long to wait before dispatching an undersized batch to filters+workers
+#   pipeline.batch.delay: 5
+#
+#   # How many workers should be used per output plugin instance
+#   pipeline.output.workers: 1
+#
+#   # Internal queuing model, "memory" for legacy in-memory based queuing and
+#   # "persisted" for disk-based acked queueing. Defaults is memory
+#   queue.type: memory
+#
+#   # If using queue.type: persisted, the page data files size. The queue data consists of
+#   # append-only data files separated into pages. Default is 250mb
+#   queue.page_capacity: 250mb
+#
+#   # If using queue.type: persisted, the maximum number of unread events in the queue.
+#   # Default is 0 (unlimited)
+#   queue.max_events: 0
+#
+#   # If using queue.type: persisted, the total capacity of the queue in number of bytes.
+#   # Default is 1024mb or 1gb
+#   queue.max_bytes: 1024mb
+#
+#   # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
+#   # Default is 1024, 0 for unlimited
+#   queue.checkpoint.acks: 1024
+#
+#   # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
+#   # Default is 1024, 0 for unlimited
+#   queue.checkpoint.writes: 1024
+#
+#   # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
+#   # Default is 1000, 0 for no periodic checkpoint.
+#   queue.checkpoint.interval: 1000
+#
+#   # Enable Dead Letter Queueing for this pipeline.
+#   dead_letter_queue.enable: false
+#
+#   If using dead_letter_queue.enable: true, the maximum size of dead letter queue for this pipeline. Entries
+#   will be dropped if they would increase the size of the dead letter queue beyond this setting.
+#   Default is 1024mb
+#   dead_letter_queue.max_bytes: 1024mb
+#
+#   If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
+#   Default is path.data/dead_letter_queue
+#
+#   path.dead_letter_queue:

+ 53 - 0
ansible/roles/logstash/files/config/startup.options

@@ -0,0 +1,53 @@
+################################################################################
+# These settings are ONLY used by $LS_HOME/bin/system-install to create a custom
+# startup script for Logstash and is not used by Logstash itself. It should
+# automagically use the init system (systemd, upstart, sysv, etc.) that your
+# Linux distribution uses.
+#
+# After changing anything here, you need to re-run $LS_HOME/bin/system-install
+# as root to push the changes to the init script.
+################################################################################
+
+# Override Java location
+#JAVACMD=/usr/bin/java
+
+# Set a home directory
+LS_HOME=/usr/share/logstash
+
+# logstash settings directory, the path which contains logstash.yml
+LS_SETTINGS_DIR="${LS_HOME}/config"
+
+# Arguments to pass to logstash
+LS_OPTS="--path.settings ${LS_SETTINGS_DIR}"
+
+# Arguments to pass to java
+LS_JAVA_OPTS=""
+
+# pidfiles aren't used the same way for upstart and systemd; this is for sysv users.
+LS_PIDFILE=/var/run/logstash.pid
+
+# user and group id to be invoked as
+LS_USER=logstash
+LS_GROUP=logstash
+
+# Enable GC logging by uncommenting the appropriate lines in the GC logging
+# section in jvm.options
+LS_GC_LOG_FILE=/var/log/logstash/gc.log
+
+# Open file limit
+LS_OPEN_FILES=16384
+
+# Nice level
+LS_NICE=19
+
+# Change these to have the init script named and described differently
+# This is useful when running multiple instances of Logstash on the same
+# physical box or vm
+SERVICE_NAME="logstash"
+SERVICE_DESCRIPTION="logstash"
+
+# If you need to run a command or script before launching Logstash, put it
+# between the lines beginning with `read` and `EOM`, and uncomment those lines.
+###
+## read -r -d '' PRESTART << EOM
+## EOM

File diff not shown because this file is too large
+ 14 - 0
ansible/roles/logstash/files/patterns/aws


+ 1 - 0
ansible/roles/logstash/files/patterns/backend.conf

@@ -0,0 +1 @@
+BACKEND_LOG \[%{NOTSPACE:level}\] %{TIMESTAMP_ISO8601:datetime}[0-9]{0,9} (?<content>[0-9]+.*)

File diff not shown because this file is too large
+ 50 - 0
ansible/roles/logstash/files/patterns/bacula


+ 3 - 0
ansible/roles/logstash/files/patterns/bind

@@ -0,0 +1,3 @@
+BIND9_TIMESTAMP %{MONTHDAY}[-]%{MONTH}[-]%{YEAR} %{TIME}
+
+BIND9 %{BIND9_TIMESTAMP:timestamp} queries: %{LOGLEVEL:loglevel}: client %{IP:clientip}#%{POSINT:clientport} \(%{GREEDYDATA:query}\): query: %{GREEDYDATA:query} IN %{GREEDYDATA:querytype} \(%{IP:dns}\)

File diff not shown because this file is too large
+ 13 - 0
ansible/roles/logstash/files/patterns/bro


+ 13 - 0
ansible/roles/logstash/files/patterns/exim

@@ -0,0 +1,13 @@
+EXIM_MSGID [0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2}
+EXIM_FLAGS (<=|[-=>*]>|[*]{2}|==)
+EXIM_DATE %{YEAR:exim_year}-%{MONTHNUM:exim_month}-%{MONTHDAY:exim_day} %{TIME:exim_time}
+EXIM_PID \[%{POSINT}\]
+EXIM_QT ((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?)
+EXIM_EXCLUDE_TERMS (Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message)
+EXIM_REMOTE_HOST (H=(%{NOTSPACE:remote_hostname} )?(\(%{NOTSPACE:remote_heloname}\) )?\[%{IP:remote_host}\])
+EXIM_INTERFACE (I=\[%{IP:exim_interface}\](:%{NUMBER:exim_interface_port}))
+EXIM_PROTOCOL (P=%{NOTSPACE:protocol})
+EXIM_MSG_SIZE (S=%{NUMBER:exim_msg_size})
+EXIM_HEADER_ID (id=%{NOTSPACE:exim_header_id})
+EXIM_SUBJECT (T=%{QS:exim_subject})
+

File diff not shown because this file is too large
+ 91 - 0
ansible/roles/logstash/files/patterns/firewalls


File diff not shown because this file is too large
+ 97 - 0
ansible/roles/logstash/files/patterns/grok-patterns


File diff not shown because this file is too large
+ 39 - 0
ansible/roles/logstash/files/patterns/haproxy


+ 15 - 0
ansible/roles/logstash/files/patterns/httpd

@@ -0,0 +1,15 @@
+HTTPDUSER %{EMAILADDRESS}|%{USER}
+HTTPDERROR_DATE %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR}
+
+# Log formats
+HTTPD_COMMONLOG %{IPORHOST:clientip} %{HTTPDUSER:ident} %{HTTPDUSER:auth} \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})" %{NUMBER:response} (?:%{NUMBER:bytes}|-)
+HTTPD_COMBINEDLOG %{HTTPD_COMMONLOG} %{QS:referrer} %{QS:agent}
+
+# Error logs
+HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:message}
+HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel}\] \[pid %{POSINT:pid}(:tid %{NUMBER:tid})?\]( \(%{POSINT:proxy_errorcode}\)%{DATA:proxy_message}:)?( \[client %{IPORHOST:clientip}:%{POSINT:clientport}\])?( %{DATA:errorcode}:)? %{GREEDYDATA:message}
+HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
+
+# Deprecated
+COMMONAPACHELOG %{HTTPD_COMMONLOG}
+COMBINEDAPACHELOG %{HTTPD_COMBINEDLOG}

+ 19 - 0
ansible/roles/logstash/files/patterns/java

@@ -0,0 +1,19 @@
+JAVACLASS (?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]*
+#Space is an allowed character to match special cases like 'Native Method' or 'Unknown Source'
+JAVAFILE (?:[A-Za-z0-9_. -]+)
+#Allow special <init>, <clinit> methods
+JAVAMETHOD (?:(<(?:cl)?init>)|[a-zA-Z$_][a-zA-Z$_0-9]*)
+#Line number is optional in special cases 'Native method' or 'Unknown source'
+JAVASTACKTRACEPART %{SPACE}at %{JAVACLASS:class}\.%{JAVAMETHOD:method}\(%{JAVAFILE:file}(?::%{NUMBER:line})?\)
+# Java Logs
+JAVATHREAD (?:[A-Z]{2}-Processor[\d]+)
+JAVACLASS (?:[a-zA-Z0-9-]+\.)+[A-Za-z0-9$]+
+JAVAFILE (?:[A-Za-z0-9_.-]+)
+JAVALOGMESSAGE (.*)
+# MMM dd, yyyy HH:mm:ss eg: Jan 9, 2014 7:13:13 AM
+CATALINA_DATESTAMP %{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) (?:AM|PM)
+# yyyy-MM-dd HH:mm:ss,SSS ZZZ eg: 2014-01-09 17:32:25,527 -0800
+TOMCAT_DATESTAMP 20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE}
+CATALINALOG %{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage}
+# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something completely unexpected happened...
+TOMCATLOG %{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}

+ 9 - 0
ansible/roles/logstash/files/patterns/junos

@@ -0,0 +1,9 @@
+# JUNOS 11.4 RT_FLOW patterns
+RT_FLOW_EVENT (RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY)
+
+RT_FLOW1 %{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} \d+\(%{DATA:sent}\) \d+\(%{DATA:received}\) %{INT:elapsed-time} .*
+
+RT_FLOW2 %{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .*
+
+RT_FLOW3 %{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .*
+

+ 16 - 0
ansible/roles/logstash/files/patterns/linux-syslog

@@ -0,0 +1,16 @@
+SYSLOG5424PRINTASCII [!-~]+
+
+SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|)
+SYSLOGPAMSESSION %{SYSLOGBASE} (?=%{GREEDYDATA:message})%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user %{USERNAME:username}(?: by %{GREEDYDATA:pam_by})?
+
+CRON_ACTION [A-Z ]+
+CRONLOG %{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\)
+
+SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message}
+
+# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424)
+SYSLOG5424PRI <%{NONNEGINT:syslog5424_pri}>
+SYSLOG5424SD \[%{DATA}\]+
+SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{IPORHOST:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|)
+
+SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg}

+ 1 - 0
ansible/roles/logstash/files/patterns/maven

@@ -0,0 +1 @@
+MAVEN_VERSION (?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)(?:[.-](RELEASE|SNAPSHOT))?

+ 1 - 0
ansible/roles/logstash/files/patterns/mcollective

@@ -0,0 +1 @@
+MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}:

+ 4 - 0
ansible/roles/logstash/files/patterns/mcollective-patterns

@@ -0,0 +1,4 @@
+# Remember, these can be multi-line events.
+MCOLLECTIVE ., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\]%{SPACE}%{LOGLEVEL:event_level}
+
+MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}:

+ 7 - 0
ansible/roles/logstash/files/patterns/mongodb

@@ -0,0 +1,7 @@
+MONGO_LOG %{SYSLOGTIMESTAMP:timestamp} \[%{WORD:component}\] %{GREEDYDATA:message}
+MONGO_QUERY \{ (?<={ ).*(?= } ntoreturn:) \}
+MONGO_SLOWQUERY %{WORD} %{MONGO_WORDDASH:database}\.%{MONGO_WORDDASH:collection} %{WORD}: %{MONGO_QUERY:query} %{WORD}:%{NONNEGINT:ntoreturn} %{WORD}:%{NONNEGINT:ntoskip} %{WORD}:%{NONNEGINT:nscanned}.*nreturned:%{NONNEGINT:nreturned}..+ (?<duration>[0-9]+)ms
+MONGO_WORDDASH \b[\w-]+\b
+MONGO3_SEVERITY \w
+MONGO3_COMPONENT %{WORD}|-
+MONGO3_LOG %{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:severity} %{MONGO3_COMPONENT:component}%{SPACE}(?:\[%{DATA:context}\])? %{GREEDYDATA:message}

File diff not shown because this file is too large
+ 124 - 0
ansible/roles/logstash/files/patterns/nagios


+ 1 - 0
ansible/roles/logstash/files/patterns/nginx

@@ -0,0 +1 @@
+NGINX_ACCESS %{IPORHOST:clientip} - %{USERNAME:remote_user} %{DATETIME:datetime} "(?:%{WORD:verb} (%{NOTSPACE:uri}|-)(?: HTTP/%{NUMBER:http_version})?|-)" (%{NUMBER:status}|-) %{NUMBER:bytes} "(%{NOTSPACE:referrer}|-)" (%{QS:agent}|-) "(%{IPORHOST:x_forwarded}|-)" "(%{BASE16FLOAT:request_time}|-)" "(%{URIHOST:upstream_host}|-)" "(%{BASE16FLOAT:upstream_response_time}|-)" (%{NOTSPACE:request_body}|-) "(%{HTTPNAME:domain_url}|-)"

+ 3 - 0
ansible/roles/logstash/files/patterns/postgresql

@@ -0,0 +1,3 @@
+# Default postgresql pg_log format pattern
+POSTGRESQL %{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid}
+

+ 13 - 0
ansible/roles/logstash/files/patterns/rails

@@ -0,0 +1,13 @@
+RUUID \h{32}
+# rails controller with action
+RCONTROLLER (?<controller>[^#]+)#(?<action>\w+)
+
+# this will often be the only line:
+RAILS3HEAD (?m)Started %{WORD:verb} "%{URIPATHPARAM:request}" for %{IPORHOST:clientip} at (?<timestamp>%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE})
+# for some strange reason, params are stripped of {} - not sure that's a good idea.
+RPROCESSING \W*Processing by %{RCONTROLLER} as (?<format>\S+)(?:\W*Parameters: {%{DATA:params}}\W*)?
+RAILS3FOOT Completed %{NUMBER:response}%{DATA} in %{NUMBER:totalms}ms %{RAILS3PROFILE}%{GREEDYDATA}
+RAILS3PROFILE (?:\(Views: %{NUMBER:viewms}ms \| ActiveRecord: %{NUMBER:activerecordms}ms|\(ActiveRecord: %{NUMBER:activerecordms}ms)?
+
+# putting it all together
+RAILS3 %{RAILS3HEAD}(?:%{RPROCESSING})?(?<context>(?:%{DATA}\n)*)(?:%{RAILS3FOOT})?

+ 3 - 0
ansible/roles/logstash/files/patterns/redis

@@ -0,0 +1,3 @@
+REDISTIMESTAMP %{MONTHDAY} %{MONTH} %{TIME}
+REDISLOG \[%{POSINT:pid}\] %{REDISTIMESTAMP:timestamp} \* 
+REDISMONLOG %{NUMBER:timestamp} \[%{INT:database} %{IP:client}:%{NUMBER:port}\] "%{WORD:command}"\s?%{GREEDYDATA:params}

+ 2 - 0
ansible/roles/logstash/files/patterns/ruby

@@ -0,0 +1,2 @@
+RUBY_LOGLEVEL (?:DEBUG|FATAL|ERROR|WARN|INFO)
+RUBY_LOGGER [DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\] *%{RUBY_LOGLEVEL:loglevel} -- +%{DATA:progname}: %{GREEDYDATA:message}

+ 4 - 0
ansible/roles/logstash/files/patterns/squid

@@ -0,0 +1,4 @@
+# Pattern squid3
+# Documentation of squid3 logs formats can be found at the following link:
+# http://wiki.squid-cache.org/Features/LogFormat
+SQUID3 %{NUMBER:timestamp}\s+%{NUMBER:duration}\s%{IP:client_address}\s%{WORD:cache_result}/%{POSINT:status_code}\s%{NUMBER:bytes}\s%{WORD:request_method}\s%{NOTSPACE:url}\s(%{NOTSPACE:user}|-)\s%{WORD:hierarchy_code}/%{IPORHOST:server}\s%{NOTSPACE:content_type}

+ 2 - 0
ansible/roles/logstash/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for logstash

+ 57 - 0
ansible/roles/logstash/meta/main.yml

@@ -0,0 +1,57 @@
+galaxy_info:
+  author: your name
+  description: your description
+  company: your company (optional)
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+
+  min_ansible_version: 1.2
+
+  # If this is a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If Travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: Fedora
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is a keyword that describes
+    # and categorizes the role. Users find roles by searching for tags. Be sure to
+    # remove the '[]' above, if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+    #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.

+ 38 - 0
ansible/roles/logstash/tasks/logstash_conf.yml

@@ -0,0 +1,38 @@
+---
+- name: Add SSH authorized key for synchronize Module
+  authorized_key:
+    user: root
+    state: present
+    key: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"
+  tags: logstash_conf
+
+- name: Sync Logstash Config Files
+  synchronize:  # prompts for a password on the command line if no SSH key is present.
+    src: "{{ item }}"
+    dest: /opt/logstash/logstash-6.0.0
+    delete: yes
+  with_items:
+    - "config"
+    - "patterns"   
+  tags: logstash_conf, logstash_stop
+
+- name: Stop All Logstash Agents
+  shell: ps -ef |grep [l]ogstash |grep nginx_access.conf |awk '{print $2}'|xargs kill; echo "All logstash agents stopped."
+  tags: logstash_conf, logstash_stop
+
+
+#- name: Clean Local Data
+#  file: 
+#    path: /data/logstash/data
+#    state: absent
+#  tags: logstash_stop, logstash_conf
+
+- name: Start All Logstash Agent
+  shell: "{{ item }}"
+  with_items:
+    - "nohup bin/logstash -f config/nginx_access.conf --config.reload.automatic >/dev/null 2>&1 &"
+#    - "nohup bin/logstash-ng agent --conf conf --conf-file conf/backend.log.conf --name agent05 -Dlogstash.root.logger=INFO,console >/dev/null 2>&1 &"
+  args:
+    chdir: /opt/logstash/logstash-6.0.0
+  tags: logstash_conf
+
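
Note: assuming a play like tests/test.yml applies this role, the tags above let these steps run selectively (playbook and inventory names are illustrative):

    ansible-playbook -i hosts logstash.yml --tags logstash_conf   # sync configs, stop, then start agents
    ansible-playbook -i hosts logstash.yml --tags logstash_stop   # sync configs and stop agents only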

+ 3 - 0
ansible/roles/logstash/tasks/main.yml

@@ -0,0 +1,3 @@
+---
+# tasks file for logstash
+- include: logstash_conf.yml

+ 2 - 0
ansible/roles/logstash/tests/inventory

@@ -0,0 +1,2 @@
+localhost
+

+ 5 - 0
ansible/roles/logstash/tests/test.yml

@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  remote_user: root
+  roles:
+    - logstash

+ 2 - 0
ansible/roles/logstash/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for logstash

+ 38 - 0
ansible/roles/nginx/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
ansible/roles/nginx/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for nginx

+ 129 - 0
ansible/roles/nginx/files/nginxd

@@ -0,0 +1,129 @@
+#!/bin/sh
+#
+# chkconfig: 2345 55 25
+# Description: Nginx init.d script, put in /etc/init.d, chmod +x /etc/init.d/nginx
+#              For Debian, run: update-rc.d -f nginx defaults
+#              For CentOS, run: chkconfig --add nginx
+#
+### BEGIN INIT INFO
+# Provides:          nginx
+# Required-Start:    $all
+# Required-Stop:     $all
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: nginx init.d script
+# Description:       OpenResty (aka. ngx_openresty) is a full-fledged web application server by bundling the standard Nginx core, lots of 3rd-party Nginx modules, as well as most of their external dependencies.
+### END INIT INFO
+#
+
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+DESC="Nginx Daemon"
+NAME=nginx
+PREFIX=/usr/local/openresty/nginx
+DAEMON=$PREFIX/sbin/$NAME
+CONF=$PREFIX/conf/$NAME.conf
+PID=/run/nginx.pid
+SCRIPT=/etc/init.d/$NAME
+
+if [ ! -x "$DAEMON" ] || [ ! -f "$CONF" ]; then
+    echo -e "\033[33m $DAEMON has no permission to run. \033[0m"
+    echo -e "\033[33m Or $CONF doesn't exist. \033[0m"
+    sleep 1
+    exit 1
+fi
+
+do_start() {
+    if [ -f $PID ]; then
+        echo -e "\033[33m $PID already exists. \033[0m"
+        echo -e "\033[33m $DESC is already running or crashed. \033[0m"
+        echo -e "\033[32m $DESC Reopening $CONF ... \033[0m"
+        $DAEMON -s reopen -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC reopened. \033[0m"
+    else
+        echo -e "\033[32m $DESC Starting $CONF ... \033[0m"
+        $DAEMON -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC started. \033[0m"
+    fi
+}
+
+do_stop() {
+    if [ ! -f $PID ]; then
+        echo -e "\033[33m $PID doesn't exist. \033[0m"
+        echo -e "\033[33m $DESC isn't running. \033[0m"
+    else
+        echo -e "\033[32m $DESC Stopping $CONF ... \033[0m"
+        $DAEMON -s stop -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC stopped. \033[0m"
+    fi
+}
+
+do_reload() {
+    if [ ! -f $PID ]; then
+        echo -e "\033[33m $PID doesn't exist. \033[0m"
+        echo -e "\033[33m $DESC isn't running. \033[0m"
+        echo -e "\033[32m $DESC Starting $CONF ... \033[0m"
+        $DAEMON -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC started. \033[0m"
+    else
+        echo -e "\033[32m $DESC Reloading $CONF ... \033[0m"
+        $DAEMON -s reload -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC reloaded. \033[0m"
+    fi
+}
+
+do_quit() {
+    if [ ! -f $PID ]; then
+        echo -e "\033[33m $PID doesn't exist. \033[0m"
+        echo -e "\033[33m $DESC isn't running. \033[0m"
+    else
+        echo -e "\033[32m $DESC Quitting $CONF ... \033[0m"
+        $DAEMON -s quit -c $CONF
+        sleep 1
+        echo -e "\033[36m $DESC quitted. \033[0m"
+    fi
+}
+
+do_test() {
+    echo -e "\033[32m $DESC Testing $CONF ... \033[0m"
+    $DAEMON -t -c $CONF
+}
+
+do_info() {
+    $DAEMON -V
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ reload)
+ do_reload
+ ;;
+ restart)
+ do_stop
+ do_start
+ ;;
+ quit)
+ do_quit
+ ;;
+ test)
+ do_test
+ ;;
+ info)
+ do_info
+ ;;
+ *)
+ echo "Usage: $SCRIPT {start|stop|reload|restart|quit|test|info}"
+ exit 2
+ ;;
+esac
+
+exit 0
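
Note: as the header comments say, this is a SysV init script; a typical install/usage sequence, assuming it is deployed as /etc/init.d/nginxd as the installer task below does:

    chmod +x /etc/init.d/nginxd
    chkconfig --add nginxd          # CentOS; on Debian: update-rc.d nginxd defaults
    service nginxd test             # wraps `nginx -t -c $CONF`
    service nginxd start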

BIN
ansible/roles/nginx/files/openresty-1.11.2.4.tar.gz


BIN
ansible/roles/nginx/files/pcre-8.41.zip


+ 1 - 0
ansible/roles/nginx/files/sslkey/sslkey.key

@@ -0,0 +1 @@
+some sslkey contents

+ 0 - 0
ansible/roles/nginx/files/vhost/some vhost file over here


+ 2 - 0
ansible/roles/nginx/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for nginx

+ 57 - 0
ansible/roles/nginx/meta/main.yml

@@ -0,0 +1,57 @@
+galaxy_info:
+  author: your name
+  description: your description
+  company: your company (optional)
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+
+  min_ansible_version: 1.2
+
+  # If this is a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If Travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # platforms is a list of platforms, and each platform has a name and a list of versions.
+  #
+  # platforms:
+  # - name: Fedora
+  #   versions:
+  #   - all
+  #   - 25
+  # - name: SomePlatform
+  #   versions:
+  #   - all
+  #   - 1.0
+  #   - 7
+  #   - 99.99
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is a keyword that describes
+    # and categorizes the role. Users find roles by searching for tags. Be sure to
+    # remove the '[]' above, if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+    #       Maximum 20 tags per role.
+
+dependencies: []
+  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+  # if you add dependencies to this list.

+ 152 - 0
ansible/roles/nginx/tasks/installer.yml

@@ -0,0 +1,152 @@
+---
+# - name : Include Systembase Yum Install Roles
+#   include_role: 
+#     name: systembase
+#     tasks_from: packages
+#   tags: nginx_installer
+
+- name: Include Systembase Sysctl Configuration Role
+  include_role:
+    name: systembase
+    tasks_from: sysctl
+  tags: nginx_installer
+  
+
+- name: Install Library On Ubuntu
+  apt:  "name={{ item }} state=latest"
+  with_items:
+    - unzip
+    - build-essential
+    - libreadline-dev
+    - libncurses5-dev
+    - libpcre3-dev
+    - libssl-dev
+    - git 
+    - perl 
+    - make
+    - libjemalloc-dev 
+    - daemon
+  when: ansible_distribution == 'Ubuntu'
+  tags: nginx_installer
+
+- name: Install Library On CentOS
+  yum:  "name={{ item }} state=latest"
+  with_items:
+    - unzip
+    - readline-devel
+    - pcre-devel
+    - openssl-devel
+    - gcc
+  when: ansible_distribution == 'CentOS'
+  tags: nginx_installer
+
+- name: Get PCRE Version Of RPM
+  shell: yum list installed pcre-devel | grep custom-rpm | awk '{print $2}' | cut -d'-' -f1
+  register: pcre_version
+  changed_when: False
+  when: ansible_distribution == 'CentOS'  # yum is not available on Ubuntu
+  args:
+    warn: no
+  tags: nginx_installer
+
+- name: Copy PCRE Library On CentOS6
+  copy: src=pcre-8.41.zip dest=/opt/pcre-8.41.zip owner=root group=root mode=0444
+  when: ansible_distribution == 'CentOS' and pcre_version.stdout | version_compare('8.4', '<')
+  tags: nginx_installer
+
+- name: Unzip PCRE Library
+  unarchive:
+    src: /opt/pcre-8.41.zip
+    dest: /opt/
+    remote_src: yes
+  when: ansible_distribution == 'CentOS' and pcre_version.stdout | version_compare('8.4', '<')
+  tags: nginx_installer
+
+#- name: Define Variable For PCRE
+#vars:
+#    pcre_path: "/opt/pcre-8.41"
+#  when: pcre_version.stdout_lines   < '8.4' and ansible_distribution == 'CentOS'
+#  tags: nginx_installer
+ 
+- name: Create Nginx Group
+  group:
+    name: nginx
+    state: present
+  tags: nginx_installer
+
+- name: Create Nginx User
+  user:
+    name: nginx
+    shell: /sbin/nologin
+    group: nginx
+  tags: nginx_installer
+
+- name: Copy Source Package
+  copy: src=openresty-{{ openresty_version }}.tar.gz dest=/root/openresty-{{ openresty_version }}.tar.gz owner=root group=root mode=0644
+  tags: nginx_installer
+
+- name: Copy Install Script When PCRE Version Is Less Than 8.4
+  template: src=openresty.sh dest=/root/openresty.sh owner=root group=root mode=0444
+  vars:
+    pcre_path: "/opt/pcre-8.41"
+  tags: nginx_installer
+  when: ansible_distribution == 'CentOS' and pcre_version.stdout | version_compare('8.4', '<')
+
+- name: Copy Install Script
+  template: src=openresty.sh dest=/root/openresty.sh owner=root group=root mode=0444
+  tags: nginx_installer
+  when: ansible_distribution == 'Ubuntu' or pcre_version.stdout | version_compare('8.4', '>=')
+
+- name: Copy Service
+  copy: src=nginxd dest=/etc/init.d/nginxd owner=root group=root mode=0744
+  tags: nginx_installer
+
+- name: Run Installer
+  shell: bash /root/openresty.sh
+  tags: nginx_installer
+
+- name: Create Cache Directory
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0755
+  with_items:
+    - "/usr/local/openresty/nginx/cache/proxy_cache"
+    - "/usr/local/openresty/nginx/cache/temp_path"
+    - "/usr/local/openresty/nginx/cache/fastcgi_cache"
+    - "/usr/local/openresty/nginx/conf/vhost"
+    - "/usr/local/openresty/nginx/run"
+  tags: nginx_installer
+
+- name: Copy nginx.conf
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  tags: nginx_installer
+
+- name: Create Empty Deny File
+  file:
+    path: /usr/local/openresty/nginx/conf/deny.ip
+    state: touch
+    mode: 0644
+  tags: nginx_installer
+
+#- name: Create Empty Run Path
+#  file:
+#    path: /usr/local/openresty/nginx/run
+#    state: directory
+#    mode: 0755
+
+- name: Remove Pid File & Temp Files
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/run/nginx.pid"
+    - "/root/openresty-1.11.2.4"
+    - "/root/openresty-1.11.2.4.tar.gz"
+    - "/root/openresty.sh"
+    - "/opt/pcre-8.41"
+  tags: nginx_installer
+ 
+- name: Enable Nginx Service
+  service:
+    name: nginxd
+    enabled: yes
+  tags: nginx_installer
+#- name: Starting Nginx Process
+#  shell: /etc/init.d/nginxd start
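
Note: a sketch of how this installer is typically driven (playbook and inventory names are assumptions; openresty_version must come from the role's vars or the command line):

    ansible-playbook -i hosts nginx.yml --tags nginx_installer -e openresty_version=1.11.2.4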

+ 5 - 0
ansible/roles/nginx/tasks/main.yml

@@ -0,0 +1,5 @@
+---
+# tasks file for nginx
+- include: installer.yml
+- include: vhost.yml
+- include: upstream.yml

+ 119 - 0
ansible/roles/nginx/tasks/upstream.yml

@@ -0,0 +1,119 @@
+---
+- name: Disable 118.190.49.243 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        # server 10.30.149.41:50443 weight=30; # 165
+        # server 10.30.148.149:50443 weight=30;  # 243
+        server 10.31.74.200:50443 weight=30;   # 106
+        }
+        
+        upstream java_8080_backend {
+        # server 10.30.149.41:58080 weight=30; # 165
+        # server 10.30.148.149:58080 weight=30;  # 243
+        server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: close_243_javabackend is defined
+  tags: java_upstream
+
+- name: Disable 118.190.137.106 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        # server 10.30.149.41:50443 weight=30; # 165
+        server 10.30.148.149:50443 weight=30;  # 243
+        # server 10.31.74.200:50443 weight=30;   # 106
+        }
+        
+        upstream java_8080_backend {
+        # server 10.30.149.41:58080 weight=30; # 165
+        server 10.30.148.149:58080 weight=30;  # 243
+        # server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: close_106_javabackend is defined
+  tags: java_upstream
+
+- name: Enable 118.190.49.243 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        # server 10.30.149.41:50443 weight=30; # 165
+        server 10.30.148.149:50443 weight=30;  # 243
+        # server 10.31.74.200:50443 weight=30;   # 106
+        }
+
+        upstream java_8080_backend {
+        # server 10.30.149.41:58080 weight=30; # 165
+        server 10.30.148.149:58080 weight=30;  # 243
+        # server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: open_243_javabackend is defined
+  tags: java_upstream
+
+- name: Enable 118.190.137.106 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        # server 10.30.149.41:50443 weight=30; # 165
+        # server 10.30.148.149:50443 weight=30;  # 243
+        server 10.31.74.200:50443 weight=30;   # 106
+        }
+
+        upstream java_8080_backend {
+        # server 10.30.149.41:58080 weight=30; # 165
+        # server 10.30.148.149:58080 weight=30;  # 243
+        server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: open_106_javabackend is defined
+  tags: java_upstream
+
+- name: Enable 118.190.50.165 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        server 10.30.149.41:50443 weight=30; # 165
+        # server 10.30.148.149:50443 weight=30;  # 243
+        # server 10.31.74.200:50443 weight=30;   # 106
+        }
+
+        upstream java_8080_backend {
+        server 10.30.149.41:58080 weight=30; # 165
+        # server 10.30.148.149:58080 weight=30;  # 243
+        # server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: open_165_javabackend is defined
+  tags: java_upstream
+
+- name: Disable 118.190.50.165 Backend 443 & 8080 Port
+  vars:
+    java_backend_upstream: |
+        upstream java_443_backend {
+        # server 10.30.149.41:50443 weight=30; # 165
+        server 10.30.148.149:50443 weight=30;  # 243
+        server 10.31.74.200:50443 weight=30;   # 106
+        }
+
+        upstream java_8080_backend {
+        # server 10.30.149.41:58080 weight=30; # 165
+        server 10.30.148.149:58080 weight=30;  # 243
+        server 10.31.74.200:58080 weight=30;   # 106
+        }
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  when: close_165_javabackend is defined
+  tags: java_upstream
+
+
+- name: Check Configuration
+  shell: /usr/local/openresty/nginx/sbin/nginx -t
+  tags: java_upstream
+
+- name: Reload /usr/local/openresty/nginx/
+  shell: /usr/local/openresty/nginx/sbin/nginx -s reload
+  tags: java_upstream
+
+
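
Note: each task above is gated on an extra variable, so a single backend can be switched from the command line; a sketch (playbook name assumed):

    # Route java traffic away from 118.190.49.243 (only 10.31.74.200 stays active), then test and reload:
    ansible-playbook -i hosts nginx.yml --tags java_upstream -e close_243_javabackend=true
    # Route java traffic to 118.190.49.243 only:
    ansible-playbook -i hosts nginx.yml --tags java_upstream -e open_243_javabackend=true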

+ 35 - 0
ansible/roles/nginx/tasks/vhost.yml

@@ -0,0 +1,35 @@
+---
+- name: Add SSH authorized key for synchronize Module
+  authorized_key:
+    user: root
+    state: present
+    key: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"
+  tags: nginx_vhost
+
+- name: Copy nginx.conf
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+  tags: nginx_vhost
+
+# - name: Remove Old Vhost Directory
+#   file :
+#     path: /usr/local/openresty/nginx/conf/vhost
+#     state: absent
+
+- name: Sync Vhost Files
+  synchronize:  # prompts for a password on the command line if no SSH key is present.
+    src: vhost
+    dest: /usr/local/openresty/nginx/conf/
+    delete: yes
+  tags: nginx_vhost
+
+- name: Sync SSL Certificate
+  copy: src=sslkey dest=/usr/local/openresty/nginx/conf/  owner=root group=root mode=0644  
+  tags: nginx_vhost
+ 
+- name: Check Configuration
+  shell: /usr/local/openresty/nginx/sbin/nginx -t
+  tags: nginx_vhost
+
+- name: Reload /usr/local/openresty/nginx/
+  shell: /usr/local/openresty/nginx/sbin/nginx -s reload
+  tags: nginx_vhost

+ 35 - 0
ansible/roles/nginx/tasks/vhost.yml.bk

@@ -0,0 +1,35 @@
+---
+- name: Add SSH authorized key for synchronize Module
+  authorized_key:
+    user: root
+    state: present
+    key: "{{ lookup('file', '/root/.ssh/id_rsa.pub') }}"
+
+- name: Copy nginx.conf
+  template: src=nginx.conf dest=/usr/local/openresty/nginx/conf/nginx.conf  owner=root group=root mode=0644
+
+#- name: Remove Old Vhost Directory
+#  local_action: file path=/tmp/vhost state=absent
+
+- name: Remove Old Vhost Directory
+  file :
+    path: /usr/local/openresty/nginx/conf/vhost
+    state: absent
+
+- name: Sync Vhost Files
+  synchronize:  # prompts for a password on the command line if no SSH key is present.
+    src: vhost
+    dest: /usr/local/openresty/nginx/conf/
+    delete: yes
+
+# - name: Sync Vhost Config
+#   copy: src=vhost dest=/usr/local/openresty/nginx/conf/  owner=root group=root mode=0644
+# 
+- name: Sync SSL Certificate
+  copy: src=sslkey dest=/usr/local/openresty/nginx/conf/  owner=root group=root mode=0644  
+ 
+- name: Check Configuration
+  shell: /usr/local/openresty/nginx/sbin/nginx -t
+
+- name: Reload /usr/local/openresty/nginx/
+  shell: /usr/local/openresty/nginx/sbin/nginx -s reload

+ 316 - 0
ansible/roles/nginx/templates/nginx.conf

@@ -0,0 +1,316 @@
+# User and group that workers will use.
+user  nginx nginx;
+
+# Number of NGINX workers. Usually it's set, at maximum, 1 worker per CPU core.
+# If the server has 2+ CPU cores but it still does not receive many requests, 
+# it's a good idea to keep the value to 1 so it will avoid creating idle processes.
+worker_processes  auto;
+
+worker_rlimit_nofile 65535;
+
+# NGINX's master process id.
+pid        /run/nginx.pid;
+
+events {
+    # How many client connections each worker can maintain at a time.
+    worker_connections  20480;
+
+    # Using epoll is less CPU intensive when you have to serve thousands of connections, because
+    # instead of scanning all connections to see which file descriptor is ready to read/write,
+    # it will only scan those which are active.
+    # p.s.: It's only available in Linux kernels later than 2.6.
+    use                 epoll;
+
+    # multi_accept tries to accept() as many connections as possible after nginx gets notification about a new connection.
+    multi_accept        on;
+}
+
+
+# HTTP config module
+http {
+    # default charset settings
+    charset       utf-8,gbk;
+
+    # Note that when SSI is enabled the Last-Modified and Content-Length headers are not sent.
+    # default off 
+    # ssi off;
+
+    ##
+    # MIME-TYPE
+    ##
+
+    # You can include other config files using the "include" directive.
+    # Here we are including the mime-types, for example.
+    include       mime.types;
+
+    # The "application/octet-stream" means that the response is a binary file.
+    # If this directive is not defined then response will be set to "text/plain" by default.
+    default_type  application/octet-stream;
+
+    ##
+    # LOG
+    ##
+    log_escape_non_ascii off;
+   
+    log_format main      '$remote_addr - $remote_user [$time_iso8601] "$request" '
+                         '$status $body_bytes_sent "$http_referer" '
+                         '"$http_user_agent" "$http_x_forwarded_for" "$request_time" "$upstream_addr" "$upstream_response_time" "$request_body" "$host"'; 
+
+    log_format real_ip  '$remote_addr - $remote_user [$time_local] $request_time "$request" '
+                        '$status $body_bytes_sent "$http_referer" '
+                        '$http_user_agent $http_x_forwarded_for';
+   
+
+    log_format post_log '$remote_addr - $remote_user [$time_local] $request_time "$request" '
+                        '$status $body_bytes_sent "$request_body" "$http_referer" '
+                        '"$http_user_agent" "$http_x_forwarded_for" ';
+    # Access log: path and type.
+    access_log  /usr/local/openresty/nginx/logs/access.log;
+    # Error log: path and type.
+    error_log  /usr/local/openresty/nginx/logs/error.log;
+
+    ##
+    # TCP
+    ##
+
+    # Optimizes data transfer copying data between one file descriptor and another
+    # instead of reading and copying data to/from user space.
+    sendfile on;
+
+    # Causes NGINX to attempt to send its HTTP response head in one packet,
+    # instead of using partial frames. This is useful for prepending headers before calling sendfile,
+    # or for throughput optimization.
+    tcp_nopush on;
+
+    # Disables the Nagle algorithm.
+    # It's useful for sending frequent small bursts of data in real time.
+    # tcp_nodelay  off;
+    tcp_nodelay  on;
+
+    # Timeout during which a keep-alive client connection will stay open to serve 
+    # all the requested files from the server side.
+    keepalive_timeout  30s;
+
+    ##
+    # GZIP
+    ##
+
+    # In production you MUST set gzip to "on" in order to save bandwidth. Web browsers
+    # which handle compressed files (all recent ones do) will receive a much smaller
+    # version of the server response.
+    gzip  on;
+
+    # Enables compression for a given HTTP request version.
+    # Nginx talks HTTP/1.1 to the browser and HTTP/1.0 to the backend server,
+    # so allowing 1.0 here lets proxied responses be compressed as well.
+
+    # Compression level 1 (fastest) to 9 (slowest).
+    # online suggest
+    # gzip_comp_level 6;
+    gzip_comp_level 2;
+    
+    # Enables compression for all proxied requests.
+    gzip_proxied any;
+    
+    # Minimum length of the response (bytes). Responses shorter than this length will not be compressed.
+    # online suggest
+    # gzip_min_length 10000;
+    gzip_min_length  1k;
+
+    # Enables compression for additional MIME-types.
+    # online suggest 
+    # gzip_types  text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+    gzip_types       text/plain application/x-javascript text/css application/xml;
+
+    # Disables gzip compression for User-Agents matching the given regular expression.
+    # In this case it would disable gzip for old IE versions that can't handle compressed responses.
+    # gzip_disable "MSIE [1-6] \.";
+
+    # Enables or disables inserting the “Vary: Accept-Encoding” response header field 
+    # if the directives gzip, gzip_static, or gunzip are active.
+    gzip_vary on;
+
+    # Sets the number and size of buffers used to compress a response. 
+    # By default, the buffer size is equal to one memory page. 
+    # This is either 4K or 8K, depending on a platform.
+    gzip_buffers     4 16k;
+
+    ##
+    # Client
+    ##
+
+    # Directive assigns the maximum number and size of buffers for large headers to read from the client request.
+    # The request line cannot be bigger than one buffer, otherwise nginx returns "Request URI too large" (414).
+    # A single header line also cannot exceed one buffer, otherwise the client gets "Bad request" (400).
+    large_client_header_buffers 4 4k;
+
+    # Sets the maximum accepted request body size (e.g. file uploads) to 20 MB.
+    client_max_body_size 20m;
+
+    # Directive sets the headerbuffer size for the request header from client.
+    client_header_buffer_size 4k;
+
+    ##
+    # open_file_cache
+    ##
+
+    # specifies the maximum number of entries in the cache.
+    # When the cache overflows, the least recently used (LRU) items will be removed;
+    # open_file_cache max=65536 inactive=20s;
+    open_file_cache off;
+    # specifies the time when need to check the validity of the information about the item in open_file_cache.
+    # open_file_cache_valid    30s;
+    # defines the minimum use number of a file within the time specified in the directive parameter inactive in open_file_cache.
+    # If use more than the number, the file descriptor will remain open in the cache.
+    # open_file_cache_min_uses 2;
+    # specifies whether or not to cache errors when searching for a file.
+    # open_file_cache_errors   on;
+
+
+    ##
+    # fastcgi settings
+    ##
+    fastcgi_connect_timeout 300;
+    fastcgi_send_timeout 300;
+    fastcgi_read_timeout 300;
+    fastcgi_buffer_size 64k;
+    fastcgi_buffers 16 64k;
+    
+
+    ##
+    # fastcgi cache settings
+    ##
+
+    add_header                  rt-Fastcgi-Cache $upstream_cache_status;
+    fastcgi_cache_path          /usr/local/openresty/nginx/cache/fastcgi_cache/ levels=2:2 keys_zone=cgi_cache:10m inactive=2h max_size=2g;
+    fastcgi_cache_key           "$scheme$request_method$host$request_uri";
+    fastcgi_cache_use_stale     error timeout invalid_header http_500;
+    fastcgi_ignore_headers      Cache-Control Expires Set-Cookie;
+    
+
+    ##
+    # proxy cache  settings
+    ##
+
+    # Sets the path and other parameters of a cache. Cache data are stored in files. 
+    # The file name in a cache is a result of applying the MD5 function to the cache key.
+    proxy_cache_path    /usr/local/openresty/nginx/cache/proxy_cache/ levels=2:2 keys_zone=cgi_proxy:10m inactive=2h max_size=2g;
+    
+    # Defines a directory for storing temporary files with data received from proxied servers.
+    # Up to three-level subdirectory hierarchy can be used underneath the specified directory
+    # proxy_temp_path path [level1 [level2 [level3]]];
+    proxy_temp_path     /usr/local/openresty/nginx/cache/temp_path/ 1 2;
+
+    proxy_temp_file_write_size 128k;
+
+    proxy_max_temp_file_size 0;
+
+    # Defines a timeout for establishing a connection with a proxied server. 
+    # It should be noted that this timeout cannot usually exceed 75 seconds.
+    proxy_connect_timeout      30s;
+
+    # Sets a timeout for transmitting/reading a request to the proxied server. 
+    # The timeout is set only between two successive write/read operations, not for the transmission of the whole request. 
+    # If the proxied server does not receive anything within this time, the connection is closed.
+    proxy_send_timeout         30s;
+    proxy_read_timeout         30s;
+
+    # Sets the size of the buffer size used for reading the first part of the response received from the proxied server. 
+    proxy_buffer_size          128k;
+
+    # When buffering is disabled, the response is passed to a client synchronously, immediately as it is received. 
+    # nginx will not try to read the whole response from the proxied server. 
+    # The maximum size of the data that nginx can receive from the server at a time is set by the proxy_buffer_size directive.
+    proxy_buffering            on;
+
+    # Sets the number and size of the buffers used for reading a response from the proxied server, for a single connection. 
+    # By default, the buffer size is equal to one memory page. This is either 4K or 8K, depending on a platform.
+    proxy_buffers              100 128k;
+
+    # When buffering of responses from the proxied server is enabled, 
+    # limits the total size of buffers that can be busy sending a response to the client while the response is not yet fully read. 
+    # In the meantime, the rest of the buffers can be used for reading the response and, if needed, buffering part of the response to a temporary file. 
+    proxy_busy_buffers_size    128k;
+    
+
+
+    # Load modular configuration files from the conf/vhost directory.
+    include vhost/*.conf;
+    include vhost/pay/development/*.conf;
+    include vhost/pay/test/*.conf;
+    include vhost/pay/production/*.conf;
+    
+    ##
+    # CLUSTERS
+    ##
+    
+    # Below the "upstream" is defining some php_backend clusters that are serving a PHP-FPM app.
+    # They will handle the requets proxied by NGINX.
+
+
+    # java commsunny group
+    
+#    upstream java_443_backend {
+#    server 10.30.149.41:50443 weight=30;
+#    server 10.30.148.149:50443 weight=30;
+#    server 10.31.74.200:50443 weight=30;
+#    }
+#
+#    upstream java_8080_backend {
+#    server 10.31.74.200:58080 weight=30;
+#    }
+
+# java commsunny group
+
+
+{% if java_backend_upstream is defined %}
+{{ java_backend_upstream }}
+{% else %}
+upstream java_443_backend {
+    # server 10.30.149.41:50443 weight=30; # 165
+    server 10.30.148.149:50443 weight=30;  # 243
+    server 10.31.74.200:50443 weight=30;   # 106
+}
+
+upstream java_8080_backend {
+    # server 10.30.149.41:58080 weight=30; # 165
+    server 10.30.148.149:58080 weight=30;  # 243
+    server 10.31.74.200:58080 weight=30;   # 106
+}
+{% endif %}
+
+upstream opscenter_backend {
+    server 10.31.74.200:8888;
+}
+
+upstream jenkins_backend {
+    server 10.31.88.120:8080;
+}
+
+upstream php_online_backend_http {
+    server 10.28.225.116:80; 
+#    server 10.25.76.207:80;  
+    ip_hash;
+}
+
+upstream php_online_backend_https {
+    server 10.28.225.116:443;
+#    server 10.25.76.207:443;
+    ip_hash; 
+}
+
+upstream php_test_online_backend_http{
+    server 10.28.81.15:80; #
+}
+    include deny.ip; 
+
+#    server {
+#        listen 80 default;
+	    # listen <%= @ipaddress_em1 %>:80 default;
+        # listen <%= @ipaddress_eth0 %>:80 default;
+        # listen 182.18.47.10:80 default;
+#        return 500;
+#    }
+
+}

+ 28 - 0
ansible/roles/nginx/templates/openresty.sh

@@ -0,0 +1,28 @@
+#!/bin/bash
+
+cd /root
+tar -zxf openresty-{{ openresty_version }}.tar.gz
+cd openresty-{{ openresty_version }}
+./configure \
+--prefix=/usr/local/openresty \
+--user=nginx \
+--group=nginx \
+--with-http_ssl_module \
+--with-http_stub_status_module \
+--with-http_gunzip_module \
+--with-http_gzip_static_module \
+--with-http_realip_module \
+{% if pcre_path is defined %}
+--with-pcre={{ pcre_path }} \
+{% else %}
+--with-pcre \
+{% endif %}
+--with-pcre-jit \
+--with-http_flv_module \
+--with-file-aio \
+--with-http_addition_module
+#--add-module=../ngx_cache_purge-master
+
+make -j{{ ansible_processor_vcpus }} && make install
+make clean
+cd ..

+ 2 - 0
ansible/roles/nginx/tests/inventory

@@ -0,0 +1,2 @@
+localhost
+

+ 0 - 0
ansible/roles/nginx/tests/test.yml


Some files were not shown in this diff because too many files have changed