最近写了一个golang ssh客户端,参照网上的ssh client做了些修改,如果需要进行操作审计,那用golang生成的ssh代替原生的ssh就非常简单了


代码如下

package main

import (
   "fmt"
   "golang.org/x/crypto/ssh"
   "golang.org/x/crypto/ssh/terminal"
   "os"
   "log"
   "io/ioutil"
   "flag"
   "os/user"
   "time"
   "net"
)

// Client opens an interactive SSH session to ip (a "host:port" address) as
// userName, authenticating with the private key file at sshKey. The local
// terminal is switched to raw mode and wired to a remote shell; the function
// returns when the remote shell exits. Fatal key-loading errors terminate
// the process; connection/session errors are printed and the function returns.
func Client(userName, ip, sshKey string) {

	privateKey, err := ioutil.ReadFile(sshKey)
	if err != nil {
		log.Fatal(err)
	}

	signer, err := ssh.ParsePrivateKey(privateKey)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ClientConfig{
		User: userName,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
		// SECURITY: the server host key is accepted unconditionally, which
		// permits man-in-the-middle attacks. Use a known_hosts-backed
		// callback (e.g. knownhosts.New) in production.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
		Timeout: 30 * time.Second,
	}

	client, err := ssh.Dial("tcp", ip, config)
	if err != nil {
		fmt.Println("建立连接: ", err)
		return
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		fmt.Println("创建Session出错: ", err)
		return
	}
	defer session.Close()

	// Put the local terminal into raw mode so keystrokes (including control
	// characters) are forwarded to the remote side untouched.
	fd := int(os.Stdin.Fd())
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		fmt.Println("创建文件描述符: ", err)
		return
	}
	// BUGFIX: restore the terminal on every exit path. Previously this was
	// deferred only after GetSize, so an early return left the user's
	// terminal stuck in raw mode.
	defer terminal.Restore(fd, oldState)

	session.Stdout = os.Stdout
	session.Stderr = os.Stderr
	session.Stdin = os.Stdin

	termWidth, termHeight, err := terminal.GetSize(fd)
	if err != nil {
		fmt.Println("获取窗口宽高: ", err)
		return
	}

	modes := ssh.TerminalModes{
		ssh.ECHO:          1,
		ssh.TTY_OP_ISPEED: 14400,
		ssh.TTY_OP_OSPEED: 14400,
	}

	// Request a PTY sized like the local terminal so full-screen programs
	// (vim, top, ...) render correctly.
	if err := session.RequestPty("xterm-256color", termHeight, termWidth, modes); err != nil {
		fmt.Println("创建终端出错: ", err)
		return
	}

	if err := session.Shell(); err != nil {
		fmt.Println("执行Shell出错: ", err)
		return
	}

	// Block until the remote shell terminates.
	if err := session.Wait(); err != nil {
		fmt.Println("执行Wait出错: ", err)
		return
	}
}

// main parses command-line flags (-p port, -i key file, -ip host), prints a
// banner, and starts an interactive SSH session via Client.
func main() {

	usr, err := user.Current()
	if err != nil {
		// BUGFIX: previously the error was only printed and usr (nil) was
		// dereferenced below. Abort instead.
		log.Fatal(err)
	}

	// Default private key: ~/.ssh/id_rsa
	var sshF = usr.HomeDir + "/.ssh/id_rsa"

	p := flag.String("p", "22", "输入端口")
	i := flag.String("i", sshF, "输入所需的私钥")
	ip := flag.String("ip", "", "输入ip地址")
	flag.Parse()

	// A target host is mandatory; fail early with usage instead of trying
	// to dial ":<port>".
	if *ip == "" {
		flag.Usage()
		return
	}

	fmt.Println("  __              _               _")
	fmt.Println(" / _|_   _ _ __  | |___   __   __| | _____   _____  _ __  ___")
	fmt.Println("| |_| | | | '_ \\ | __\\ \\ / /  / _ ` |/ _ \\ \\ / / _ \\| '_ \\/ __|")
	fmt.Println("|  _| |_| | | | || |_ \\ V /  | (_| |  __/\\ V / (_) | |_) \\__ \\")
	fmt.Println("|_|  \\__,_|_| |_(_)__| \\_/    \\__,_|\\___| \\_/ \\___/| .__/|___/")
	fmt.Println("                                                   |_|")
	fmt.Println("     ,           ,")
	fmt.Println("    /             \\")
	fmt.Println("   ((__---,,,---__))")
	fmt.Println("      (_) O O (_)_________")
	fmt.Println("         \\ _ /            |\\")
	fmt.Println("          o_o \\ voilet    | \\")
	fmt.Println("               \\   _____  |  *")
	fmt.Println("                |||   WW|||")
	fmt.Println("                |||     |||")
	fmt.Println("")
	fmt.Println("--------------------------------------------------------------")
	fmt.Println("如有任何系统问题,请随时联系运维人员")
	fmt.Println("运维邮箱: voilet@qq.com")
	fmt.Println("友情提示:谨慎操作,规避风险")
	fmt.Println("ssh -i ./ssh/id_rsa -p 2222 -ip 192.168.1.1")
	// BUGFIX: the help text claimed -P, but the flag defined above is -p.
	fmt.Println("可使用的参数 -p指定端口 -i指定key -ip需要登录的主机")
	fmt.Println("-------------------------------------------------------------")

	// BUGFIX: usr.Username is the login name; usr.Name is the display
	// (GECOS) name, which may be empty or contain spaces and is not a
	// valid SSH login.
	Client(usr.Username, *ip+":"+*p, *i)
}

二月 10th, 2017

Posted In: linux系统

标签:,

简介

在我看来基于Bind的智能DNS方案主要包括两个部分:Geolocation和Dynamic Record。国内的业界对智能DNS的定位也无非这两点,但是我所理解的智能DNS是建立在这两条基础上的智能调度系统,比如我有三个负载能力不同的数据中心,DNS可以根据数据中心的metrics(这里可能包括带宽,服务能力等)实现流量的调度,限于个人水平个人未在这个方向有所实践,这个话题留作以后讨论,所以本文只针对前两个问题。由于Bind本身的配置可运维性比较差,这就引出本文主要讨论的DLZ。

原理

DLZ实际上就是扩展了Bind,将Zonefile的内容放到外部数据库里,然后给Bind配置查询语句从数据库里查询记录。当修改数据库里的记录信息的时候,无需重启Bind,下次客户请求时直接就能返回新的记录了。另外,DLZ本身不支持缓存,所以需要自己根据实际情况解决查询的问题。

(更多…)

二月 14th, 2016

Posted In: linux系统

下面就整理一下全部的IPMI的资料

http://www.openfusion.net/linux/ipmi_on_centos

参考上面的连接,很多高级功能。

IPMI设置

IPMI需要进入bios,进行设置IP地址。这个本身没什么特别。不过有时候你会发现设置完IPMI的IP地址后,无法访问,也无法ping通。你会以为IPMI出问题了。

这个时候,你需要完全拔掉机器的电源,等待5分钟,把网线插入IPMI的网卡。这个时候,插上电源,就算不开机,也应该可以ping通,访问。这个问题折腾了我很长时间。

机器加电前,需要把IPMI的网线插上,这样可以初始化。

另外默认IPMI是DHCP获得IP。

IPMI访问

国产服务器的IPMI访问的用户和密码,基本就是这些。这个和主板有关。我见过的两种主板的IPMI就是超微和泰安的。他们间功能上有点区别,默认的密码也是不一样。

联想:用户名:albert  pass:admin

超微:用户名:ADMIN  pass:ADMIN

泰安的主板:user:root  pass:superuser

浪潮服务器:user:root  pass:superuser

IPMI的功能

通过web访问IPMI,你可以实现对机器的操作

  • 开机,关机,重启,查看机器当前的通电状态
  • 安装系统。有些服务器的IPMI,没有内置iKVM,无法实现系统的安装。这个估计和成本有关
  • 修改IPMI的网络和IP地址
  • 修改bios设置,可以通过IPMI进入bios
  • 设置Raid。这个目前对鼠标支持很差。要想设置raid,就只能用键盘操作。这方面IBM,Dell,HP做的不错,他们完全是可以使用鼠标操作。

别的功能,就基本不太实用。另外通过IPMI,其实是可以获得当前机器的电量消耗等参数。这个后续再深入研究。

对于超微的机器,可以设置IPMI的网卡,采用共享,就是让eth0(第一块网卡)和IPMI网卡共有。这样可以节省一条网线。还有就是IPMI专用的网卡是百兆。如果是共用,那么是千兆。

超微的IPMI,可以设置vlan,就是把IPMI口独立到一个vlan里。这样共享,也不会有安全性的问题。泰安的IPMI,是没有vlan这个功能。 (更多…)

六月 4th, 2014

Posted In: linux系统

由于 Apache Commons Fileupload 文件上传组件的问题,导致全系的 Tomcat 版本存在 DoS 安全漏洞。所影响的版本包括:

- - Commons FileUpload 1.0 to 1.3
- - Apache Tomcat 8.0.0-RC1 to 8.0.1
- - Apache Tomcat 7.0.0 to 7.0.50
- - Apache Tomcat 6 and earlier are not affected

解决该漏洞的方法:

- - Upgrade to Apache Commons FileUpload 1.3.1 or later once released
- - Upgrade to Apache Tomcat 8.0.2 or later once released
- - Upgrade to Apache Tomcat 7.0.51 or later once released
- - Apply the appropriate patch
  - Commons FileUpload: http://svn.apache.org/r1565143
  - Tomcat 8: http://svn.apache.org/r1565163
  - Tomcat 7: http://svn.apache.org/r1565169
- - Limit the size of the Content-Type header to less than 4091 bytes

exploit:
http://www.exploit-db.com/exploits/18619/

QQ20140207-2

二月 7th, 2014

Posted In: linux系统

最近一直在研究saltstack,在此感谢”绿肥@灿哥”和saltstack群里的朋友

saltstack地址http://wiki.saltstack.cn/

1

(更多…)

十二月 11th, 2013

Posted In: linux系统

简介

根据官方文档的定位,Cobbler首要的是快速设置网络安装环境的Linux安装服务器;但其功能不限于此,它还可以管理配置,管理DNS,DHCP,TFTP和rsync,软件包升级和电源管理等;个人感觉有些乱,作为一个开源项目明白自己想要解决什么问题并把这个问题解决到极致就够了。

说明

血与泪的经历:

  • Cobbler2.2(来自CentOS5.5) 安装CentOS5.5和CentOS6.4没有问题,安装Ubuntu12.04失败
  • Cobbler2.4(来自CentOS6.4) 安装CentOS5.5和CentOS6.4没有问题,安装Ubuntu12.04没问题

基本概念

PXE原理

PXE原理

  1. 客户端发起Discover包,通过flag说明自身的PXE拓展信息;
  2. 服务器响应Offer包,告知客户端下边去找哪台服务器;
  3. 客户端发送Request包
  4. 服务器发送ACK包
  5. 客户端通过TFTP协议请求pxelinux.0等文件
  6. 客户端加载并启动系统

Cobbler模型

Cobbler模型

这张图画出了Cobbler的模型,越往上的对象越基础越通用,自上而下不断的添加一些新的东西进来让其满足个性化的需求。这里我们需要重点关注的是distro和profile这两个概念。 (更多…)

十一月 5th, 2013

Posted In: linux系统

在安装python的时候报 python version 2.7 required which was not found in the registry
网上找了下发现有人发以下代码,解决安装找不到路径的问题

# script to register Python 2.0 or later for use with win32all
# and other extensions that require Python registry settings
#
# written by Joakim Loew for Secret Labs AB / PythonWare
#
# source:
# http://www.pythonware.com/products/works/articles/regpy20.htm
#
# modified by Valentine Gogichashvili as described in http://www.mail-archive.com/distutils-sig@python.org/msg10512.html

import sys

from _winreg import *

# tweak as necessary
# Interpreter version ("2.7" style) and install prefix, read from the
# running interpreter so the registered paths match this installation.
version = sys.version[:3]
installpath = sys.prefix

# Registry key under which Windows installers look up Python, plus the
# value names and the semicolon-separated PythonPath to store there.
regpath = "SOFTWARE\\Python\\Pythoncore\\%s\\" % (version)
installkey = "InstallPath"
pythonkey = "PythonPath"
pythonpath = "%s;%s\\Lib\\;%s\\DLLs\\" % (
    installpath, installpath, installpath
)

def RegisterPy():
    # Register the running interpreter in the per-user Windows registry
    # (HKEY_CURRENT_USER) so installers that read the registry can find it.
    # Python 2 / _winreg code; prints status instead of raising.
    try:
        reg = OpenKey(HKEY_CURRENT_USER, regpath)
    except EnvironmentError as e:
        # Key does not exist yet: create it and record both paths.
        try:
            reg = CreateKey(HKEY_CURRENT_USER, regpath)
            SetValue(reg, installkey, REG_SZ, installpath)
            SetValue(reg, pythonkey, REG_SZ, pythonpath)
            CloseKey(reg)
        except:
            # NOTE(review): bare except swallows the real cause; consider
            # catching WindowsError/EnvironmentError and reporting it.
            print "*** Unable to register!"
            return
        print "--- Python", version, "is now registered!"
        return
    # Key exists: accept it only if it already points at this installation.
    if (QueryValue(reg, installkey) == installpath and
        QueryValue(reg, pythonkey) == pythonpath):
        CloseKey(reg)
        print "=== Python", version, "is already registered!"
        return
    # Key exists but points elsewhere — most likely another Python install.
    CloseKey(reg)
    print "*** Unable to register!"
    print "*** You probably have another Python installation!"

九月 25th, 2013

Posted In: linux系统

引言:关于运维

运维的工作主要在2方面:

1:状态的管理
2:系统性能调优
这里主要简介下运维状态的管理:

对于运维来说,基于状态的配置管理已经向自动化迈进了一大步,以状态为核心的运维,让状态本身有了可管理性;在运维过程中我们会发现,同样的一个配置,我们会在不同的时间,不同的地点一次在一次的配置,这个时候,配置管理就有了重复性;有的甚至是原封不动的重复,而另外一些则是按照一定的规律在发展,这些按照一定规律发展的配置,就是可预测的.综上我认识的,我们运维工作的本身是可管理,可重复,可预测的.基于这样的理念,我们就可以更进一步的推进我们的运维自动化,甚至到智能化. (更多…)

九月 10th, 2013

Posted In: linux系统

前阵子在一朋友blog看到一个python查找 webshell脚本的代码,自己拿过来改了下,新增白名单功能,新增发现恶意代码发送邮件报警功能,现发出来供大家参考,如有需要的可以在自己的服务器上跑下试试

#!/usr/bin/env python
#-*- coding: utf-8 -*-
#=============================================================================
#     FileName:
#         Desc:
#       Author: 苦咖啡
#        Email: voilet@qq.com
#     HomePage: http://blog.kukafei520.net
#      Version: 0.0.1
#      History:
#=============================================================================

import os
import sys
import re
import smtplib

# Mail settings used by sendmail() below.
# NOTE(review): fromaddr holds an SMTP server hostname, not a mailbox
# address — presumably it should be a real From: address; verify.
fromaddr = "smtp.qq.com"
toaddrs = ["voilet@qq.com"]
username = "voilet"
password = "xxxxxx"

# Whitelist: file names that the scanner must never flag.
pass_file = ["api_ucenter.php"]

#定义发送邮件函数
def sendmail(toaddrs,sub,content):
    '发送邮件模块'
    # Add the From: and To: headers at the start!
    msg = ("From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n"
           % (fromaddr, ", ".join(toaddrs), sub))
    msg += content
    server = smtplib.SMTP('mail.funshion.com', 25,)
    server.login(username, password)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
 (更多…)

七月 19th, 2013

Posted In: linux系统

在调试naxsi的时候遇到很多的问题,最后在一老外blog发现了这个文章,写的不错,测试能过,目前正在测试其它功能,后期会写一些使用方法和案例

Install the redhat 5 repository package:
rpm -ivh --nosignature http://rpm.axivo.com/redhat/axivo-release-5-1.noarch.rpm

Install the redhat 6 repository package:
rpm -ivh --nosignature http://rpm.axivo.com/redhat/axivo-release-6-1.noarch.rpm

yum --disablerepo=* --enablerepo=axivo list available

Install Procedure
For rules or additional configuration settings, please use the Naxsi Wiki.

In this example, we will install Nginx with Naxsi 0.47 firewall module and user interface on CentOS 5 64bits.
This is a list of useful locations installed by Axivo Nginx package:

  • /etc/nginx – stores all global configuration files
  • /etc/nginx.d – stores all host configuration files
  • /var/lib/nginx – stores Nginx cache data
  • /var/log/nginx – stores Nginx logs

If you did not installed yet the Axivo repository, please follow the instructions listed on main page.

1) Install the rpm’s and their dependencies:

2) Enable the nginx and naxsi-ui services:

3) Create a new MySQL database and assign proper permissions to it:

4) Edit the /etc/nginx/naxsi-ui.conf configuration file and adjust the values inside:

Naxsi needs full permissions to create the initial MySQL data. The easiest way to get everything rolling is to use first your root MySQL user ID. Once the table schemas are generated, you can use the naxsiuser ID and password.

5) Edit the /etc/nginx/nginx.conf global configuration and include the /etc/nginx/naxsi_core.rules file:

Code:
http {
	include mime.types;
	...
	include /etc/nginx/naxsi_core.rules;
	include /etc/nginx.d/*.conf;
}

6) Edit the /etc/nginx.d/localhost.conf host configuration, then include the /etc/nginx/naxsi.rules file and proxied requests:

Code:
server {
	listen 192.168.1.8:80 default_server;
	server_name www.axivo.com;
	...
	location / {
		try_files $uri $uri/ /index.html;
		include naxsi.rules;
	}
	...
	location /RequestDenied {
		proxy_pass http://192.168.1.8:8080;
		internal;
	}
	...
}

7) Start the nginx and naxsi-ui daemons:

You are all set, enjoy the new layer of security on your site.

一月 18th, 2013

Posted In: linux系统

NAXSI setup howto

This document describes the full process of configuring NAXSI.

Installing nginx + naxsi : From package

Packages of naxsi exist for :

  • Debian
  • FreeBSD
  • NetBSD
  • CentOS/Redhat (See axivo repositories)

 

If you’re unlucky, refer to source compilation.

Installing nginx + naxsi : From sources

Nginx doesn’t support (by design) loadable modules. Extra modules must be added during compilation. Here we will install it from the source, but (if you’re lucky) you might as well find nginx+naxsi already packaged in your favorite distribution.

If you’re not, here is the way to go :

wget http://nginx.org/download/nginx-x.x.xx.tar.gz
wget http://naxsi.googlecode.com/files/naxsi-x.xx.tar.gz
tar xvzf nginx-x.x.xx.tar.gz 
tar xvzf naxsi-x.xx.tar.gz
cd nginx-x.x.xx/

[install libpcre (and libssl if you want https, along with zlib for gzip support) libs with your favorite package manager, naxsi relies on it for regex]

./configure --add-module=../naxsi-x.xx/naxsi_src/ [add/remove your favorite/usual options]
make
make install

my personal “building” options being, here :

./configure --conf-path=/etc/nginx/nginx.conf  --add-module=../naxsi-x.xx/naxsi_src/   --error-log-path=/var/log/nginx/error.log     --http-client-body-temp-path=/var/lib/nginx/body     --http-fastcgi-temp-path=/var/lib/nginx/fastcgi     --http-log-path=/var/log/nginx/access.log     --http-proxy-temp-path=/var/lib/nginx/proxy     --lock-path=/var/lock/nginx.lock     --pid-path=/var/run/nginx.pid     --with-http_ssl_module     --without-mail_pop3_module     --without-mail_smtp_module     --without-mail_imap_module     --without-http_uwsgi_module     --without-http_scgi_module     --with-ipv6  --prefix=/usr

Important note for source compilation

You need to remember this if you are new to nginx :

NGINX will decide the order of modules according the order of the module’s directive in nginx’s ./configure. So, no matter what (except you reallyknow what you are doing) put naxsi first in your ./configure.

If you don’t do so, you might run into various problems, from random / unpredictable behaviors to non-effective WAF.

Initial setup

I want to configure NAXSI for my company’s website :

www.nbs-system.com

So, let’s have a look at the initial setup :

/etc/nginx/nginx.conf :

user www-data;
worker_processes  1;
worker_rlimit_core  500M;
working_directory   /tmp/;

error_log  /var/log/nginx/error.log;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
    use epoll;
    # multi_accept on;                                                                                                                      
}

http {
    include        /etc/nginx/naxsi_core.rules;
    include       /etc/nginx/mime.types;
    server_names_hash_bucket_size 128;
    access_log  /var/log/nginx/access.log;

    sendfile        on;
    keepalive_timeout  65;
    tcp_nodelay        on;

    gzip  on;
    gzip_disable "MSIE [1-6]\.(?!.*SV1)";
    include /etc/nginx/sites-enabled/*;
}

Notice the /etc/nginx/naxsi_core.rules include. This file is provided in the project (naxsi_config/), and it contains the rules. As you might have noticed, these are not signatures, in the classic WAF sense, but simple “score rules”.

Now, let’s have a look at my sites-enabled/default :

server {
 proxy_set_header Proxy-Connection "";
listen       *:80;
access_log  /tmp/nginx_access.log;
error_log  /tmp/nginx_error.log debug;

location / {
     include    /etc/nginx/nbs.rules;
     proxy_pass http://194.213.124.111/;
     proxy_set_header Host www.nbs-system.com;
   }

#This location is where, in learning mode, to-be-forbidden requests will be "copied"
#In non-learning mode, it's where denied request will land, so feel free to do whatever you want, 
#return 418 I'm a teapot, forward to a custom webpage with 
#a captcha to help track false-positives (see contrib for that),
#whatever you want to do !
 location /RequestDenied {
     proxy_pass http://127.0.0.1:4242;
   }
}

/etc/nginx/nbs.rules :

LearningMode; #Enables learning mode
SecRulesEnabled;
#SecRulesDisabled;
DeniedUrl "/RequestDenied";

include "/tmp/naxsi_rules.tmp";

## check rules
CheckRule "$SQL >= 8" BLOCK;
CheckRule "$RFI >= 8" BLOCK;
CheckRule "$TRAVERSAL >= 4" BLOCK;
CheckRule "$EVADE >= 4" BLOCK;
CheckRule "$XSS >= 8" BLOCK;

/tmp/naxsi_rules.tmp is empty for now, it’ll be filled at runtime by the learning daemon.

Starting the LearningMode phase

Once you performed a bit of browsing, learning daemons db will be populated with generated exceptions. You can as well populate naxsi’s db from log files (options -l of nx_intercept). If you have real users on your website and/or you are not in a hurry, this option might be better, as it allows you not to spend time to do the whitelist configuration. See LearningFromLogFiles for details.

The web interface is minimalist, and has the following features :

  • Whitelist generation : Main goal of the daemon. From naxsi catched exceptions, generate approriate whitelists. Rules are presented in raw form, as well as in optimized form. For example, after some browsing I got the following rules :
 ########### Optimized Rules Suggestion ##################
 # total_count:59 (23.69%), peer_count:1 (100.0%) | parenthesis, probable sql/xss
 BasicRule wl:1011 "mz:$HEADERS_VAR:cookie";
 # total_count:59 (23.69%), peer_count:1 (100.0%) | parenthesis, probable sql/xss
 BasicRule wl:1010 "mz:$HEADERS_VAR:cookie";
 # total_count:59 (23.69%), peer_count:1 (100.0%) | mysql keyword (|)
 BasicRule wl:1005 "mz:$HEADERS_VAR:cookie";
 # total_count:53 (21.29%), peer_count:1 (100.0%) | double encoding !
 BasicRule wl:1315 "mz:$HEADERS_VAR:cookie";

 

  • Statistics generation : You can get reports on source / types of attacks, as well as counts for each kind of exceptions :

The global idea, indeed, is to use the whitelists generated by naxsi, include them into your naxsi’s configuration, and then reload nginx.

For advanced whitelists, such as user forms, please see AdvancedWhitelists section. Following section deals as well with user forms in a more classic way.

user forms

Now comes the “tricky” part of whitelists triggers : USER FORMS !

Yes, fields with ‘free’ user input, such as registration forms, search boxes and so on are typically parts that you should take a great care of.

For example, my company’s website contains a “contact” form with lastname, firstname, email adress, and a free text zone. I decided that I will allow simple/double quotes as well as coma and semi-coma in the last/first names fields, and included as well parenthesis for the free text zone. So, I will simply fill the form and learnign daemons will generate the associated whitelists.

Once I’ve filled the forms, if I look at my nx_extract interface, I will see that new exceptions have been generated :

rule 1007(--) authorized on url for argument 'cf2_field_1' of zone BODY
rule 1010(() authorized on url for argument 'cf2_field_1' of zone BODY
rule 1011()) authorized on url for argument 'cf2_field_1' of zone BODY
rule 1013(') authorized on url for argument 'cf2_field_1' of zone BODY
rule 1015(,) authorized on url for argument 'cf2_field_1' of zone BODY
rule 1306(') authorized on url for argument 'cf2_field_1' of zone BODY
rule 1308(() authorized on url for argument 'cf2_field_1' of zone BODY
rule 1309()) authorized on url for argument 'cf2_field_1' of zone BODY
rule 1007(--) authorized on url for argument 'cf2_field_2' of zone BODY
rule 1013(') authorized on url for argument 'cf2_field_2' of zone BODY
rule 1015(,) authorized on url for argument 'cf2_field_2' of zone BODY
rule 1306(') authorized on url for argument 'cf2_field_2' of zone BODY
rule 1007(--) authorized on url for argument 'cf2_field_3' of zone BODY
rule 1013(') authorized on url for argument 'cf2_field_3' of zone BODY
rule 1015(,) authorized on url for argument 'cf2_field_3' of zone BODY
rule 1306(') authorized on url for argument 'cf2_field_3' of zone BODY
rule 1007(--) authorized on url for argument 'cf2_field_4' of zone BODY
rule 1007(--) authorized on url for argument 'cf2_field_5' of zone BODY
rule 1007(--) authorized on url for argument 'cf2_field_7' of zone BODY
rule 1010(() authorized on url for argument 'cf2_field_7' of zone BODY
rule 1011()) authorized on url for argument 'cf2_field_7' of zone BODY
rule 1013(') authorized on url for argument 'cf2_field_7' of zone BODY
rule 1015(,) authorized on url for argument 'cf2_field_7' of zone BODY
rule 1306(') authorized on url for argument 'cf2_field_7' of zone BODY
rule 1308(() authorized on url for argument 'cf2_field_7' of zone BODY
rule 1309()) authorized on url for argument 'cf2_field_7' of zone BODY
rule 1007(--) authorized on url for argument 'cf_codeerr2' of zone BODY
rule 1315() authorized on url for argument 'cf_codeerr2' of zone BODY
rule 1315() authorized on url for argument 'cf_failure2' of zone BODY
rule 1200(..) authorized on url for argument 'cf_working2' of zone BODY
rule 1315() authorized on url for argument 'cf_working2' of zone BODY

Let’s reload it, and have a look at the generated whitelists ! New rules have been generated, in the style :

BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1010 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1011 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1013 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1015 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1306 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1308 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1309 "mz:$BODY_VAR:cf2_field_1" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_2" ;
BasicRule wl:1013 "mz:$BODY_VAR:cf2_field_2" ;
BasicRule wl:1015 "mz:$BODY_VAR:cf2_field_2" ;
BasicRule wl:1306 "mz:$BODY_VAR:cf2_field_2" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_3" ;
BasicRule wl:1013 "mz:$BODY_VAR:cf2_field_3" ;
BasicRule wl:1015 "mz:$BODY_VAR:cf2_field_3" ;
BasicRule wl:1306 "mz:$BODY_VAR:cf2_field_3" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_4" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_5" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1010 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1011 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1013 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1015 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1306 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1308 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1309 "mz:$BODY_VAR:cf2_field_7" ;
BasicRule wl:1007 "mz:$BODY_VAR:cf_codeerr2" ;
BasicRule wl:1315 "mz:$BODY_VAR:cf_codeerr2" ;
BasicRule wl:1315 "mz:$BODY_VAR:cf_failure2" ;
BasicRule wl:1200 "mz:$BODY_VAR:cf_working2" ;
BasicRule wl:1315 "mz:$BODY_VAR:cf_working2" ;

Once I’ve did the same for the searchbox, my configuration is now over, and we can browse the site, and fill the forms without generating any new exception !

Some side notes

Sometimes, you will want to partially disable naxsi for a part of the website. In the case of my company’s website, I don’t want to configure / enable naxsi for the wordpress back-office, as it’s already protected by a .htaccess.

Then, you can “simply” define another location, where you don’t enable NAXSI :

location / {
     include    /etc/nginx/nbs.rules;
     proxy_pass http://194.213.124.111/;
     proxy_set_header Host www.nbs-system.com;
   }

location /wp-admin {
     proxy_pass https://194.213.124.111/;
     proxy_set_header Host www.nbs-system.com;
 }

And the trick is done 😉

Actually, you can do something way smarter. As wordpress is affected by numerous vulnerabilities in the back-office, I still want to protect it, but without spending too much time on the configuration, so here is what I’m doing :

location /wp-admin {
         include /etc/nginx/nbs.rules;
         BasicRule wl:0 mz:BODY;
         proxy_pass https://194.213.124.111;
         proxy_set_header Host www.nbs-system.com;
}

I’m enabling NAXSI, but I’m disabling all checks on BODY, as it’s the painfull part (posting HTML and so on). In this way, I will still protect WP back-office from vulnerabilities that are exploited through GET requests.

google文档地址:http://code.google.com/p/naxsi/

一月 18th, 2013

Posted In: linux系统

使用方法:
vi /usr/local/nginx/conf/drop_sql.conf
添加以下内容

## Block SQL injections
set $block_sql_injections 0;
if ($query_string ~ "union.*select.*\(") {
set $block_sql_injections 1;
}
if ($query_string ~ "union.*all.*select.*") {
set $block_sql_injections 1;
}
if ($query_string ~ "concat.*\(") {
set $block_sql_injections 1;
}
if ($block_sql_injections = 1) {
return 403;
}

## Block file injections
set $block_file_injections 0;
if ($query_string ~ "[a-zA-Z0-9_]=http://") {
set $block_file_injections 1;
}
if ($query_string ~ "[a-zA-Z0-9_]=(\.\.//?)+") {
set $block_file_injections 1;
}
if ($query_string ~ "[a-zA-Z0-9_]=/([a-z0-9_.]//?)+") {
set $block_file_injections 1;
}
if ($block_file_injections = 1) {
return 403;
}

## Block common exploits
set $block_common_exploits 0;
if ($query_string ~ "(<|%3C).*script.*(>|%3E)") {
set $block_common_exploits 1;
}
if ($query_string ~ "GLOBALS(=|\[|\%[0-9A-Z]{0,2})") {
set $block_common_exploits 1;
}
if ($query_string ~ "_REQUEST(=|\[|\%[0-9A-Z]{0,2})") {
set $block_common_exploits 1;
}
if ($query_string ~ "proc/self/environ") {
set $block_common_exploits 1;
}
if ($query_string ~ "mosConfig_[a-zA-Z_]{1,21}(=|\%3D)") {
set $block_common_exploits 1;
}
if ($query_string ~ "base64_(en|de)code\(.*\)") {
set $block_common_exploits 1;
}
if ($block_common_exploits = 1) {
return 403;
}

## Block spam
set $block_spam 0;
if ($query_string ~ "\b(ultram|unicauca|valium|viagra|vicodin|xanax|ypxaieo)\b") {
set $block_spam 1;
}
if ($query_string ~ "\b(erections|hoodia|huronriveracres|impotence|levitra|libido)\b") {
set $block_spam 1;
}
if ($query_string ~ "\b(ambien|blue\spill|cialis|cocaine|ejaculation|erectile)\b") {
set $block_spam 1;
}
if ($query_string ~ "\b(lipitor|phentermin|pro[sz]ac|sandyauer|tramadol|troyhamby)\b") {
set $block_spam 1;
}
if ($block_spam = 1) {
return 403;
}

## Block user agents
set $block_user_agents 0;

# Don't disable wget if you need it to run cron jobs!
#if ($http_user_agent ~ "Wget") {
# set $block_user_agents 1;
#}

# Disable Akeeba Remote Control 2.5 and earlier
if ($http_user_agent ~ "Indy Library") {
set $block_user_agents 1;
}

# Common bandwidth hoggers and hacking tools.
if ($http_user_agent ~ "libwww-perl") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GetRight") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GetWeb!") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Go!Zilla") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Download Demon") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "Go-Ahead-Got-It") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "TurnitinBot") {
set $block_user_agents 1;
}
if ($http_user_agent ~ "GrabNet") {
set $block_user_agents 1;
}

if ($block_user_agents = 1) {
return 403;
}

在nginx.conf配置文件中的server段中加入
include drop_sql.conf;

重新加载nginx配置文件即可生效
/usr/local/nginx/sbin/nginx -s reload

一月 17th, 2013

Posted In: linux系统

varnish的介绍很多,这里就不做过多的说明,直接贴上我的安装及配置方法,希望会对大家有用,仅供参考


安装方法如下:

 wget -S http://repo.varnish-cache.org/source/varnish-3.0.2.tar.gz
 tar zxvf varnish-3.0.2.tar.gz
 cd varnish-3.0.2
 ./configure --prefix=/usr/local/varnish --enable-dependency-tracking --enable-debugging-symbols --enable-developer-warnings
 make
 make install
 varnish No package 'libpcre' found
 说是缺少 pcre。我安装nginx的时候用到了pcre 安装了的
 使用以下办法
 export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig

(更多…)

五月 21st, 2012

Posted In: linux系统

标签:,

h1. Redis安装指南

* 下载redis,地址:http://redis.googlecode.com/files/redis-2.4.4.tar.gz

wget -S http://redis.googlecode.com/files/redis-2.4.4.tar.gz

* 解压redis安装包

tar zxf redis-2.4.4.tar.gz

* 进入redis-2.4.4目录,然后修改src/Makefile。找到“PREFIX= /usr/local”,修改为“PREFIX= /usr/local/redis”

* 执行make && make install

* 执行cd /usr/local/redis

* 执行mkdir data etc log生成三个目录

data:存放rdb文件
etc:存放redis配置文件
log:存放redis日志文件

* 新增加一个启动文件/etc/init.d/redis_$port,内容如下

#!/bin/sh
#
# chkconfig: 2345 55 25
# description: Redis server daemon
#
# processname: redis_$port  # change to match $port
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.

REDISPORT=6379   # change to match $port
EXEC=/usr/local/redis/bin/redis-server
CLIEXEC=/usr/local/redis/bin/redis-cli

PIDFILE=/var/run/redis_${REDISPORT}.pid
CONF="/usr/local/redis/etc/redis_${REDISPORT}.conf"


case "$1" in
    start)
        # A leftover pidfile means Redis is already running (or crashed
        # without cleanup); refuse to start a second instance.
        if [ -f $PIDFILE ]
        then
                echo "$PIDFILE exists, process is already running or crashed"
        else
                echo "Starting Redis $REDISPORT ..."
                $EXEC $CONF
        fi
        ;;
    stop)
        if [ ! -f $PIDFILE ]
        then
                echo "$PIDFILE does not exist, process is not running"
        else
                PID=$(cat $PIDFILE)
                echo "Stopping ..."
                # Ask Redis to shut down cleanly, then poll /proc until
                # the process entry disappears.
                $CLIEXEC -p $REDISPORT shutdown
                while [ -x /proc/${PID} ]
                do
                    echo "Waiting for Redis to shutdown ..."
                    sleep 1
                done
                echo "Redis stopped"
        fi
        ;;
    restart)
        $0 stop
        $0 start
        ;;
    *)
        echo "Usage: $0 {start|stop|restart}"
        exit
esac

* 新增文件/usr/local/redis/etc/redis_base.conf,内容如下


daemonize yes

timeout 300

loglevel notice

databases 16


save 900 1
save 300 10
save 60 10000

rdbcompression yes


slave-serve-stale-data yes

appendonly no

appendfsync no

no-appendfsync-on-rewrite no

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

slowlog-log-slower-than 10000

slowlog-max-len 1024

vm-enabled no

* 新增加一个启动文件/usr/local/redis/etc/redis_$port.conf,内容如下

pidfile /var/run/redis_6379.pid

#修改6379成$port值
port 6379  

#修改6379成$port值
logfile /usr/local/redis/log/redis_6379.log  

#修改6379成$port值
dbfilename dump_6379.rdb  

dir /usr/local/redis/data


include /usr/local/redis/etc/redis_base.conf

* 将脚本redis_$port加入自动化启动,执行如下命令:

chkconfig --add redis_$port
chkconfig --level 135 redis_$port on

四月 23rd, 2012

Posted In: linux系统

简介

qyssh是一个基于ssh证书认证的简单服务器批量管理工具,不支持使用密码登陆的服务器,可以使用qyscp配合qyssh来完成一些脚本的分发与执行工作。

源码

服务器批量管理(qyssh)
#!/bin/bash
# qyssh - run one command on every host in a server-list file over
# key-based SSH (password login is not supported).

#specify default ssh port
PORT=22

#specify default ssh user
USER=root

#specify ssh connecttimeout
Timeout=3

# Print command-line help.
usage()
{
    echo "\
usage: qyssh [-i Path/to/identity ] [-p SSH-Port (default:22)] [-t Connect TimeOut (default:3s)]
             [-u SSH-User (default:root)] [-l Server-List-File] [-c Command ]"
}

# Kill any ssh-agent started by this (or a previous) run.
kill_ssh_agent()
{
    killall ssh-agent > /dev/null 2>&1
}

while getopts :i:p:u:l:c:t: OPTION
do
    case $OPTION in
    i)
        KEY=$OPTARG
    ;;
    p)
        PORT=$OPTARG
    ;;
    u)
        USER=$OPTARG
    ;;
    l)
        LIST=$OPTARG
    ;;
    c)
        COMMAND=$OPTARG
    ;;
    *)
        usage
        exit 1
    ;;
    esac
done

# -i, -l and -c are mandatory.
if [ "$KEY"x == "x" ];then
    echo "You must specify a identity with -i option"
    usage
    exit 1
fi

if [ "$LIST"x == "x" ];then
    echo "You must specify a server list file with -l option"
    usage
    exit 1
fi

if [ "$COMMAND"x == "x" ];then
    echo "You must specify a command with -c option"
    usage
    exit 1
fi

# Load the key into a fresh agent so each ssh call does not prompt.
kill_ssh_agent
eval `ssh-agent` > /dev/null 2>&1
ssh-add "$KEY"

# Strip blank lines and #-comments from the server list.
server_list=`sed '/^$/d;/^#/d' "$LIST"`

SSH_COMMAND="ssh -o ConnectTimeout=$Timeout -p $PORT "$USER"@"

for i in $server_list
do
    echo -e "Execute command : \"$COMMAND\" at Host : $i "
    echo "-------------------------------------------------"
    $SSH_COMMAND$i $COMMAND
    case $? in
    0)
        echo -e "executed [ \033[32mOK\033[0m ]!"
    ;;
    *)
        # BUGFIX: was "?)", a glob matching only single-character strings,
        # so exit codes >= 10 (e.g. 127, 255) matched neither branch and
        # failures went unreported.
        echo -e "executed [ \033[31mFAILED\033[0m ]!"
    ;;
    esac
    sleep 1
done

kill_ssh_agent
批量文件分发(qyscp)
#!/bin/bash
# qyscp - copy FILE to TO-PATH on every host in a server-list file over
# key-based SSH; companion to qyssh for script distribution.

#specify default ssh port
PORT=22

#specify default ssh user
USER=root

#specify ssh connecttimeout
Timeout=3

# Print command-line help.
usage()
{
    echo "\
usage: qyscp [-i Path/to/identity ] [-p SSH-Port (default:22)] [-t Connect TimeOut (default:3s)]
             [-u SSH-User (default:root)] [-l Server-List-File] FILE TO-PATH"
}

# The last two positional arguments are the local file and the remote path.
let file=$#-1
TO_PATH=${!#}
FILE=${!file}
unset file

# Kill any ssh-agent started by this (or a previous) run.
kill_ssh_agent()
{
    killall ssh-agent > /dev/null 2>&1
}

while getopts :i:p:u:l:t: OPTION
do
    case $OPTION in
    i)
        KEY=$OPTARG
    ;;
    p)
        PORT=$OPTARG
    ;;
    u)
        USER=$OPTARG
    ;;
    l)
        LIST=$OPTARG
    ;;
    t)
        Timeout=$OPTARG
    ;;
    *)
        usage
        exit 1
    ;;
    esac
done

# -i and -l are mandatory.
if [ "$KEY"x == "x" ];then
    echo "You must specify a identity with -i option"
    usage
    exit 1
fi

if [ "$LIST"x == "x" ];then
    echo "You must specify a server list file with -l option"
    usage
    exit 1
fi

# Guard against the FILE slot accidentally picking up an option argument
# (happens when FILE/TO-PATH were omitted from the command line).
if [ "$FILE" == "$KEY" -o \
      "$FILE" == "$PORT" -o \
      "$FILE" == "$USER" -o \
      "$FILE" == "$LIST"  -o \
      "$FILE" == "$Timeout" ];then
    usage
    exit 1
fi

# Load the key into a fresh agent so each scp call does not prompt.
kill_ssh_agent
eval `ssh-agent` > /dev/null 2>&1
ssh-add "$KEY"

# Strip blank lines and #-comments from the server list.
server_list=`sed '/^$/d;/^#/d' "$LIST"`

SCP_COMMAND="scp -o ConnectTimeout=$Timeout -P $PORT $FILE "$USER"@"

for i in $server_list
do
    echo -e "Execute scp : \"$FILE\" to Host : $i:$TO_PATH "
    echo "-------------------------------------------------"
    $SCP_COMMAND$i:$TO_PATH
    case $? in
    0)
        echo -e "[ \033[32mOK\033[0m ]!"
    ;;
    *)
        # BUGFIX: was "?)", a glob matching only single-character strings,
        # so exit codes >= 10 (e.g. 127, 255) matched neither branch and
        # failures went unreported.
        echo -e "[ \033[31mFAILED\033[0m ]!"
    ;;
    esac
    sleep 1
done

kill_ssh_agent

安装

将脚本拷贝到/usr/local/sbin并赋予可执行权限即可

使用

qyssh

设置必要的参数

使用该脚本必须通过-i参数指定一个密钥,使用-c参数指定需要执行的命令,使用-l参数指定一个服务器列表文件,一个典型的服务器列表文件如下:

cat /home/yourhome/test_server_list

内容如下:

#team1_server
192.168.1.2
192.168.1.3
192.168.1.4
192.168.1.5
192.168.1.6
192.168.1.7
#team2_server
172.16.10.2
172.16.10.3
172.16.10.4
172.16.10.5
172.16.10.6

这个文件可以包含注释。

使用方法

首先请确保你的密钥文件权限为400,以便在命令中指定。

[root@test sbin]# ls -l /home/yourhome/key
-r-------- 1 root root 736 12-31 17:01 /home/yourhome/key

用法: qyssh [-i [证书文件,必须参数] ] [-p ssh端口号 (default:22)][-u ssh用户 (default:root)]

[-l 服务器列表文件 (default:read the database)][-c 执行的命令 ][-t 连接超时时间 (default:3s)]

qyssh -u yourhome -p 60000 -i /home/yourhome/key -l /home/yourhome/test_server_list -c ls
批量获取TCP连接数并排序

使用-c指定批量执行的ssh命令。

qyssh -i /home/yourhome/key -u yourhome -l /home/yourhome/test_server_list -p 60000 -t 10 -c "netstat -nat |awk '{print \$6}'|sort|uniq -c | sort -rn"

注意如果你的命令中包含带$的变量,请使用\避免其替换变量。

上述命令输出的结果如下:

Execute command : "netstat -nat |awk '{print $6}'|sort|uniq -c | sort -rn" at Host : 192.168.1.2
-------------------------------------------------
    282 ESTABLISHED
     26 FIN_WAIT2
     11 TIME_WAIT
      8 LISTEN
      1 Foreign
      1 established)
executed [ OK ]!
Execute command : "netstat -nat |awk '{print $6}'|sort|uniq -c | sort -rn" at Host : 192.168.1.3
-------------------------------------------------
    257 ESTABLISHED
      8 LISTEN
      1 Foreign
      1 established)
executed [ OK ]!
Execute command : "netstat -nat |awk '{print $6}'|sort|uniq -c | sort -rn" at Host : 192.168.1.4
-------------------------------------------------
    260 ESTABLISHED
     14 LISTEN
      1 Foreign
      1 established)
executed [ OK ]!
Execute command : "netstat -nat |awk '{print $6}'|sort|uniq -c | sort -rn" at Host : 192.168.1.5
-------------------------------------------------
   4985 TIME_WAIT
   1939 SYN_RECV
    122 FIN_WAIT1
     54 LAST_ACK
     32 ESTABLISHED
      7 LISTEN
      6 FIN_WAIT2
      5 CLOSING
      1 Foreign
      1 established)
executed [ OK ]!
......etc......

qyscp

首先请确定你的系统装有scp,该工具基于scp。

设置必要的参数

qyscp

使用方法

首先请确保你的密钥文件权限为400,以便在命令中指定。

[root@test sbin]# ls -l /home/yourhome/key
-r-------- 1 root root 736 12-31 17:01 /home/yourhome/key

用法: qyscp [-i [证书文件,必须参数] ] [-p ssh端口号 (default:22)][-t 连接超时时间 (default:3s)]

[-l 服务器列表文件 (default:read the database)][-u ssh用户 (default:root)] [本地文件路径] [拷贝路径]

qyscp -i /home/test/key -l /root/test_server_list -p 60000 ./test.sh /root/

实例

批量添加用户

1、首先编写脚本useradd.sh

#!/bin/bash
# Create a login user with key-based ssh access and passwordless sudo.
user=testuser
useradd $user
mkdir -p /home/$user/.ssh
# BUGFIX: sshd reads ~/.ssh/authorized_keys by default, not ".keys" —
# the original filename would never be consulted for key auth.
# (If the target sshd really uses a custom AuthorizedKeysFile, adjust.)
cat > /home/$user/.ssh/authorized_keys <<EOF
......ssh密钥,此处略.....
EOF
echo "$user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# With StrictModes (the default) sshd rejects keys whose directory or
# file is group/world accessible, so tighten permissions explicitly.
chmod 700 /home/$user/.ssh
chmod 600 /home/$user/.ssh/authorized_keys
# Use the POSIX ":" owner:group separator; the "." form is deprecated.
chown -R $user:$user /home/$user

2、使用qyscp分发

qyscp -l /root/test_server_list -p 60000 -i /home/test/key ./useradd.sh ~

3、执行脚本并且在执行完成后删除脚本

qyssh -l /root/test_server_list -p 60000 -i /home/test/key -c "sh ~/useradd.sh && rm -rf ~/useradd.sh"

四月 20th, 2012

Posted In: linux系统

标签:, ,

下一页 »

无觅相关文章插件,快速提升流量