Superset环境搭建(Ubuntu)

1、安装依赖包

sudo apt-get install build-essential libssl-dev libffi-dev python-dev python-pip libsasl2-dev libldap2-dev
pip install virtualenv

2、使用虚拟环境

#新建沙盒
virtualenv supersetenv
#进入沙盒
source supersetenv/bin/activate

3、安装

#升级安装工具,安装服务
pip install --upgrade setuptools pip
pip install superset
#新增管理员用户
fabmanager create-admin --app superset
#重置管理员密码
#fabmanager reset-password admin --app superset
#升级数据库
superset db upgrade
#加载测试数据
superset load_examples
#初始化
superset init
#运行Server
superset runserver

4、此时,只需要访问http://ip:8088就可以登录了

5、安装驱动

#mysql
apt-get install libmysqlclient-dev
pip install mysqlclient
#oracle
#pip install cx_Oracle
#mssql
#pip install pymssql

6、关闭

#Ctrl+C关闭Server
#退出沙盒
deactivate
#删除沙盒(rmvirtualenv 是 virtualenvwrapper 的命令;纯 virtualenv 直接删除目录即可)
#rm -rf supersetenv

7、升级

#进入沙盒
source supersetenv/bin/activate
#升级Server
pip install superset --upgrade
#升级DB
superset db upgrade
#初始化
superset init
#退出沙盒
deactivate

Spark环境搭建05

这里主要说一下Spark的SQL操作,如何从mysql导入数据。

#拷贝了mysql的驱动到jar文件下面
#建立DF
val jdbcDF = spark.read.format("jdbc").option("url", "jdbc:mysql://localhost:3307").option("dbtable", "hive.TBLS").option("user", "hive").option("password", "hive").load()

#schema
jdbcDF.schema

#count
jdbcDF.count()

#show
jdbcDF.show()

Spark环境搭建04

这里主要说一下Spark的SQL操作。

1、dataframe操作数据

#加载json数据
val df = spark.read.json("/usr/hadoop/person.json")
#加载CSV数据
#val df = spark.read.csv("/usr/hadoop/person.csv")

#查询前20行
df.show()

#查看结构
df.printSchema()

#选择一列
df.select("NAME").show()

#按条件过滤行
df.filter($"BALANCE_COST" < 10 && $"BALANCE_COST" > 1).show()

#分组统计
df.groupBy("SEX_CODE").count().show()

2、sql操作数据

#创建视图
df.createOrReplaceTempView("person")

#查看数据
spark.sql("SELECT * FROM person").show()

#统计数据
spark.sql("SELECT * FROM person").count()

#带条件选择
spark.sql("SELECT * FROM person WHERE BALANCE_COST<10 and BALANCE_COST>1 order by BALANCE_COST").show()

3、转为DS

#转为Dataset
case class PERSONC(PATIENT_NO : String,NAME : String,SEX_CODE : String,BIRTHDATE : String,BALANCE_CODE : String)
var personDS = spark.read.json("/usr/hadoop/person.json").as[PERSONC]

4、sql与map reduce混用

personDS.select("BALANCE_COST").map(row=>if(row(0)==null) 0.0 else (row(0)+"").toDouble).reduce((a,b)=>if(a>b) a else b)

spark.sql("select BALANCE_COST from person").map(row=>if(row(0)==null) 0.0 else (row(0)+"").toDouble).reduce((a,b)=>if(a>b) a else b)

5、数据映射为Class对象

val personRDD = spark.sparkContext.textFile("/usr/hadoop/person.txt")
val persons = personRDD.map(_.split(",")).map(attributes => Person(attributes(0), attributes(1), attributes(2), attributes(3), attributes(4).toDouble))

6、自定义schema

import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

#加载数据
val personRDD = spark.sparkContext.textFile("/usr/hadoop/person.txt")
#转为org.apache.spark.sql.Row
val rowRDD = personRDD.map(_.split(",")).map(attributes => Row(attributes(0), attributes(1), attributes(2), attributes(3), attributes(4).replace("\"","").toDouble))
#定义新的Schema
val personSchema = StructType(List(StructField("PatientNum",StringType,nullable = true), StructField("Name",StringType,nullable = true), StructField("SexCode",StringType,nullable = true), StructField("BirthDate",StringType,nullable = true), StructField("BalanceCode",DoubleType,nullable = true)))
#建立新的DF
val personDF = spark.createDataFrame(rowRDD, personSchema)
#使用DF
personDF.select("PatientNum").show()

Spark环境搭建03

上面说到了Spark如何与Hadoop整合,下面就说一下Spark如何与HBase整合。

1、获取hbase的classpath

#要把netty和jetty的包去掉,否则会有jar包冲突
HBASE_PATH=`/home/hadoop/Deploy/hbase-1.1.2/bin/hbase classpath`

2、启动spark

bin/spark-shell --driver-class-path $HBASE_PATH

3、进行简单的操作

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable

val conf = HBaseConfiguration.create()
conf.set(TableInputFormat.INPUT_TABLE, "inpatient_hb")

val admin = new HBaseAdmin(conf)
admin.isTableAvailable("inpatient_hb")
res1: Boolean = true

val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable], classOf[org.apache.hadoop.hbase.client.Result])

hBaseRDD.count()
2017-01-03 20:46:29,854 INFO  [main] scheduler.DAGScheduler (Logging.scala:logInfo(58)) - Job 0 finished: count at <console>:36, took 23.170739 s
res2: Long = 115077

Spark环境搭建02

1、启动spark

sbin/start-all.sh

可以在http://hiup:8080/看到spark运行情况。

2、启动shell

bin/spark-shell

3、测试

$ ./run-example SparkPi 10
Pi is roughly 3.140408

$ ./run-example SparkPi 100
Pi is roughly 3.1412528

$ ./run-example SparkPi 1000
Pi is roughly 3.14159016

4、基本操作

#HDFS加载数据
scala> var textFile=sc.textFile("/usr/hadoop/inpatient.txt")

#第一行
scala> textFile.first()
res1: String = "第一行内容"

#第一行,用逗号分割后,第一列(住院号)
textFile.first().split(",")(0)
res2: String = "0000718165"

#第一行,用逗号分割后,第5列(费用)
textFile.first().split(",")(5)
res3: String = "100.01"

#行数
scala> textFile.count()
res4: Long = 115411

#包含ICU的行数
textFile.filter(line=>line.contains("ICU")).count()
res5: Long = 912

#获取每一行的长度
var lineLengths = textFile.map(s=>s.length)

#获取总长度
var totalLength = lineLengths.reduce((a,b)=>a+b)
totalLength: Int = 32859905

#获取最大费用
textFile.map(line=>if(line.split(",").size==30) line.split(",")(23).replace("\"","") else "0").reduce((a,b)=>if(a.toDouble>b.toDouble) a else b)
res6: String = 300

#创建一个类
// Simple serializable record for one inpatient row; fields are declared as
// var so they can be reassigned after construction. The fixed
// @SerialVersionUID keeps serialized instances compatible across recompiles
// (needed when Spark ships instances of this class to executors).
@SerialVersionUID(100L)
class PATIENT(
    var PATIENT_NO: String,
    var NAME: String,
    var SEX_CODE: String,
    var BIRTHDATE: String,
    var BALANCE_COST: String)
  extends Serializable

#新建一个对象
var p=new PATIENT("PATIENT_NO","NAME","SEX_CODE","BIRTHDATE","BALANCE_COST")

#新建一个map函数
// Parses one comma-separated inpatient line into a PATIENT.
// Only the first five columns are used; extra columns are ignored.
def mapFunc(line: String): PATIENT = {
  val cols = line.split(",")
  new PATIENT(cols(0), cols(1), cols(2), cols(3), cols(4))
}

#最大费用
textFile.filter(line=>line.split(",").size==30).map(mapFunc).reduce((a,b)=>if(a.BALANCE_COST.replace("\"","").toDouble>b.BALANCE_COST.replace("\"","").toDouble) a else b).BALANCE_COST

#男性最大费用
textFile.filter(line=>line.split(",").size==30).map(mapFunc).filter(p=>p.SEX_CODE=="\"M\"").reduce((a,b)=>if(a.BALANCE_COST.replace("\"","").toDouble>b.BALANCE_COST.replace("\"","").toDouble) a else b).BALANCE_COST

#女性最大费用
textFile.filter(line=>line.split(",").size==30).map(mapFunc).filter(p=>p.SEX_CODE=="\"F\"").reduce((a,b)=>if(a.BALANCE_COST.replace("\"","").toDouble>b.BALANCE_COST.replace("\"","").toDouble) a else b).BALANCE_COST

#退出
scala> exit

Spark环境搭建01

1、下载scala-2.11.1,并解压到/usr/scala/scala-2.11.1

2、下载spark-2.0.0-bin-hadoop2.4,并解压到/home/hadoop/Deploy/spark-2.0.0
(*如果要看后续文章,建议使用hadoop-2.5.2 hbase-1.1.2 hive-1.2.1 spark-2.0.0)

3、复制spark-env.sh.template为spark-env.sh,并添加下面几行

export JAVA_HOME=/usr/java/jdk1.7.0_79
export SCALA_HOME=/usr/scala/scala-2.11.1/
export SPARK_MASTER_IP=hiup01
export SPARK_WORKER_MEMORY=1g
export HADOOP_CONF_DIR=/home/hadoop/Deploy/hadoop-2.5.2/etc/hadoop

4、复制slaves.template为slaves,并添加下面几行

hiup01
hiup02
hiup03

5、将scala-2.11.1及spark-2.0.0复制到hiup02及hiup03

6、环境搭建完毕。

Docker私有仓库搭建

1、安装registry

# sudo apt-get install docker docker-registry

2、上传镜像
2.1、客户端允许http

$ sudo vi /etc/default/docker
#添加这一行
DOCKER_OPTS="--insecure-registry 192.168.130.191:5000"

2.2、上传镜像

#查看镜像列表
$ sudo docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
elasticsearch                        5.1                 747929f3b12a        2 weeks ago         352.6 MB

#标记镜像
$ sudo docker tag elasticsearch:5.1 192.168.130.191:5000/elasticsearch

#查看镜像列表
$ sudo docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
elasticsearch                        5.1                 747929f3b12a        2 weeks ago         352.6 MB
192.168.130.191:5000/elasticsearch   5.1                 747929f3b12a        2 weeks ago         352.6 MB

#上传镜像
$ sudo docker push 192.168.130.191:5000/elasticsearch:5.1
The push refers to a repository [192.168.130.191:5000/elasticsearch]
cea33faf9668: Pushed
c3707daa9b07: Pushed
a56b404460eb: Pushed
5e48ecb24792: Pushed
f86173bb67f3: Pushed
c87433dfa8d7: Pushed
c9dbd14c23f0: Pushed
b5b4ba1cb64d: Pushed
15ba1125d6c0: Pushed
bd25fcff1b2c: Pushed
8d9c6e6ceb37: Pushed
bc3b6402e94c: Pushed
223c0d04a137: Pushed
fe4c16cbf7a4: Pushed
5.1: digest: sha256:14ec0b594c0bf1b007debc12e3a16a99aee74964724ac182bc851fec3fc5d2b0 size: 3248

3、查询镜像

$ curl -X GET http://192.168.130.191:5000/v2/_catalog
{"repositories":["alpine","elasticsearch","jetty","mongo","mysql","nginx","openjdk","redis","registry","ubuntu","zookeeper"]}

$ curl -X GET http://192.168.130.191:5000/v2/elasticsearch/tags/list
{"name":"elasticsearch","tags":["5.1"]}

#下面的查询命令总是报404错误,api文档中也没有,有些奇怪
$ curl -X GET http://192.168.130.191:5000/v2/search?q=elasticsearch
$ sudo docker search 192.168.130.191:5000/elasticsearch

4、下载镜像

$ sudo docker pull 192.168.130.191:5000/elasticsearch:5.1
5.1: Pulling from elasticsearch
386a066cd84a: Pull complete
75ea84187083: Pull complete
3e2e387eb26a: Pull complete
eef540699244: Pull complete
1624a2f8d114: Pull complete
7018f4ec6e0a: Pull complete
6ca3bc2ad3b3: Pull complete
424638b495a6: Pull complete
2ff72d0b7bea: Pull complete
d0d6a2049bf2: Pull complete
003b957bd67f: Pull complete
14d23bc515af: Pull complete
923836f4bd50: Pull complete
c0b5750bf0f7: Pull complete
Digest: sha256:14ec0b594c0bf1b007debc12e3a16a99aee74964724ac182bc851fec3fc5d2b0
Status: Downloaded newer image for 192.168.130.191:5000/elasticsearch:5.1

5、删除镜像

#Registry v2 删除 manifest 时必须使用 digest(不能直接用 tag)
$ curl -X DELETE http://192.168.130.191:5000/v2/elasticsearch/manifests/sha256:14ec0b594c0bf1b007debc12e3a16a99aee74964724ac182bc851fec3fc5d2b0

参考github

elasticsearch docker官方镜像无法运行

elasticsearch5.1 docker官方镜像运行时会报错:

ERROR: bootstrap checks failed
max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]

那是因为vm.max_map_count达不到es的最低要求262144,修改方式有两种:

#一次生效
sudo sysctl -w vm.max_map_count=262144
#永久生效
sudo vi /etc/sysctl.conf
#添加这一行
vm.max_map_count=262144

#加载配置
sudo sysctl -p

常用docker镜像命令(Compose)

1、拉取镜像

#ubuntu-16.04.1-server-amd64
sudo apt-get install docker
sudo apt-get install docker-compose
#拉取镜像
sudo docker pull mysql:5.7
sudo docker pull redis:3.2
sudo docker pull mongo:3.4
sudo docker pull jetty:9.3-jre8
sudo docker pull nginx:1.11
sudo docker pull elasticsearch:5.1
sudo docker pull ubuntu:16.04

2、新建网络

sudo docker network create hiup

3、整理yml文件
3.1yml版本1

h01-mysql02:
  image: mysql:5.7
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-mysql02
  net: hiup
  ports:
    - "3306:3306"
  volumes:
   - /home/hiup/docker/data/mysql/var/lib/mysql:/var/lib/mysql
   - /home/hiup/docker/data/mysql/etc/mysql/conf.d:/etc/mysql/conf.d
  environment:
   - MYSQL_ROOT_PASSWORD=hiup
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-redis02:
  image: redis:3.2
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-redis02
  net: hiup
  volumes:
   - /home/hiup/docker/data/redis/etc/redis/:/etc/redis/
   - /home/hiup/docker/data/redis/data:/data
  ports:
   - "6379:6379"
  command: redis-server /etc/redis/redis.conf
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-mongo02:
  image: mongo:3.4
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-mongo02
  net: hiup
  ports:
   - "27017:27017"
  volumes:
   - /home/hiup/docker/data/mongo/etc/mongod.conf:/etc/mongod.conf
   - /home/hiup/docker/data/mongo/data/db:/data/db
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-jetty02:
  image: jetty:9.3-jre8
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-jetty02
  net: hiup
  ports:
   - "8080:8080"
  volumes:
   - /home/hiup/docker/data/jetty/usr/local/jetty/etc:/usr/local/jetty/etc
   - /home/hiup/docker/data/jetty/webapps:/var/lib/jetty/webapps
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-nginx02:
  image: nginx:1.11
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-nginx02
  net: hiup
  ports:
   - "80:80"
  volumes:
   - /home/hiup/docker/data/nginx/etc/nginx/nginx.conf:/etc/nginx/nginx.conf
   - /home/hiup/docker/data/nginx/usr/share/nginx/html:/usr/share/nginx/html
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-es02:
  image: elasticsearch:5.1
  mem_limit: 640m
  cpu_shares: 100
  tty: true
  hostname: h01-es02
  net: hiup
  ports:
   - "9200:9200"
   - "9300:9300"
  volumes:
   - /home/hiup/docker/data/es/usr/share/elasticsearch/config:/usr/share/elasticsearch/config
   - /home/hiup/docker/data/es/usr/share/elasticsearch/data:/usr/share/elasticsearch/data
  command: elasticsearch
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

h01-ubuntu02:
  image: ubuntu:16.04
  mem_limit: 128m
  cpu_shares: 100
  tty: true
  hostname: h01-ubuntu02
  net: hiup
  #ports:
  #volumes:
  command: /bin/bash
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
    max-file: "10"

3.2yml版本2

version: '2'
services:
  h01-mysql02:
    image: mysql:5.7
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-mysql02
    network_mode: hiup
    ports:
      - "3306:3306"
    volumes:
     - /home/hiup/docker/data/mysql/var/lib/mysql:/var/lib/mysql
     - /home/hiup/docker/data/mysql/etc/mysql/conf.d:/etc/mysql/conf.d
    environment:
     - MYSQL_ROOT_PASSWORD=hiup
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-redis02:
    image: redis:3.2
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-redis02
    network_mode: hiup
    volumes:
     - /home/hiup/docker/data/redis/etc/redis/:/etc/redis/
     - /home/hiup/docker/data/redis/data:/data
    ports:
     - "6379:6379"
    command: redis-server /etc/redis/redis.conf
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-mongo02:
    image: mongo:3.4
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-mongo02
    network_mode: hiup
    ports:
     - "27017:27017"
    volumes:
     - /home/hiup/docker/data/mongo/etc/mongod.conf:/etc/mongod.conf
     - /home/hiup/docker/data/mongo/data/db:/data/db
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-jetty02:
    image: jetty:9.3-jre8
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-jetty02
    network_mode: hiup
    ports:
     - "8080:8080"
    volumes:
     - /home/hiup/docker/data/jetty/usr/local/jetty/etc:/usr/local/jetty/etc
     - /home/hiup/docker/data/jetty/webapps:/var/lib/jetty/webapps
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-nginx02:
    image: nginx:1.11
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-nginx02
    network_mode: hiup
    ports:
     - "80:80"
    volumes:
     - /home/hiup/docker/data/nginx/etc/nginx/nginx.conf:/etc/nginx/nginx.conf
     - /home/hiup/docker/data/nginx/usr/share/nginx/html:/usr/share/nginx/html
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-es02:
    image: elasticsearch:5.1
    mem_limit: 640m
    cpu_shares: 100
    tty: true
    hostname: h01-es02
    network_mode: hiup
    ports:
     - "9200:9200"
     - "9300:9300"
    volumes:
     - /home/hiup/docker/data/es/usr/share/elasticsearch/config:/usr/share/elasticsearch/config
     - /home/hiup/docker/data/es/usr/share/elasticsearch/data:/usr/share/elasticsearch/data
    command: elasticsearch
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
  
  h01-ubuntu02:
    image: ubuntu:16.04
    mem_limit: 128m
    cpu_shares: 100
    tty: true
    hostname: h01-ubuntu02
    network_mode: hiup
    ports:
    volumes:
    command: /bin/bash
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"

4、运行

sudo docker-compose up -d

5、配置文件目录

.
├── es
│   └── usr
│       └── share
│           └── elasticsearch
│               ├── config
│               │   ├── elasticsearch.yml
│               │   ├── log4j2.properties
│               │   └── scripts
│               └── data
│                   └── nodes
│                       └── 0
│                           ├── node.lock
│                           └── _state
│                               ├── global-0.st
│                               └── node-0.st
├── jetty
│   ├── usr
│   │   └── local
│   │       └── jetty
│   │           └── etc
│   │               ├── example-quickstart.xml
│   │               ├── gcloud-memcached-session-context.xml
│   │               ├── gcloud-session-context.xml
│   │               ├── hawtio.xml
│   │               ├── home-base-warning.xml
│   │               ├── jamon.xml
│   │               ├── jdbcRealm.properties
│   │               ├── jetty-alpn.xml
│   │               ├── jetty-annotations.xml
│   │               ├── jetty-cdi.xml
│   │               ├── jetty.conf
│   │               ├── jetty-debuglog.xml
│   │               ├── jetty-debug.xml
│   │               ├── jetty-deploy.xml
│   │               ├── jetty-gcloud-memcached-sessions.xml
│   │               ├── jetty-gcloud-session-idmgr.xml
│   │               ├── jetty-gcloud-sessions.xml
│   │               ├── jetty-gzip.xml
│   │               ├── jetty-http2c.xml
│   │               ├── jetty-http2.xml
│   │               ├── jetty-http-forwarded.xml
│   │               ├── jetty-https.xml
│   │               ├── jetty-http.xml
│   │               ├── jetty-infinispan.xml
│   │               ├── jetty-ipaccess.xml
│   │               ├── jetty-jaas.xml
│   │               ├── jetty-jdbc-sessions.xml
│   │               ├── jetty-jmx-remote.xml
│   │               ├── jetty-jmx.xml
│   │               ├── jetty-logging.xml
│   │               ├── jetty-lowresources.xml
│   │               ├── jetty-monitor.xml
│   │               ├── jetty-nosql.xml
│   │               ├── jetty-plus.xml
│   │               ├── jetty-proxy-protocol-ssl.xml
│   │               ├── jetty-proxy-protocol.xml
│   │               ├── jetty-proxy.xml
│   │               ├── jetty-requestlog.xml
│   │               ├── jetty-rewrite-customizer.xml
│   │               ├── jetty-rewrite.xml
│   │               ├── jetty-setuid.xml
│   │               ├── jetty-spring.xml
│   │               ├── jetty-ssl-context.xml
│   │               ├── jetty-ssl.xml
│   │               ├── jetty-started.xml
│   │               ├── jetty-stats.xml
│   │               ├── jetty-threadlimit.xml
│   │               ├── jetty.xml
│   │               ├── jminix.xml
│   │               ├── jolokia.xml
│   │               ├── krb5.ini
│   │               ├── README.spnego
│   │               ├── rewrite-compactpath.xml
│   │               ├── spnego.conf
│   │               ├── spnego.properties
│   │               └── webdefault.xml
│   └── webapps
│       └── jvmjsp.war
├── mongo
│   ├── data
│   │   └── db
│   │       ├── collection-0-4376730799513530636.wt
│   │       ├── collection-2-4376730799513530636.wt
│   │       ├── collection-5-4376730799513530636.wt
│   │       ├── diagnostic.data
│   │       │   └── metrics.2016-12-27T08-57-50Z-00000
│   │       ├── index-1-4376730799513530636.wt
│   │       ├── index-3-4376730799513530636.wt
│   │       ├── index-4-4376730799513530636.wt
│   │       ├── index-6-4376730799513530636.wt
│   │       ├── journal
│   │       │   ├── WiredTigerLog.0000000001
│   │       │   ├── WiredTigerPreplog.0000000001
│   │       │   └── WiredTigerPreplog.0000000002
│   │       ├── _mdb_catalog.wt
│   │       ├── mongod.lock
│   │       ├── sizeStorer.wt
│   │       ├── storage.bson
│   │       ├── WiredTiger
│   │       ├── WiredTigerLAS.wt
│   │       ├── WiredTiger.lock
│   │       ├── WiredTiger.turtle
│   │       └── WiredTiger.wt
│   └── etc
│       └── mongod.conf
├── mysql
│   ├── etc
│   │   └── mysql
│   │       └── conf.d
│   │           ├── docker.cnf
│   │           └── mysql.cnf
│   └── var
│       └── lib
│           └── mysql
│               ├── auto.cnf
│               ├── ib_buffer_pool
│               ├── ibdata1
│               ├── ib_logfile0
│               ├── ib_logfile1
│               ├── mysql [error opening dir]
│               ├── performance_schema [error opening dir]
│               └── sys [error opening dir]
├── nginx
│   ├── etc
│   │   └── nginx
│   │       └── nginx.conf
│   └── usr
│       └── share
│           └── nginx
│               └── html
│                   ├── 50x.html
│                   └── index.html
└── redis
    ├── data
    │   └── dump.rdb
    └── etc
        └── redis
            ├── redis.conf
            └── sentinel.conf

6、本地工具安装

sudo apt-get install mysql-client
sudo apt-get install redis-tools
sudo apt-get install mongodb-clients
sudo apt-get install curl

常用docker镜像命令(Shell)

1、拉取镜像

#ubuntu-16.04.1-server-amd64
sudo apt-get install docker
sudo apt-get install docker-compose
#拉取镜像
sudo docker pull mysql:5.7
sudo docker pull redis:3.2
sudo docker pull mongo:3.4
sudo docker pull jetty:9.3-jre8
sudo docker pull nginx:1.11
sudo docker pull elasticsearch:5.1
sudo docker pull ubuntu:16.04

2、新建网络

sudo docker network create hiup

3、启动容器
3.1、第一次启动容器

#mysql
sudo docker run --net=hiup --name h01-mysql01 -h h01-mysql01 -p3306:3306 -c 100 -m 128m -e MYSQL_ROOT_PASSWORD=hiup -v /home/hiup/docker/data/mysql/var/lib/mysql:/var/lib/mysql -v /home/hiup/docker/data/mysql/etc/mysql/conf.d:/etc/mysql/conf.d -itd mysql:5.7

#redis
sudo docker run --net=hiup --name h01-redis01 -h h01-redis01 -p6379:6379 -c 100 -m 128m  -v /home/hiup/docker/data/redis/etc/redis/:/etc/redis/ -v /home/hiup/docker/data/redis/data:/data -itd redis:3.2 redis-server /etc/redis/redis.conf
#下面配置提供持久化支持
#redis-server --appendonly yes

#mongodb
sudo docker run --net=hiup --name h01-mongo01 -h h01-mongo01 -p27017:27017 -c 100 -m 128m -v /home/hiup/docker/data/mongo/etc/mongod.conf:/etc/mongod.conf -v /home/hiup/docker/data/mongo/data/db:/data/db -itd mongo:3.4
#提供授权支持
#--auth

#jetty
sudo docker run --net=hiup --name h01-jetty01 -h h01-jetty01 -p8080:8080 -c 100 -m 128m -v /home/hiup/docker/data/jetty/usr/local/jetty/etc:/usr/local/jetty/etc -v /home/hiup/docker/data/jetty/webapps:/var/lib/jetty/webapps -itd jetty:9.3-jre8
#默认环境变量
#JETTY_HOME    =  /usr/local/jetty
#JETTY_BASE    =  /var/lib/jetty
#TMPDIR        =  /tmp/jetty
#Deploy dir is /var/lib/jetty/webapps
#内存设置
#-e JAVA_OPTIONS="-Xmx1g"
#参数列表
#--list-config

#nginx
sudo docker run --net=hiup --name h01-nginx01 -h h01-nginx01 -p80:80 -c 100 -m 128m -v /home/hiup/docker/data/nginx/etc/nginx/nginx.conf:/etc/nginx/nginx.conf -v /home/hiup/docker/data/nginx/usr/share/nginx/html:/usr/share/nginx/html -itd nginx:1.11

#elasticsearch
sudo docker run --net=hiup --name h01-es01 -h h01-es01 -p9200:9200 -p9300:9300 -c 100 -m 640m -v /home/hiup/docker/data/es/usr/share/elasticsearch/config:/usr/share/elasticsearch/config -v /home/hiup/docker/data/es/usr/share/elasticsearch/data:/usr/share/elasticsearch/data -itd elasticsearch:5.1

#ubuntu
sudo docker run --net=hiup --name h01-ubuntu01 -h h01-ubuntu01 -c 100 -m 128m -itd ubuntu:16.04
sudo docker attach h01-ubuntu01

3.2、第n次启动容器(n>1)

sudo docker start h01-mysql01
sudo docker start h01-redis01
sudo docker start h01-mongo01
sudo docker start h01-jetty01
sudo docker start h01-nginx01
sudo docker start h01-es01
sudo docker start h01-ubuntu01

4、配置文件目录

.
├── es
│   └── usr
│       └── share
│           └── elasticsearch
│               ├── config
│               │   ├── elasticsearch.yml
│               │   ├── log4j2.properties
│               │   └── scripts
│               └── data
│                   └── nodes
│                       └── 0
│                           ├── node.lock
│                           └── _state
│                               ├── global-0.st
│                               └── node-0.st
├── jetty
│   ├── usr
│   │   └── local
│   │       └── jetty
│   │           └── etc
│   │               ├── example-quickstart.xml
│   │               ├── gcloud-memcached-session-context.xml
│   │               ├── gcloud-session-context.xml
│   │               ├── hawtio.xml
│   │               ├── home-base-warning.xml
│   │               ├── jamon.xml
│   │               ├── jdbcRealm.properties
│   │               ├── jetty-alpn.xml
│   │               ├── jetty-annotations.xml
│   │               ├── jetty-cdi.xml
│   │               ├── jetty.conf
│   │               ├── jetty-debuglog.xml
│   │               ├── jetty-debug.xml
│   │               ├── jetty-deploy.xml
│   │               ├── jetty-gcloud-memcached-sessions.xml
│   │               ├── jetty-gcloud-session-idmgr.xml
│   │               ├── jetty-gcloud-sessions.xml
│   │               ├── jetty-gzip.xml
│   │               ├── jetty-http2c.xml
│   │               ├── jetty-http2.xml
│   │               ├── jetty-http-forwarded.xml
│   │               ├── jetty-https.xml
│   │               ├── jetty-http.xml
│   │               ├── jetty-infinispan.xml
│   │               ├── jetty-ipaccess.xml
│   │               ├── jetty-jaas.xml
│   │               ├── jetty-jdbc-sessions.xml
│   │               ├── jetty-jmx-remote.xml
│   │               ├── jetty-jmx.xml
│   │               ├── jetty-logging.xml
│   │               ├── jetty-lowresources.xml
│   │               ├── jetty-monitor.xml
│   │               ├── jetty-nosql.xml
│   │               ├── jetty-plus.xml
│   │               ├── jetty-proxy-protocol-ssl.xml
│   │               ├── jetty-proxy-protocol.xml
│   │               ├── jetty-proxy.xml
│   │               ├── jetty-requestlog.xml
│   │               ├── jetty-rewrite-customizer.xml
│   │               ├── jetty-rewrite.xml
│   │               ├── jetty-setuid.xml
│   │               ├── jetty-spring.xml
│   │               ├── jetty-ssl-context.xml
│   │               ├── jetty-ssl.xml
│   │               ├── jetty-started.xml
│   │               ├── jetty-stats.xml
│   │               ├── jetty-threadlimit.xml
│   │               ├── jetty.xml
│   │               ├── jminix.xml
│   │               ├── jolokia.xml
│   │               ├── krb5.ini
│   │               ├── README.spnego
│   │               ├── rewrite-compactpath.xml
│   │               ├── spnego.conf
│   │               ├── spnego.properties
│   │               └── webdefault.xml
│   └── webapps
│       └── jvmjsp.war
├── mongo
│   ├── data
│   │   └── db
│   │       ├── collection-0-4376730799513530636.wt
│   │       ├── collection-2-4376730799513530636.wt
│   │       ├── collection-5-4376730799513530636.wt
│   │       ├── diagnostic.data
│   │       │   └── metrics.2016-12-27T08-57-50Z-00000
│   │       ├── index-1-4376730799513530636.wt
│   │       ├── index-3-4376730799513530636.wt
│   │       ├── index-4-4376730799513530636.wt
│   │       ├── index-6-4376730799513530636.wt
│   │       ├── journal
│   │       │   ├── WiredTigerLog.0000000001
│   │       │   ├── WiredTigerPreplog.0000000001
│   │       │   └── WiredTigerPreplog.0000000002
│   │       ├── _mdb_catalog.wt
│   │       ├── mongod.lock
│   │       ├── sizeStorer.wt
│   │       ├── storage.bson
│   │       ├── WiredTiger
│   │       ├── WiredTigerLAS.wt
│   │       ├── WiredTiger.lock
│   │       ├── WiredTiger.turtle
│   │       └── WiredTiger.wt
│   └── etc
│       └── mongod.conf
├── mysql
│   ├── etc
│   │   └── mysql
│   │       └── conf.d
│   │           ├── docker.cnf
│   │           └── mysql.cnf
│   └── var
│       └── lib
│           └── mysql
│               ├── auto.cnf
│               ├── ib_buffer_pool
│               ├── ibdata1
│               ├── ib_logfile0
│               ├── ib_logfile1
│               ├── mysql [error opening dir]
│               ├── performance_schema [error opening dir]
│               └── sys [error opening dir]
├── nginx
│   ├── etc
│   │   └── nginx
│   │       └── nginx.conf
│   └── usr
│       └── share
│           └── nginx
│               └── html
│                   ├── 50x.html
│                   └── index.html
└── redis
    ├── data
    │   └── dump.rdb
    └── etc
        └── redis
            ├── redis.conf
            └── sentinel.conf

5、本地工具安装

sudo apt-get install mysql-client
sudo apt-get install redis-tools
sudo apt-get install mongodb-clients
sudo apt-get install curl