blog's Issues

Installing Kubernetes on CentOS 7

Upgrade the kernel

# Install the elrepo repository
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm

yum makecache

# Install the long-term (lt) kernel
yum --enablerepo=elrepo-kernel install -y kernel-lt

# List the installed kernels
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg

# Set the newly installed kernel as the default boot entry
grub2-set-default 'CentOS Linux (5.4.153-1.el7.elrepo.x86_64) 7 (Core)'

# Reboot
reboot

# Check the current kernel version
uname -a

# Update the system (excluding kernel packages)
yum update --exclude=kernel* -y

# Install required tools
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y

Kernel configuration

# Install ipvsadm and related packages
yum install ipvsadm ipset sysstat conntrack libseccomp -y

vim /etc/modules-load.d/ipvs.conf 
# Add the following content
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

# Load the kernel modules
systemctl enable --now systemd-modules-load.service

# Enable kernel parameters required by the Kubernetes cluster; configure these on all nodes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system

Disable the firewall, SELinux, dnsmasq, and swap

systemctl disable --now firewalld 
systemctl disable --now dnsmasq
systemctl disable --now NetworkManager

setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

Set the hostname and add it to /etc/hosts

hostnamectl set-hostname k8s-master
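
The heading also calls for adding the hostname to /etc/hosts; a minimal sketch, assuming this node's IP is 192.168.1.100 (replace it with your own):

# map this node's IP (example value) to the hostname set above
cat >> /etc/hosts <<EOF
192.168.1.100 k8s-master
EOF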

Time synchronization

# Install ntpdate
yum install ntpdate -y

# Set the time zone and sync against a time server
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com

# Sync the clock with ntp once an hour
cat <<EOF > /etc/cron.hourly/ntp
#!/bin/bash
ntpdate time1.google.com
EOF
chmod +x /etc/cron.hourly/ntp

System limits

ulimit -SHn 65535

vim /etc/security/limits.conf
# Append the following at the end
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

Installing containerd

Since we are installing Kubernetes, we skip Docker and install its underlying container runtime, containerd, directly.

# Configure the Docker yum repository
yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum install containerd.io -y

containerd config default > /etc/containerd/config.toml

# Switch the cgroup driver to systemd
vim /etc/containerd/config.toml
# Change the following settings
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  ...
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

[plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://oml82bmq.mirror.aliyuncs.com/"]

[plugins."io.containerd.grpc.v1.cri"]
    sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2"

# Start containerd and enable it at boot
systemctl daemon-reload && systemctl start containerd.service
systemctl enable containerd.service

# Check the status
systemctl status containerd

# Install crictl
VERSION="v1.20.0"
curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-$VERSION-linux-amd64.tar.gz

cat <<EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 2
debug: true
pull-image-on-create: false
EOF

Installing Kubernetes

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

sudo systemctl enable --now kubelet

kubeadm config

kubeadm config print init-defaults > init.yaml
# Change localAPIEndpoint.advertiseAddress to this machine's IP address
# Change nodeRegistration.criSocket to /var/run/containerd/containerd.sock
# Change nodeRegistration.name to this machine's hostname
# aliyun registry: registry.aliyuncs.com/google_containers
# Add networking.podSubnet and make sure it matches the pod network add-on (e.g. networking.podSubnet: "10.244.0.0/16") to avoid problems when installing it
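
For reference, after the edits above the relevant fields of init.yaml (spread across its InitConfiguration and ClusterConfiguration documents) would look roughly like this; the IP and hostname are placeholders:

localAPIEndpoint:
  advertiseAddress: 192.168.1.100          # this machine's IP (example)
nodeRegistration:
  criSocket: /var/run/containerd/containerd.sock
  name: k8s-master                         # this machine's hostname
imageRepository: registry.aliyuncs.com/google_containers
networking:
  podSubnet: "10.244.0.0/16"               # must match the pod network add-on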

kubeadm init --config init.yaml

# flannel's default podSubnet is 10.244.0.0/16; change it if you need to
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

# or Calico; Calico's default podSubnet is 192.168.0.0/16; change it if you need to
kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml

# To schedule ordinary pods on the master, remove the master taint
kubectl taint nodes --all node-role.kubernetes.io/master-

Enabling signed kubelet serving certificates
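
No steps were recorded under this heading. A minimal sketch of the usual kubeadm approach (assuming a kubeadm-managed cluster) is to enable serverTLSBootstrap in the kubelet configuration and then approve the serving-certificate CSRs by hand:

# set serverTLSBootstrap: true in the KubeletConfiguration
# (e.g. in init.yaml or the kubelet-config ConfigMap), restart kubelet,
# then approve the pending kubelet serving CSRs:
kubectl get csr
kubectl certificate approve <csr-name>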

When using Vue, do not define data inside the template; such data is not reactive, i.e. Vue is not notified when it changes

When using Vue, do not define data inline in the template. Such data is not reactive, i.e. Vue is not notified when it changes. For example, data defined like this:

<v-card>
  <v-card-text>
    文章管理
    <tree :data="[
    {
      title: '1',
      children: [
      {
        title: '2',
      },
      {
        title: '3',
        children: [
        {
          title: 8
        },
        {
          title: 9
        }
        ]
      }
      ]
    },
    {
      title: '4',
      children: [{
        title: '5',
      }, {
        title: '6',
      }]

    }
    ]"></tree>
  </v-card-text>
</v-card>

The data passed in this way is not reactive; no matter how it is changed, Vue will not react.
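
Instead, define the data in the component and bind it with :data, so Vue can track changes. A minimal sketch of such a component (names are illustrative):

<template>
  <v-card>
    <v-card-text>
      文章管理
      <tree :data="treeData"></tree>
    </v-card-text>
  </v-card>
</template>

<script>
export default {
  data () {
    return {
      // defined here, the array is reactive: mutating it updates <tree>
      treeData: [
        { title: '1', children: [{ title: '2' }, { title: '3' }] },
        { title: '4', children: [{ title: '5' }, { title: '6' }] }
      ]
    }
  }
}
</script>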

Using ssh-agent to manage multiple Vagrant VMs

Software versions

Operating system: Windows 10 Pro 64-bit
vagrant: 1.9.7
virtualbox: 5.1.24
Git for Windows: 2.13.3 64-bit (make sure the bundled OpenSSH is newer than 7.2)

Auto-starting ssh-agent

env=~/.ssh/agent.env

agent_load_env () { test -f "$env" && . "$env" >| /dev/null ; }

agent_start () {
    (umask 077; ssh-agent >| "$env")
    . "$env" >| /dev/null ; }

agent_load_env

# agent_run_state: 0=agent running w/ key; 1=agent w/o key; 2= agent not running
agent_run_state=$(ssh-add -l >| /dev/null 2>&1; echo $?)

if [ ! "$SSH_AUTH_SOCK" ] || [ $agent_run_state = 2 ]; then
    agent_start
    ssh-add
elif [ "$SSH_AUTH_SOCK" ] && [ $agent_run_state = 1 ]; then
    ssh-add
fi

unset env

Copy the snippet above into ~/.bashrc or ~/.profile. It makes ssh-agent start automatically when bash starts, without spawning multiple ssh-agent processes (see GitHub's "Auto-launching ssh-agent on Git for Windows" guide).

Configuring ssh config

Host 192.168.77.*
  IdentityFile ~/.vagrant.d/insecure_private_key
  User vagrant
Host *
  AddKeysToAgent yes
  ForwardAgent yes

Put the configuration above into ~/.ssh/config or /etc/ssh/ssh_config (~/.ssh/config is recommended).

192.168.77.* matches the Vagrant virtual machines.

~/.vagrant.d/insecure_private_key is Vagrant's shared private key. If config.ssh.insert_key = false is set, the public key corresponding to this private key is added to /home/vagrant/.ssh/authorized_keys inside every VM, which makes it convenient to manage multiple hosts with a single configuration.

User vagrant: log into the VMs as the vagrant user by default.

AddKeysToAgent: whenever an ssh login succeeds, the private key used for it is automatically added to ssh-agent.

ForwardAgent: when you ssh into a host while ssh-agent is running locally, the agent connection is forwarded, so the remote host can authenticate with all the keys loaded in your local ssh-agent. ForwardAgent only applies to the next host you log into; to build a real forwarding chain, every host's ssh config must set ForwardAgent yes.

Create the Vagrant working directory and files

mkdir test-nodes
cd test-nodes
touch Vagrantfile

Create a config file that configures ssh on each VM so that every VM enables ForwardAgent:

Host *
  ForwardAgent yes

Create bootstrap.sh, which initializes each VM:

cp /vagrant/config /home/vagrant/.ssh/config

Edit the Vagrantfile

# -*- mode: ruby -*-
# vi: set ft=ruby :

# Two hosts: n1 and n2
hosts = {
  "n1" => "192.168.77.101",
  "n2" => "192.168.77.102",
}
Vagrant.configure("2") do |config|
  # Use Vagrant's shared insecure_private_key so that every host's
  # authorized_keys contains the public key for that same key
  config.ssh.insert_key = false
  # With the ssh config above we do not need Vagrant's own forward_agent
  # option, which only affects `vagrant ssh`
  # config.ssh.forward_agent = true

  config.vm.box = "bento/ubuntu-16.04"
  config.vm.box_check_update = false

  # Configure each host's name and IP
  hosts.each do |name, ip|
    config.vm.define name do |machine|
      machine.vm.network :private_network, ip: ip
      machine.vm.provider "virtualbox" do |v|
        v.name = name
      end
    end
  end

  config.vm.provision "shell", path: "bootstrap.sh"
end

Start the VMs

vagrant up

When the command finishes, the two VMs have been created and are running.

Testing

Log in to 192.168.77.101:

ssh 192.168.77.101

From 192.168.77.101, log in to 192.168.77.102:

ssh 192.168.77.102

If neither login asks for a password, the setup works.

Troubleshooting

Before logging in with ssh, check whether ssh-agent is running in the current environment and which private keys it holds:

echo $SSH_AUTH_SOCK

If this prints nothing, ssh-agent is not running in the current environment.

ssh-add -L

shows whether your private keys have been added to ssh-agent.

Installing Kubernetes on CentOS 7 from behind the firewall

  1. First you need a proxy that can get through the firewall; assume it is http://192.168.99.10:1001
  2. Configure a proxy for yum:
    edit /etc/yum.conf and add or modify the following lines
# The proxy server - proxy server:port number
proxy=http://192.168.99.10:1001
# The account details for yum connections, if needed
# proxy_username=username
# proxy_password=password
  3. Install Docker
    See https://docs.docker.com/engine/installation/linux/docker-ce/centos/#docker-ee-customers
    Install the dependencies:
yum install -y yum-utils device-mapper-persistent-data lvm2

Add the yum repository:

yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

Update the yum package index:

yum makecache fast

List the installable Docker versions:

yum list docker-ce.x86_64  --showduplicates | sort -r

docker-ce.x86_64  17.06.0.el7                               docker-ce-stable  

Install Docker:

yum install docker-ce-<VERSION>

Start Docker:

systemctl start docker

Configure a proxy for Docker
(see https://docs.docker.com/engine/admin/systemd/#httphttps-proxy):

mkdir -p /etc/systemd/system/docker.service.d

Create and edit /etc/systemd/system/docker.service.d/http-proxy.conf:

[Service]
Environment="HTTP_PROXY=http://192.168.99.10:1001" "NO_PROXY=localhost,127.0.0.1,192.168.124.80,192.168.124.81"

Create and edit /etc/systemd/system/docker.service.d/https-proxy.conf:

[Service]
Environment="HTTPS_PROXY=http://192.168.99.10:1001" "NO_PROXY=localhost,127.0.0.1,192.168.124.80,192.168.124.81"

Restart the Docker service:

systemctl daemon-reload
systemctl restart docker
  4. Install kubeadm and kubelet
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet kubeadm
systemctl enable kubelet && systemctl start kubelet
  5. Initialize the master
kubeadm init

Command output:

[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.7.0
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks
[preflight] Starting the kubelet service
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [kubeadm-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 16.502136 seconds
[token] Using token: <token>
[apiconfig] Created RBAC rules
[addons] Applied essential addon: kube-proxy
[addons] Applied essential addon: kube-dns

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run (as a regular user):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token <token> <master-ip>:<master-port>

Check the logs:

journalctl -xeu kubelet

Possible problem:

kubelet: error: failed to run Kubelet: failed to create kubelet: misconfiguration: kubelet cgroup driver: "cgroupfs" is different from docker cgroup driver: "systemd"

Solution:
see kubernetes/kubernetes#43805 (comment)
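
One common fix (a sketch along the lines of that thread, for this error where Docker reports "systemd" and the kubelet "cgroupfs") is to point the kubelet at the same cgroup driver and restart it:

# in the kubelet systemd drop-in (e.g. /etc/systemd/system/kubelet.service.d/10-kubeadm.conf),
# change the cgroup driver argument to match Docker:
#   Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
systemctl daemon-reload
systemctl restart kubelet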

  6. Install a pod network (Weave)
export kubever=$(kubectl version | base64 | tr -d '\n')
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
  7. Join the nodes
    Each node's system must also go through steps 1-4 above,
    then run
kubeadm join --token <token> <master-ip>:<master-port>

ssh-agent usage notes

Enabling forwarding

Add the following to ~/.ssh/config or /etc/ssh/ssh_config:

Host *
    ForwardAgent yes

This way, after you use ssh-agent to log into a remote host, that host can also use the forwarded agent, so ssh-agent access is chained from host to host.

Starting ssh-agent

eval $(ssh-agent)

Checking whether ssh-agent is running

echo $SSH_AUTH_SOCK

If it prints a value, ssh-agent is running; otherwise it is not.
You can also run

ssh-add -L

If the output is

Could not open a connection to your authentication agent.

then ssh-agent is not running.

Adding a private key to ssh-agent

ssh-add ~/.ssh/id_rsa

Listing the keys currently loaded in ssh-agent

ssh-add -L

The keys are shown in public-key form.

Automatically building and deploying to a server

Workflow

  1. Fetch the code from the source repository to the local machine
  2. Build locally: run the specified build script to compile the code (optionally packaging it as a Docker image)
  3. Upload the build artifacts to the server; for a Docker image, push it to a registry first, then pull and run it on the server (see the sketch below)
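
As a rough sketch of the Docker-image variant of this workflow (registry, image, and server names are placeholders):

# 1. fetch the code
git clone git@example.com:myorg/myapp.git && cd myapp
# 2. build locally and package it as a Docker image
docker build -t registry.example.com/myorg/myapp:1.0.0 .
# 3. push the image, then pull and run it on the server
docker push registry.example.com/myorg/myapp:1.0.0
ssh deploy@server.example.com 'docker pull registry.example.com/myorg/myapp:1.0.0 && docker run -d --name myapp -p 80:3000 registry.example.com/myorg/myapp:1.0.0'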

Deploying a Node.js service with pm2

The commands below assume root privileges; if you are not root, prepend sudo yourself.

Install a recent git (newer than 1.8)

To use pm2's automatic deployment feature your git must be newer than 1.8, but the version yum installs on CentOS 6 or CentOS 7 is exactly 1.8, so on a CentOS server you have to build and install git yourself.
First remove the existing git:

sudo yum remove git

Install the dependencies needed to build git:

sudo yum groupinstall "Development Tools"
sudo yum install gettext-devel openssl-devel perl-CPAN perl-devel zlib-devel

Download the latest git source, extract it, then build and install:

wget https://github.com/git/git/archive/v2.13.3.tar.gz -O git.tar.gz
tar xzvf git.tar.gz
cd git-2.13.3
make configure
./configure --prefix=/usr/local
make all
make install

The new git version is now installed.

Configure passwordless login to the remote server

The local development machine must be able to log into the remote server without a password, otherwise pm2 will keep prompting you for one.
Generate the local machine's ssh key pair:

ssh-keygen -t rsa

Press Enter through all the prompts and id_rsa.pub is generated under ~/.ssh/.
Copy the public key to the remote server:

ssh-copy-id [email protected]

Now you can ssh straight into the remote server without being prompted for a password.

Configure the git host (GitHub, Gitee, GitBucket) for passwordless pulls and pushes

Again use the public key generated above; copy the contents of ~/.ssh/id_rsa.pub.

GitHub

(screenshot)

Gitee (码云)

(screenshot)

GitBucket

Here we mean a private git server built with GitBucket.

Enable ssh access

GitBucket does not enable ssh access by default.
Log in as an administrator and configure it on the System administration -> System settings page.
(screenshot)

Configure pm2

Assume the project name and directory are both myapp and that the directory on the server is /var/www/myapp.

Install pm2 both locally and on the server

This assumes Node.js is installed both locally and on the server.

npm install -g pm2

Initialize the project

mkdir myapp
cd myapp
npm init -y
git init
echo "# myapp" > README.md
git add README.md
git commit -m "first commit"
git remote add origin git@github.com:xxxx/xxxx.git
git push -u origin master

The simplest Node.js server

Create a file server.js under myapp with the following content:

const http = require('http');
 
const hostname = '127.0.0.1';
const port = 6001;
 
const server = http.createServer((req, res) => {
  res.statusCode = 200;
  res.setHeader('Content-Type', 'text/plain');
  res.end('Hello Pm2 Deploy!\n');
});
 
server.listen(port, hostname, () => {
  console.log(`Server running at http://${hostname}:${port}/`);
});

Generate the pm2 configuration file

pm2 ecosystem

This creates an ecosystem.config.js file in the current directory with the following content:

module.exports = {
  /**
   * Application configuration section
   * http://pm2.keymetrics.io/docs/usage/application-declaration/
   */
  apps : [

    // First application
    {
      name      : 'API',
      script    : 'app.js',
      env: {
        COMMON_VARIABLE: 'true'
      },
      env_production : {
        NODE_ENV: 'production'
      }
    },

    // Second application
    {
      name      : 'WEB',
      script    : 'web.js'
    }
  ],

  /**
   * Deployment section
   * http://pm2.keymetrics.io/docs/usage/deployment/
   */
  deploy : {
    production : {
      user : 'node',
      host : '212.83.163.1',
      ref  : 'origin/master',
      repo : '[email protected]:repo.git',
      path : '/var/www/production',
      'post-deploy' : 'npm install && pm2 reload ecosystem.config.js --env production'
    },
    dev : {
      user : 'node',
      host : '212.83.163.1',
      ref  : 'origin/master',
      repo : '[email protected]:repo.git',
      path : '/var/www/development',
      'post-deploy' : 'npm install && pm2 reload ecosystem.config.js --env dev',
      env  : {
        NODE_ENV: 'dev'
      }
    }
  }
};

Refer to the screenshot below and change the fields highlighted in the red boxes to match your own setup.
(screenshot)

Commit and push the code

git commit -am "project init for pm2 deploy"
git push origin master

With that, the initial code is basically complete; the files in the directory are shown below.
(screenshot)

Deploying remotely

Setup

pm2 deploy ecosystem.config.js production setup

This command only needs to be run once, at the very beginning.

Deploy

pm2 deploy ecosystem.config.js production

Run this command every time you push code; it deploys your latest code to the remote server.

Seamless communication between a Windows host and VirtualBox guests

Open VirtualBox and create a Host-Only network in the Host Network Manager.

(screenshot)

Set the VM's network adapter to the network you just created.

(screenshot)

Configure a static IP for the VM if you need one (or skip this and use DHCP).

See the "Setting a static IP on Ubuntu 20.04" section below.

Giving the VM internet access

In Windows, open the network adapter settings, right-click the adapter that has internet access, choose Properties, open the Sharing tab, check "Allow other network users to connect through this computer's Internet connection", select the Host-Only network created in the VirtualBox Host Network Manager, and click OK.

(screenshot)

Expanding a disk on Ubuntu

sudo lvextend -l +100%FREE /dev/mapper/ubuntu--vg-ubuntu--lv
sudo resize2fs /dev/mapper/ubuntu--vg-ubuntu--lv

Configuring Spring Boot DevTools in IntelliJ IDEA

Maven setup

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-devtools</artifactId>
    </dependency>
</dependencies>

Gradle setup

compile('org.springframework.boot:spring-boot-devtools')

If you use IntelliJ IDEA, the following settings are also required:

Enable "Make project automatically" in the Compiler settings.
(screenshot)

Also enable compiler.automake.allow.when.app.running in the Registry: press Shift+Command+A (or Ctrl+Alt+Shift+/) to open the Registry dialog, then search for that entry.
(screenshot)

Installation

Installing Docker

Uninstall old versions

yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

Install

yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

yum install docker-ce docker-ce-cli containerd.io

Run

systemctl start docker

Enable at boot

systemctl enable docker

Installing docker-compose

curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose

chmod +x /usr/local/bin/docker-compose

Configuring a registry mirror

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://oml82bmq.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

Verifying the Docker installation

docker info

Or run

docker run hello-world

and check that the output contains "Hello from Docker!".

Installing the parking-lot server

Files in the installation package

  1. html.zip: the client HTML; the archive contains three directories (dashboard, m, mm) and an index.html

  2. docker-compose.yml: docker-compose configuration file

  3. mycustom.cnf: MySQL server configuration file

  4. nginx.conf: nginx configuration file

  5. .env: environment variable configuration file

  6. park.sql.zip: database dump

Edit the .env configuration file

This file contains two variables (see the example after this list):

  1. HOST: the IP address or domain name

  2. PORT: the server port

Change these two variables to match your environment.
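
A minimal example of the .env file (both values are placeholders):

# IP address or domain name of this server (placeholder)
HOST=park.example.com
# port the server listens on
PORT=80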

Edit the nginx.conf configuration file

Find the string 192.168.1.102 and replace it with your own IP address or domain name.
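
This can be done in one step with sed; the domain here is only a placeholder:

sed -i 's/192\.168\.1\.102/park.example.com/g' nginx.conf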

Create the installation directories and upload the files

mkdir /data /html

Upload nginx.conf, mycustom.cnf, docker-compose.yml, and .env to the /data directory.

Extract html.zip and upload its contents to the /html directory.

Verify the directories and files

/data should contain four files:

  1. nginx.conf

  2. mycustom.cnf

  3. docker-compose.yml

  4. .env

/html should contain three directories and an index.html file:

  1. dashboard

  2. m

  3. mm

Start the server

cd /data
docker login registry.aliyuncs.com -u [email protected] -p  xxxx
docker-compose pull
docker-compose up -d
docker rmi $(docker images --quiet --filter 'dangling=true')

Configure the database

Open http://ip:port/mysql/index.php, replacing ip and port with your own server's IP and port.
Fill in:

  • Server: mysql

  • Username: root

  • Password: tianchen123

then log in.

Create a database named park, select it, then import park.sql.zip and wait for the import to finish.

Open the admin dashboard

http://ip:port/

Username: admin
Password: admin

Update the configuration

Go to Platform Management -> System Configuration and fill in the "WeChat official account configuration" and "WeChat admin official account configuration" pages according to your own official account. Then go through all configuration pages and change every address that uses the IP 192.168.1.102 to your own domain.

Setting a static IP on Ubuntu 20.04

Edit /etc/netplan/00-installer-config.yaml to:

network:
  ethernets:
    enp0s3:
      dhcp4: no
      addresses:
        - 192.168.137.110/24
      gateway4: 192.168.137.1
      nameservers:
        addresses: [114.114.114.114]
  version: 2

Apply it:

 sudo netplan apply

Check:

ip addr

Installing fabric8 on a laptop through a proxy

See https://fabric8.io/guide/getStarted/gofabric8.html. Because of the firewall, we install fabric8 through a proxy.

  1. Operating system: Windows 10, with VirtualBox and a proxy already installed
  2. Download and install the required software
    minikube v0.21.0
curl -Lo minikube.exe https://storage.googleapis.com/minikube/releases/v0.21.0/minikube-windows-amd64.exe

kubectl v1.7.0

curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/windows/amd64/kubectl.exe

gofabric8 v0.4.133

curl -Lo gofabric8.exe https://github.com/fabric8io/gofabric8/releases/download/v0.4.133/gofabric8-windows-amd64.exe

PS: it is best to use this exact gofabric8 version, otherwise you may run into problems like fabric8io/gofabric8#509.
After downloading, put minikube.exe, kubectl.exe, and gofabric8.exe somewhere on your $PATH.
3. Start minikube

minikube start --docker-env HTTP_PROXY=http://192.168.99.1:8877 --docker-env HTTPS_PROXY=http://192.168.99.1:8877 --memory=6000

PS: this makes the Docker environment inside minikube use your proxy; the minikube VM gets 6 GB of memory because fabric8 is fairly memory-hungry.
4. Deploy fabric8

gofabric8 deploy -y

Console output:

Deploying fabric8 to your Kubernetes installation at https://192.168.99.102:8443 for domain  in namespace default

Loading fabric8 releases from maven repository:https://repo1.maven.org/maven2/
Deploying package: platform version: 2.4.24

Now about to install package https://repo1.maven.org/maven2/io/fabric8/platform/packages/fabric8-platform/2.4.24/fabric8-platform-2.4.24-kubernetes.yml
Processing resource kind: Namespace in namespace default name user-secrets-source-admin
Found namespace on kind Secret of user-secrets-source-adminProcessing resource kind: Secret in namespace user-secrets-source-admin name default-gogs-git
Processing resource kind: Secret in namespace default name jenkins-docker-cfg
Processing resource kind: Secret in namespace default name jenkins-git-ssh
Processing resource kind: Secret in namespace default name jenkins-hub-api-token
Processing resource kind: Secret in namespace default name jenkins-master-ssh
Processing resource kind: Secret in namespace default name jenkins-maven-settings
Processing resource kind: Secret in namespace default name jenkins-release-gpg
Processing resource kind: Secret in namespace default name jenkins-ssh-config
Processing resource kind: ServiceAccount in namespace default name configmapcontroller
Processing resource kind: ServiceAccount in namespace default name exposecontroller
Processing resource kind: ServiceAccount in namespace default name fabric8
Processing resource kind: ServiceAccount in namespace default name gogs
Processing resource kind: ServiceAccount in namespace default name jenkins
Processing resource kind: Service in namespace default name fabric8
Processing resource kind: Service in namespace default name fabric8-docker-registry
Processing resource kind: Service in namespace default name fabric8-forge
Processing resource kind: Service in namespace default name gogs
Processing resource kind: Service in namespace default name gogs-ssh
Processing resource kind: Service in namespace default name jenkins
Processing resource kind: Service in namespace default name jenkins-jnlp
Processing resource kind: Service in namespace default name jenkinshift
Processing resource kind: Service in namespace default name nexus
Processing resource kind: PersistentVolumeClaim in namespace default name fabric8-docker-registry-storage
Processing resource kind: PersistentVolumeClaim in namespace default name gogs-data
Processing resource kind: PersistentVolumeClaim in namespace default name jenkins-jobs
Processing resource kind: PersistentVolumeClaim in namespace default name jenkins-mvn-local-repo
Processing resource kind: PersistentVolumeClaim in namespace default name jenkins-workspace
Processing resource kind: PersistentVolumeClaim in namespace default name nexus-storage
Processing resource kind: ConfigMap in namespace default name catalog-apiman
Processing resource kind: ConfigMap in namespace default name catalog-apiman-gateway
Processing resource kind: ConfigMap in namespace default name catalog-artifactory
Processing resource kind: ConfigMap in namespace default name catalog-cd-pipeline
Processing resource kind: ConfigMap in namespace default name catalog-chaos-monkey
Processing resource kind: ConfigMap in namespace default name catalog-chat-irc
Processing resource kind: ConfigMap in namespace default name catalog-chat-letschat
Processing resource kind: ConfigMap in namespace default name catalog-chat-slack
Processing resource kind: ConfigMap in namespace default name catalog-configmapcontroller
Processing resource kind: ConfigMap in namespace default name catalog-content-repository
Processing resource kind: ConfigMap in namespace default name catalog-elasticsearch
Processing resource kind: ConfigMap in namespace default name catalog-elasticsearch-v1
Processing resource kind: ConfigMap in namespace default name catalog-exposecontroller
Processing resource kind: ConfigMap in namespace default name catalog-fabric8-docker-registry
Processing resource kind: ConfigMap in namespace default name catalog-fabric8-forge
Processing resource kind: ConfigMap in namespace default name catalog-fluentd
Processing resource kind: ConfigMap in namespace default name catalog-funktion
Processing resource kind: ConfigMap in namespace default name catalog-funktion-operator
Processing resource kind: ConfigMap in namespace default name catalog-funktion-runtimes
Processing resource kind: ConfigMap in namespace default name catalog-gerrit
Processing resource kind: ConfigMap in namespace default name catalog-git-collector
Processing resource kind: ConfigMap in namespace default name catalog-gitlab
Processing resource kind: ConfigMap in namespace default name catalog-gogs
Processing resource kind: ConfigMap in namespace default name catalog-grafana
Processing resource kind: ConfigMap in namespace default name catalog-ingress-nginx
Processing resource kind: ConfigMap in namespace default name catalog-jenkins
Processing resource kind: ConfigMap in namespace default name catalog-keycloak
Processing resource kind: ConfigMap in namespace default name catalog-kibana
Processing resource kind: ConfigMap in namespace default name catalog-kiwiirc
Processing resource kind: ConfigMap in namespace default name catalog-kubeflix
Processing resource kind: ConfigMap in namespace default name catalog-letschat
Processing resource kind: ConfigMap in namespace default name catalog-logging
Processing resource kind: ConfigMap in namespace default name catalog-manageiq
Processing resource kind: ConfigMap in namespace default name catalog-management
Processing resource kind: ConfigMap in namespace default name catalog-maven-shell
Processing resource kind: ConfigMap in namespace default name catalog-message-broker
Processing resource kind: ConfigMap in namespace default name catalog-message-gateway
Processing resource kind: ConfigMap in namespace default name catalog-messaging
Processing resource kind: ConfigMap in namespace default name catalog-metrics
Processing resource kind: ConfigMap in namespace default name catalog-nexus
Processing resource kind: ConfigMap in namespace default name catalog-prometheus
Processing resource kind: ConfigMap in namespace default name catalog-prometheus-blackbox-exporter
Processing resource kind: ConfigMap in namespace default name catalog-prometheus-node-exporter
Processing resource kind: ConfigMap in namespace default name catalog-social
Processing resource kind: ConfigMap in namespace default name catalog-taiga
Processing resource kind: ConfigMap in namespace default name catalog-turbine-server
Processing resource kind: ConfigMap in namespace default name catalog-zipkin
Processing resource kind: ConfigMap in namespace default name catalog-zookeeper
Processing resource kind: ConfigMap in namespace default name exposecontroller
Processing resource kind: ConfigMap in namespace default name fabric8
Processing resource kind: ConfigMap in namespace default name fabric8-environments
Processing resource kind: ConfigMap in namespace default name fabric8-forge
Found namespace on kind ConfigMap of user-secrets-source-adminProcessing resource kind: ConfigMap in namespace user-secrets-source-admin name fabric8-git-app-secrets
Processing resource kind: ConfigMap in namespace default name fabric8-platform
Processing resource kind: ConfigMap in namespace default name gogs
Processing resource kind: ConfigMap in namespace default name jenkins
Processing resource kind: Deployment in namespace default name configmapcontroller
Processing resource kind: Deployment in namespace default name exposecontroller
Processing resource kind: Deployment in namespace default name fabric8
Processing resource kind: Deployment in namespace default name fabric8-docker-registry
Processing resource kind: Deployment in namespace default name fabric8-forge
Processing resource kind: Deployment in namespace default name gogs
Processing resource kind: Deployment in namespace default name jenkins
Processing resource kind: Deployment in namespace default name nexus
platform......................................................................✔

Recreating configmap exposecontroller

-------------------------

Default GOGS admin username/password = gogsadmin/RedHat$1

Checking if PersistentVolumeClaims bind to a PersistentVolume
Downloading images and waiting to open the fabric8 console...

-------------------------
...................................................................................................
Opening URL http://192.168.99.102:30073

Open the fabric8 console.
(screenshot)

Installing Docker on CentOS 6

Uninstall Docker

yum -y remove docker

Install Docker

yum install -y https://get.docker.com/rpm/1.7.0/centos-6/RPMS/x86_64/docker-engine-1.7.0-1.el6.x86_64.rpm

Configure a registry mirror

Edit /etc/sysconfig/docker and change the other_args option to:

other_args="--registry-mirror=https://oml82bmq.mirror.aliyuncs.com"

Start Docker

service docker start

Using the firewall on CentOS 7

Permanently open the ports you need

sudo firewall-cmd --zone=public --add-port=3000/tcp --permanent
sudo firewall-cmd --reload

Then check the new firewall rules:

firewall-cmd --list-all

Disabling the firewall

//Temporarily stop the firewall; it starts again automatically after a reboot
systemctl stop firewalld
//Check the firewall status
firewall-cmd --state
firewall-cmd --list-all
//Disable firewall
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld
//Enable firewall
systemctl enable firewalld
systemctl start firewalld
systemctl status firewalld

Mastering Kubernetes

Understanding the Kubernetes architecture

Kubernetes is a large open-source project with a lot of code and a lot of functionality. You have probably read articles about it, maybe studied it a little, used it in side projects, or even used it at work. But to understand everything Kubernetes is about, how to use it effectively, and what the best practices are, you need much more than that. In this chapter we build the foundation needed to use Kubernetes to its full potential. We first look at what container orchestration means. Then we cover important Kubernetes concepts that form the vocabulary used throughout the book. After that we dive into Kubernetes' architecture proper and see how it delivers all the capabilities it offers its users. We then discuss the various runtimes and container engines Kubernetes supports (Docker is just one option), and finally we discuss the role of Kubernetes in continuous integration and deployment pipelines.
By the end of this chapter you will have a solid understanding of container orchestration, which problems Kubernetes solves, the rationale behind its design and architecture, and the different runtimes it supports. You will also be familiar with the overall structure of the open-source project and be ready to dive in and find the answers to any question.

Understanding container orchestration

Kubernetes' main responsibility is container orchestration: making sure the containers that perform various workloads are scheduled to run on physical or virtual machines. The containers must be packed efficiently and respect the constraints of the deployment environment and the cluster configuration. In addition, Kubernetes must watch all running containers and replace any that are dead, unresponsive, or otherwise unhealthy. You will learn about more of Kubernetes' capabilities in the following chapters; in this section the focus is on containers and their orchestration.

Quickly creating a Java application with Gradle

gradle wrapper --gradle-distribution-url=http://vuuvv.qiniudn.com/gradle-4.6-bin.zip
./gradlew init --type java-application --test-framework testng

Developing a Vue project with TypeScript

Install the Vue CLI

npm install -g vue-cli

Generate a Vue webpack project with vue-cli

vue init webpack vuuvv
cd vuuvv
yarn install

When generating from the template, choose not to include ESLint.

Setting up the TypeScript environment

Remove Babel

Remove all Babel-related dependencies.
(screenshot)

Delete the .babelrc file in the project root.

Add the TypeScript dependencies

yarn add --dev typescript ts-loader tslint tslint-loader

Add tsconfig.json

Add a tsconfig.json file in the project root:

{
  "compilerOptions": {
    "lib": [
      "dom",
      "es5",
      "es2015.promise"
    ],
    "module": "es2015",
    "moduleResolution": "node",
    "target": "es5",
    "sourceMap": true,
    "emitDecoratorMetadata": true,
    "experimentalDecorators": true,
    "allowSyntheticDefaultImports": true
  }
}

Add the TSLint configuration file

Add a tslint.json file in the project root:

{
  "rules": {
    "class-name": true,
    "comment-format": [
      true,
      "check-space"
    ],
    "indent": [
      true,
      "spaces"
    ],
    "no-duplicate-variable": true,
    "no-eval": true,
    "no-internal-module": false,
    "no-trailing-whitespace": false,
    "no-var-keyword": true,
    "one-line": [
      true,
      "check-open-brace",
      "check-whitespace"
    ],
    "quotemark": [
      true,
      "single"
    ],
    "semicolon": [
      true
    ],
    "triple-equals": [
      true,
      "allow-null-check"
    ],
    "typedef-whitespace": [
      true,
      {
        "call-signature": "nospace",
        "index-signature": "nospace",
        "parameter": "nospace",
        "property-declaration": "nospace",
        "variable-declaration": "nospace"
      }
    ],
    "variable-name": [
      true,
      "ban-keywords"
    ],
    "whitespace": [
      true,
      "check-branch",
      "check-decl",
      "check-operator",
      "check-separator",
      "check-type"
    ]
  }
}

Let TypeScript import .vue files

Add a vue-shim.d.ts file in the project root:

declare module "*.vue" {
  import Vue from 'vue'
  export default Vue
}

Update the webpack configuration

In build/webpack.base.conf.js, change

  entry: {
    app: './src/main.js'
  },

to

  entry: {
    app: './src/main.ts'
  },

  resolve: {
    extensions: ['.js', '.vue', '.json'],
    ...
  },

to

  resolve: {
    extensions: ['.ts', '.js', '.vue', '.json'],
    ...
  },

Add the following to module.rules:

      {
        test: /\.ts$/,
        exclude: /node_modules|vue\/src/,
        loader: "ts-loader",
        options: {
          appendTsSuffixTo: [/\.vue$/]
        }
      },
      {
        test: /\.ts$/,
        exclude: /node_modules/,
        enforce: 'pre',
        loader: 'tslint-loader'
      },

Update build/vue-loader.conf.js so TSLint can check the TypeScript embedded in .vue files. Change

module.exports = {
  loaders: utils.cssLoaders({
    sourceMap: sourceMapEnabled,
    extract: isProduction
  }),
  cssSourceMap: sourceMapEnabled,
  transformToRequire: {
    video: 'src',
    source: 'src',
    img: 'src',
    image: 'xlink:href'
  }
}

to

const loaders = utils.cssLoaders({
  sourceMap: sourceMapEnabled,
  extract: isProduction
});

loaders['ts'] = 'ts-loader!tslint-loader';


module.exports = {
  loaders,
  cssSourceMap: sourceMapEnabled,
  transformToRequire: {
    video: 'src',
    source: 'src',
    img: 'src',
    image: 'xlink:href'
  },
  esModule: true
}

Update the code in src

  • Rename the .js files to .ts
  • Add the .vue suffix to statements that import .vue files
  • Fix the errors reported by TSLint
  • To use TypeScript inside a .vue file, use the tag <script lang="ts"></script> (see the sketch below)
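
A minimal sketch of a converted entry point and component; the file and component names are only examples:

// src/main.ts
import Vue from 'vue'
import App from './App.vue'   // note the explicit .vue suffix

new Vue({
  el: '#app',
  render: h => h(App)
})

<!-- src/components/Hello.vue -->
<template>
  <div>{{ message }}</div>
</template>

<script lang="ts">
import Vue from 'vue'

export default Vue.extend({
  name: 'Hello',
  data () {
    return { message: 'Hello from TypeScript' }
  }
})
</script>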

Start the project

npm start

test

System requirements

  1. java 1.8
  2. nodejs 10
  3. mysql 5.7
  4. redis 3.2
  5. nginx 1.17
  6. rabbitmq (optional)

Installation

Upload and copy the files

Copy the files from the archive into the /opt directory.

mysql 5.7

Use the customer's own MySQL server.

Import the data

Create a database named park with the utf8mb4 character set,
then import /opt/park/sql/park.sql (a sketch of the commands follows).
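
A sketch of the equivalent commands; the root credentials are placeholders for whatever the customer's MySQL server uses:

mysql -u root -p -e "CREATE DATABASE park CHARACTER SET utf8mb4;"
mysql -u root -p park < /opt/park/sql/park.sql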

redis

Use the customer's own Redis server.

java 1.8

The customer should already have a Java 1.8 environment configured.
If not, install it with:

rpm -ivh /opt/park/tools/java/jdk-8u231-linux-x64.rpm

Verify:

java -version

Expected output:

java version "1.8.0_231"
Java(TM) SE Runtime Environment (build 1.8.0_231-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.231-b11, mixed mode)

nodejs

The package is in the /opt/park/tools/nodejs directory.

rpm -ivh /opt/park/tools/nodejs/nodejs-10.17.0-1nodesource.x86_64.rpm

Verify:

node --version

Expected output:

v10.17.0

nginx

rpm -ivh /opt/park/tools/nginx/nginx-1.17.6-1.el6.ngx.x86_64.rpm

Configure nginx:

cp /opt/park/nginx.conf /etc/nginx/

Choose to overwrite the existing file.

Start nginx:

service nginx start

The Java application

The Java application lives in /opt/park/java/ and listens on port 9001.

The main configuration is in run.sh:

export SPRING_DATASOURCE_URL="jdbc:mysql://47.98.42.247:3366/park?verifyServerCertificate=false&useSSL=true&useUnicode=true"
export SPRING_DATASOURCE_USERNAME=root
export SPRING_DATASOURCE_PASSWORD=tianchen123
export REDIS_HOST=47.96.23.47
export REDIS_PORT=6379
export SITE_DOMAIN=192.168.1.111
export SITE_IP=192.168.1.111
export SITE_PORT=80
export SITE_PATH="/api/"

# running java app
java -jar -Djava.security.egd=file:/dev/./urandom /opt/park/java/app.jar --spring.profiles.active=prod

Environment variables

  1. SPRING_DATASOURCE_URL: MySQL database URL
  2. SPRING_DATASOURCE_USERNAME: MySQL username
  3. SPRING_DATASOURCE_PASSWORD: MySQL password
  4. REDIS_HOST: Redis server IP
  5. REDIS_PORT: Redis server port
  6. SITE_DOMAIN: this site's domain name
  7. SITE_IP: this site's IP address
  8. SITE_PORT: this site's port
  9. SITE_PATH: this site's path prefix

Run it:

nohup sh /opt/park/java/run.sh >> /opt/park/java/park.log 2>&1 &

Check the logs:

tail -f /opt/park/java/park.log

Seeing

Tomcat started on port(s): 9001 (http)
Started Application in 46.756 seconds (JVM running for 49.516)
{dataSource-1} inited

in the output means the application started successfully.

The device connection service (Node.js application)

The device connection service lives in /opt/park/connector/ and listens on port 4000.

The main configuration is in run.sh:

export PUSH_URL="http://localhost:4000/connector/push"
export REDIS_HOST="47.96.23.47"
export REDIS_PORT="6379"
export MYSQL_HOST="47.98.42.247"
export MYSQL_PORT="3366"
export MYSQL_USER=root
export MYSQL_PASSWORD=tianchen123
export MYSQL_DATABASE=park

cd /opt/park/connector
npm run prod

Environment variables

  1. PUSH_URL: the address the application server uses to send commands to the device connection service; if both services run on the same machine this can stay as is, otherwise change it to an address the application server can reach
  2. MYSQL_HOST: MySQL host IP
  3. MYSQL_PORT: MySQL port
  4. MYSQL_USER: MySQL username
  5. MYSQL_PASSWORD: MySQL password
  6. MYSQL_DATABASE: MySQL database name
  7. REDIS_HOST: Redis server IP
  8. REDIS_PORT: Redis server port

Run it:

nohup sh /opt/park/connector/run.sh >> /opt/park/connector/park.log 2>&1 &

Check the logs:

tail -f /opt/park/connector/park.log

Seeing

Server listening at http://localhost:4000

in the output means the service started successfully.

Open the site

Assuming the server's IP address is 192.168.1.111, just open http://192.168.1.111/.

WeChat official account setup

Official account settings

Settings -> Official account settings -> Feature settings
(screenshot)

Development -> Basic configuration
(screenshot)

Parking lot settings

Platform Management -> System Configuration -> WeChat official account configuration
(screenshot)

Official account menu entries

Visit http://your-domain/dashboard/#/user/wechat/menu to set up the official account menu (this page is not fully polished; it is best to refresh once after adding each menu item; if you cannot get it to work, send me your site URL and I will set it up for you).

Menu items

  1. Pay for another car: http://your-domain/api/park/wechat/r?url=/car/temp
  2. Parking payment: http://your-domain/api/park/wechat/pay
  3. Monthly pass payment: http://your-domain/api/park/wechat/r?url=/car/month/pay
  4. Stored-value top-up: http://your-domain/api/park/wechat/r?url=/car/store/pay
  5. My account: http://your-domain/api/park/wechat

Adjust these for your own domain.

Offline installation

Install glibc-2.14

cd glibc-2.14
mkdir build
cd build/
../configure --prefix=/opt/glibc-2.14
make && make install

Append the following to /etc/profile:

export LD_LIBRARY_PATH=/opt/glibc-2.14/lib:$LD_LIBRARY_PATH

Apply it:

source /etc/profile

Install Node.js

mkdir -p /usr/local/lib/nodejs
tar -xJvf node-v12.13.1-linux-x64.tar.xz -C /usr/local/lib/nodejs
sudo ln -s /usr/local/lib/nodejs/node-v12.13.1-linux-x64/bin/node /usr/bin/node
sudo ln -s /usr/local/lib/nodejs/node-v12.13.1-linux-x64/bin/npm /usr/bin/npm
sudo ln -s /usr/local/lib/nodejs/node-v12.13.1-linux-x64/bin/npx /usr/bin/npx
node --version

This prints the Node.js version: v12.13.1

Install RabbitMQ

rpm -ivh rabbitmq-server-3.8.1-1.el6.noarch.rpm

Install Redis

rpm -ivh redis-3.0.7-4.el6.art.x86_64.rpm
service redis start
