# Install OpenVPN and wire an .ovpn profile to a stored credentials file.
sudo apt-get -y install openvpn
# Write the username/password file as root. The original
# `sudo printf ... > /etc/openvpn/credentials` fails: the redirection is
# performed by the *unprivileged* shell, not by sudo. Pipe through tee instead.
printf '%s\n' 'username' 'password' | sudo tee /etc/openvpn/credentials > /dev/null
# Credentials must not be world-readable.
sudo chmod 600 /etc/openvpn/credentials
# Point the profile's auth-user-pass directive at the credentials file.
# Anchor the pattern so re-running the command stays idempotent, and use
# '|' as the sed delimiter so the path needs no escaping.
sudo sed -i 's|^auth-user-pass$|auth-user-pass /etc/openvpn/credentials|' /etc/openvpn/US-East.ovpn
# Start the VPN in the foreground using the edited profile.
sudo openvpn --config /etc/openvpn/US-East.ovpn
Reference:
How to Setup OpenVPN Command Line on Linux (Ubuntu)
Create EFS on AWS web portal
Edit the security group of EFS to allow access from EC2 instances
Mount EFS on EC2
# Create the EFS mount point at the filesystem root. The original
# `sudo mkdir efs` created a *relative* directory, but every later step
# (chmod, the /etc/fstab entry) refers to /efs.
sudo mkdir -p /efs
# NOTE(review): 777 is wide open; 775 plus the cross-group memberships
# configured below would be tighter.
sudo chmod 777 /efs
Install amazon-efs-utils
for auto-remount
# Build and install amazon-efs-utils from source; it provides the "efs"
# filesystem type and TLS/IAM mount options used in the fstab entry.
git clone https://github.com/aws/efs-utils
cd efs-utils/
# Produces a .deb package under ./build/
./build-deb.sh
sudo apt-get -y install ./build/amazon-efs-utils*deb
Configure IAM role in EC2 (already done)
Edit /etc/fstab
fs-xxxxxxxx:/ /efs efs _netdev,tls,iam 0 0
Test mount
sudo mount -fav
Add each Linux user to the other user's group to avoid read-only
permission issues on the shared mount
# Cross-add each account to the other's primary group so that files
# created by either user on the shared EFS mount are accessible to both
# (avoids the read-only issue described above).
sudo usermod -a -G ubuntu guangningyu
sudo usermod -a -G guangningyu ubuntu
Reference:
1. Mount the Amazon EFS File System on the EC2 Instance and Test
2. Mounting your Amazon EFS file system automatically
3. User and Group ID Permissions for Files and Directories Within a File System
# Install the Azure CLI via Homebrew (macOS).
brew update && brew install azure-cli
# Sign in interactively (launches a browser-based login).
az login
# Install Azure Functions Core Tools v2 from the azure/functions tap.
brew tap azure/functions
brew install azure-functions-core-tools@2
References:
Install Azure CLI on macOS
Azure/azure-functions-core-tools
Reference: Windows Server 2016 : Initial Settings : Add Local Users
Install dependencies
# Refresh package lists, then install the s3fs build dependencies:
# FUSE, libcurl, OpenSSL, libxml2 and the autotools toolchain.
sudo apt-get update
sudo apt-get install automake autotools-dev fuse g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
Install s3fs
# Build s3fs-fuse from source and install it under /usr.
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure --prefix=/usr --with-openssl
make
sudo make install
# Verify the binary landed on PATH.
which s3fs
Config credentials
# Store the S3 access/secret key pair for s3fs. A bare ">>" redirect runs
# as the unprivileged user and fails on /etc, so pipe through sudo tee -a.
echo "Your_accesskey:Your_secretkey" | sudo tee -a /etc/passwd-s3fs > /dev/null
# s3fs rejects credential files that other users can read.
sudo chmod 640 /etc/passwd-s3fs
Create mounting point
# Create the mount point; root is required to create a directory in /.
sudo mkdir -p /mys3bucket
# Mount the bucket. -o allow_other requires root (or user_allow_other
# enabled in /etc/fuse.conf), so run the mount itself under sudo too.
sudo s3fs your_bucketname -o use_cache=/tmp -o allow_other -o uid=1001 -o mp_umask=002 -o multireq_max=5 /mys3bucket
Config mount after reboot
Add the following command to /etc/rc.local:
/usr/local/bin/s3fs your_bucketname -o use_cache=/tmp -o allow_other -o uid=1001 -o mp_umask=002 -o multireq_max=5 /mys3bucket
Reference:
How to Mount S3 bucket on EC2 Linux Instance
# Install Nextcloud stack
sudo snap install nextcloud
# Create administrator account
sudo nextcloud.manual-install <admin_username> <admin_password>
# Configure trusted domains (only localhost by default)
sudo nextcloud.occ config:system:get trusted_domains
sudo nextcloud.occ config:system:set trusted_domains 1 --value=<dns-domain>
# Set 512M as PHP memory limit
sudo snap get nextcloud php.memory-limit # Should be 512M
sudo snap set nextcloud php.memory-limit=512M
# Set background jobs interval (e.g. checking for new emails, update RSS feeds, ...)
sudo snap set nextcloud nextcloud.cron-interval=10m # Default: 15m
# Move the snap's HTTP/HTTPS listeners off 80/443, e.g. to sit behind a reverse proxy
sudo snap set nextcloud ports.http=81 ports.https=444
Reference:
Nextcloud on AWS
Putting the snap behind a reverse proxy
import pyodbc

# Connect to SQL Server through the Microsoft ODBC driver.
# NOTE(review): placeholder DSN values — replace server/db/user/password.
conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=test;DATABASE=test;UID=user;PWD=password')
cursor = conn.cursor()
# cursor.tables() yields one metadata row per table; print each name.
for row in cursor.tables():
    print(row.table_name)  # fixed: the loop body was unindented (SyntaxError)
Set up the same locale
on both the local laptop and the remote server:
# Force a consistent UTF-8 locale; mismatched locales between the local
# machine and the remote server can garble characters over SSH.
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
Use the lsblk command to list the block devices attached to the instance
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
xvda 202:0 0 30G 0 disk
`-xvda1 202:1 0 8G 0 part /
loop0 7:0 0 91M 1 loop /snap/core/6405
loop1 7:1 0 87.9M 1 loop /snap/core/5742
loop2 7:2 0 17.9M 1 loop /snap/amazon-ssm-agent/1068
loop3 7:3 0 16.5M 1 loop /snap/amazon-ssm-agent/784
loop4 7:4 0 18M 1 loop /snap/amazon-ssm-agent/930
Use the df -h command to report the existing disk space usage on the file system
$ sudo df -h /dev/xvd*
Filesystem Size Used Avail Use% Mounted on
udev 488M 0 488M 0% /dev
/dev/xvda1 7.7G 7.4G 370M 96% /
Expand the modified partition using growpart
$ sudo growpart /dev/xvda 1
CHANGED: partition=1 start=2048 old: size=16775135 end=16777183 new: size=62912479,end=62914
# Add the Certbot PPA and install Certbot with its Nginx plugin.
sudo add-apt-repository ppa:certbot/certbot
sudo apt-get update
sudo apt-get install certbot python-certbot-nginx
Running this command will get a certificate for you and have Certbot edit your Nginx configuration automatically to serve it.
# Obtain a certificate and let Certbot rewrite the Nginx config to serve it.
sudo certbot --nginx
# for a specific name
sudo certbot --nginx -d example.com
The Certbot packages on your system come with a cron job that will renew your certificates automatically before they expire.
You can test automatic renewal for your certificates by running this command:
sudo certbot renew --dry-run
# Install pip and the shadowsocks server. Both commands need root on a
# system Python — every other apt-get in these notes already uses sudo.
sudo apt-get -y install python-pip
sudo pip install shadowsocks
Create the config file /etc/shadowsocks.json:
{
"server":"your_ip_address",
"server_port":8388,
"local_address": "127.0.0.1",
"local_port":1080,
"password":"your_password",
"timeout":300,
"method":"aes-256-cfb",
"fast_open": false
}
You can set multiple ports in the config file:
{
"server": "your_ip_address",
"local_address": "127.0.0.1",
"local_port": 1080,
"port_password": {
"8381": "password_1",
"8388": "password_2"
},
"timeout": 300,
"method": "aes-256-cfb"
}
# Run the shadowsocks server in the foreground with the JSON config.
ssserver -c /etc/shadowsocks.json
# run in the background (daemonized); stop with the matching -d stop
ssserver -c /etc/shadowsocks.json -d start
ssserver -c /etc/shadowsocks.json -d stop
Edit /etc/rc.local:
# Start the daemon at boot; /etc/rc.local must finish with "exit 0".
/usr/local/bin/ssserver -c /etc/shadowsocks.json -d start
exit 0