updated vendor files and paths

Brad Rydzewski
2015-09-29 18:21:17 -07:00
parent 155576fb03
commit dfea14c7e5
719 changed files with 128749 additions and 34774 deletions

View File

@@ -1,58 +0,0 @@
package ccmenu
import (
"encoding/xml"
"strconv"
"time"
"github.com/drone/drone/pkg/types"
)
type CCProjects struct {
XMLName xml.Name `xml:"Projects"`
Project *CCProject `xml:"Project"`
}
type CCProject struct {
XMLName xml.Name `xml:"Project"`
Name string `xml:"name,attr"`
Activity string `xml:"activity,attr"`
LastBuildStatus string `xml:"lastBuildStatus,attr"`
LastBuildLabel string `xml:"lastBuildLabel,attr"`
LastBuildTime string `xml:"lastBuildTime,attr"`
WebURL string `xml:"webUrl,attr"`
}
func NewCC(r *types.Repo, b *types.Build) *CCProjects {
proj := &CCProject{
Name: r.Owner + "/" + r.Name,
WebURL: r.Self,
Activity: "Building",
LastBuildStatus: "Unknown",
LastBuildLabel: "Unknown",
}
// if the build is not currently running then
// we can return the latest build status.
if b.Status != types.StatePending &&
b.Status != types.StateRunning {
proj.Activity = "Sleeping"
proj.LastBuildTime = time.Unix(b.Started, 0).Format(time.RFC3339)
proj.LastBuildLabel = strconv.Itoa(b.Number)
}
// ensure the last build state maps to a valid
// ccmenu enumeration
switch b.Status {
case types.StateError, types.StateKilled:
proj.LastBuildStatus = "Exception"
case types.StateSuccess:
proj.LastBuildStatus = "Success"
case types.StateFailure:
proj.LastBuildStatus = "Failure"
default:
proj.LastBuildStatus = "Unknown"
}
return &CCProjects{Project: proj}
}
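As a rough editorial sketch (not part of this commit) of how the removed helper was consumed: the struct returned by NewCC serializes directly to the cctray XML payload. The repo/build values and the pkg/ccmenu import path are assumptions; only fields referenced by NewCC are set.
package main

import (
	"encoding/xml"
	"os"

	"github.com/drone/drone/pkg/ccmenu" // assumed import path for the removed package
	"github.com/drone/drone/pkg/types"
)

func main() {
	// hypothetical repo and build values, for illustration only
	repo := &types.Repo{Owner: "octocat", Name: "hello-world", Self: "http://drone.local/octocat/hello-world"}
	build := &types.Build{Number: 42, Status: types.StateSuccess, Started: 1443570000}

	// map the repo/build pair onto the cctray project structure and emit XML
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	if err := enc.Encode(ccmenu.NewCC(repo, build)); err != nil {
		panic(err)
	}
}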

118
shared/crypto/crypto.go Normal file
View File

@@ -0,0 +1,118 @@
package crypto
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"io"
"code.google.com/p/go.crypto/ssh"
"github.com/square/go-jose"
)
const (
RSA_BITS = 2048 // Default number of bits in an RSA key
RSA_BITS_MIN = 768 // Minimum number of bits in an RSA key
)
// standard characters allowed in token string.
var chars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
// default token length
var length = 32
// Rand generates a 32-character random string.
func Rand() string {
b := make([]byte, length)
r := make([]byte, length+(length/4)) // storage for random bytes.
clen := byte(len(chars))
maxrb := byte(256 - (256 % len(chars)))
i := 0
for {
io.ReadFull(rand.Reader, r)
for _, c := range r {
if c >= maxrb {
// Skip this number to avoid modulo bias.
continue
}
b[i] = chars[c%clen]
i++
if i == length {
return string(b)
}
}
}
}
// helper function to generate an RSA Private Key.
func GeneratePrivateKey() (*rsa.PrivateKey, error) {
return rsa.GenerateKey(rand.Reader, RSA_BITS)
}
// helper function that marshals an RSA Public Key to the SSH
// authorized_keys format.
func MarshalPublicKey(public *rsa.PublicKey) []byte {
pub, err := ssh.NewPublicKey(public)
if err != nil {
return []byte{}
}
return ssh.MarshalAuthorizedKey(pub)
}
// helper function that marshals an RSA Private Key to
// a PEM-encoded block.
func MarshalPrivateKey(private *rsa.PrivateKey) []byte {
marshaled := x509.MarshalPKCS1PrivateKey(private)
encoded := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Headers: nil, Bytes: marshaled})
return encoded
}
// UnmarshalPrivateKey is a helper function that unmarshals PEM-encoded
// bytes to an RSA Private Key.
func UnmarshalPrivateKey(private []byte) *rsa.PrivateKey {
decoded, _ := pem.Decode(private)
parsed, err := x509.ParsePKCS1PrivateKey(decoded.Bytes)
if err != nil {
return nil
}
return parsed
}
// Encrypt encrypts a secret string.
func Encrypt(in, privKey string) (string, error) {
rsaPrivKey, err := decodePrivateKey(privKey)
if err != nil {
return "", err
}
return encrypt(in, &rsaPrivKey.PublicKey)
}
// decodePrivateKey is a helper function that unmarshals PEM-encoded
// bytes to an RSA Private Key.
func decodePrivateKey(privateKey string) (*rsa.PrivateKey, error) {
derBlock, _ := pem.Decode([]byte(privateKey))
return x509.ParsePKCS1PrivateKey(derBlock.Bytes)
}
// encrypt encrypts a plaintext variable using JOSE with
// RSA_OAEP and A128GCM algorithms.
func encrypt(text string, pubKey *rsa.PublicKey) (string, error) {
var encrypted string
var plaintext = []byte(text)
// Creates a new encrypter using defaults
encrypter, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, pubKey)
if err != nil {
return encrypted, err
}
// Encrypts the plaintext value and serializes
// as a JOSE string.
object, err := encrypter.Encrypt(plaintext)
if err != nil {
return encrypted, err
}
return object.CompactSerialize()
}
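A minimal sketch (not part of the commit) of how these helpers compose, assuming the package is importable at github.com/drone/drone/shared/crypto as the file path above suggests:
package main

import (
	"fmt"

	"github.com/drone/drone/shared/crypto"
)

func main() {
	// generate a new 2048-bit RSA key
	priv, err := crypto.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}

	// PEM-encode the private key, then recover it from the encoded bytes
	encoded := crypto.MarshalPrivateKey(priv)
	recovered := crypto.UnmarshalPrivateKey(encoded)

	// derive the SSH authorized_keys form of the public key
	fmt.Printf("%s", crypto.MarshalPublicKey(&recovered.PublicKey))

	// generate a random 32-character token
	fmt.Println(crypto.Rand())
}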

View File

@@ -0,0 +1,68 @@
package crypto
import (
"testing"
"github.com/franela/goblin"
"github.com/square/go-jose"
)
func TestKeys(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Generate Key", func() {
g.It("Generates a private key", func() {
_, err := GeneratePrivateKey()
g.Assert(err == nil).IsTrue()
})
})
}
func Test_Encrypt(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Secure", func() {
g.It("Should encrypt a string", func() {
ciphertext, err := Encrypt("top_secret", fakePriv)
g.Assert(err == nil).IsTrue()
object, _ := jose.ParseEncrypted(ciphertext)
privKey, _ := decodePrivateKey(fakePriv)
plaintext, _ := object.Decrypt(privKey)
g.Assert(string(plaintext)).Equal("top_secret")
})
})
}
var fakePriv = `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA71FaA+otDak2rXF/4h69Tz+OxS6NOWaOc/n7dinHXnlo3Toy
ZzvwweJGQKIOfPNBMncz+8h6oLOByFvb95Z1UEM0d+KCFCCutOeN9NNMw4fkUtSZ
7sm6T35wQUkDOiO1YAGy27hQfT7iryhPwA8KmgZmt7toNNf+WymPR8DMwAAYeqHA
5DIEWWsg+RLohOJ0itIk9q6Us9WYhng0sZ9+U+C87FospjKRMyAinSvKx0Uan4ap
YGbLjDQHimWtimfT4XWCGTO1cWno378Vm/newUN6WVaeZ2CSHcWgD2fWcjFixX2A
SvcvfuCo7yZPUPWeiYKrc5d1CC3ncocu43LhSQIDAQABAoIBAQDIbYKM+sfmxAwF
8KOg1gvIXjuNCrK+GxU9LmSajtzpU5cuiHoEGaBGUOJzaQXnQbcds9W2ji2dfxk3
my87SShRIyfDK9GzV7fZzIAIRhrpO1tOv713zj0aLJOJKcPpIlTZ5jJMcC4A5vTk
q0c3W6GOY8QNJohckXT2FnVoK6GPPiaZnavkwH33cJk0j1vMsbADdKF7Jdfq9FBF
Lx+Za7wo79MQIr68KEqsqMpmrawIf1T3TqOCNbkPCL2tu5EfoyGIItrH33SBOV/B
HbIfe4nJYZMWXhe3kZ/xCFqiRx6/wlc5pGCwCicgHJJe/l8Y9OticDCCyJDQtD8I
6927/j2NAoGBAPNRRY8r5ES5f8ftEktcLwh2zw08PNkcolTeqsEMbWAQspV/v+Ay
4niEXIN3ix2yTnMgrtxRGO7zdPnMaTN8E88FsSDKQ97lm7m3jo7lZtDMz16UxGmd
AOOuXwUtpngz7OrQ25NXhvFYLTgLoPsv3PbFbF1pwbhZqPTttTdg5so3AoGBAPvK
ta/n7DMZd/HptrkdkxxHaGN19ZjBVIqyeORhIDznEYjv9Z90JvzRxCmUriD4fyJC
/XSTytORa34UgmOk1XFtxWusXhnYqCTIHG/MKCy9D4ifzFzii9y/M+EnQIMb658l
+edLyrGFla+t5NS1XAqDYjfqpUFbMvU1kVoDJ/B/AoGBANBQe3o5PMSuAD19tdT5
Rnc7qMcPFJVZE44P2SdQaW/+u7aM2gyr5AMEZ2RS+7LgDpQ4nhyX/f3OSA75t/PR
PfBXUi/dm8AA2pNlGNM0ihMn1j6GpaY6OiG0DzwSulxdMHBVgjgijrCgKo66Pgfw
EYDgw4cyXR1k/ec8gJK6Dr1/AoGBANvmSY77Kdnm4E4yIxbAsX39DznuBzQFhGQt
Qk+SU6lc1H+Xshg0ROh/+qWl5/17iOzPPLPXb0getJZEKywDBTYu/D/xJa3E/fRB
oDQzRNLtuudDSCPG5wc/JXv53+mhNMKlU/+gvcEUPYpUgIkUavHzlI/pKbJOh86H
ng3Su8rZAn9w/zkoJu+n7sHta/Hp6zPTbvjZ1EijZp0+RygBgiv9UjDZ6D9EGcjR
ZiFwuc8I0g7+GRkgG2NbfqX5Cewb/nbJQpHPO31bqJrcLzU0KurYAwQVx6WGW0He
ERIlTeOMxVo6M0OpI+rH5bOLdLLEVhNtM/4HUFi1Qy6CCMbN2t3H
-----END RSA PRIVATE KEY-----
`

View File

@@ -1,72 +0,0 @@
package sshutil
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"hash"
"github.com/drone/drone/Godeps/_workspace/src/code.google.com/p/go.crypto/ssh"
)
const (
RSA_BITS = 2048 // Default number of bits in an RSA key
RSA_BITS_MIN = 768 // Minimum number of bits in an RSA key
)
// helper function to generate an RSA Private Key.
func GeneratePrivateKey() (*rsa.PrivateKey, error) {
return rsa.GenerateKey(rand.Reader, RSA_BITS)
}
// helper function that marshals an RSA Public Key to the SSH
// authorized_keys format.
func MarshalPublicKey(pubkey *rsa.PublicKey) []byte {
pk, err := ssh.NewPublicKey(pubkey)
if err != nil {
return []byte{}
}
return ssh.MarshalAuthorizedKey(pk)
}
// helper function that marshals an RSA Private Key to
// a PEM-encoded block.
func MarshalPrivateKey(privkey *rsa.PrivateKey) []byte {
privateKeyMarshaled := x509.MarshalPKCS1PrivateKey(privkey)
privateKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Headers: nil, Bytes: privateKeyMarshaled})
return privateKeyPEM
}
// UnMarshalPrivateKey is a helper function that unmarshals PEM-encoded
// bytes to an RSA Private Key.
func UnMarshalPrivateKey(privateKeyPEM []byte) *rsa.PrivateKey {
derBlock, _ := pem.Decode(privateKeyPEM)
privateKey, err := x509.ParsePKCS1PrivateKey(derBlock.Bytes)
if err != nil {
return nil
}
return privateKey
}
// Encrypt is a helper function to encrypt a plain-text string using
// an RSA public key.
func Encrypt(hash hash.Hash, pubkey *rsa.PublicKey, msg string) (string, error) {
src, err := rsa.EncryptOAEP(hash, rand.Reader, pubkey, []byte(msg), nil)
return base64.RawURLEncoding.EncodeToString(src), err
}
// Decrypt is a helper function to decrypt a secret string using
// an RSA private key.
func Decrypt(hash hash.Hash, privkey *rsa.PrivateKey, secret string) (string, error) {
decoded, err := base64.RawURLEncoding.DecodeString(secret)
if err != nil {
return "", err
}
out, err := rsa.DecryptOAEP(hash, rand.Reader, privkey, decoded, nil)
return string(out), err
}

View File

@@ -1,40 +0,0 @@
package sshutil
import (
"crypto/sha256"
"testing"
"github.com/drone/drone/Godeps/_workspace/src/github.com/franela/goblin"
)
func TestSSHUtil(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("sshutil", func() {
var encrypted, testMsg string
privkey, err := GeneratePrivateKey()
g.Assert(err == nil).IsTrue()
pubkey := privkey.PublicKey
sha256 := sha256.New()
testMsg = "foo=bar"
g.Before(func() {
encrypted, err = Encrypt(sha256, &pubkey, testMsg)
g.Assert(err == nil).IsTrue()
})
g.It("Can decrypt encrypted msg", func() {
decrypted, err := Decrypt(sha256, privkey, encrypted)
g.Assert(err == nil).IsTrue()
g.Assert(decrypted == testMsg).IsTrue()
})
g.It("Unmarshals private key from PEM block", func() {
privateKeyPEM := MarshalPrivateKey(privkey)
privateKey := UnMarshalPrivateKey(privateKeyPEM)
g.Assert(privateKey.PublicKey.E == pubkey.E).IsTrue()
})
})
}

View File

@@ -0,0 +1,51 @@
package database
//go:generate go-bindata -pkg database -o database_gen.go sqlite3/ mysql/ postgres/
import (
"database/sql"
"github.com/drone/drone/shared/envconfig"
log "github.com/Sirupsen/logrus"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"github.com/rubenv/sql-migrate"
)
func Load(env envconfig.Env) *sql.DB {
var (
driver = env.String("DATABASE_DRIVER", "sqlite3")
config = env.String("DATABASE_CONFIG", "drone.sqlite")
)
log.Infof("using database driver %s", driver)
log.Infof("using database config %s", config)
return Open(driver, config)
}
// Open opens a database connection, runs the database migrations, and returns
// the database connection. Any errors connecting to the database or executing
// migrations will cause the application to exit.
func Open(driver, config string) *sql.DB {
var db, err = sql.Open(driver, config)
if err != nil {
log.Errorln(err)
log.Fatalln("database connection failed")
}
var migrations = &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: driver,
}
_, err = migrate.Exec(db, driver, migrations, migrate.Up)
if err != nil {
log.Errorln(err)
log.Fatalln("migration failed")
}
return db
}
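A hedged usage sketch of the loader (editorial, not part of the commit), assuming an optional .env file and the envconfig package added later in this commit:
package main

import (
	"github.com/drone/drone/shared/database"
	"github.com/drone/drone/shared/envconfig"
)

func main() {
	// DATABASE_DRIVER and DATABASE_CONFIG are read from the environment;
	// the ".env" path is hypothetical and may simply not exist
	env := envconfig.Load(".env")

	// open the connection and run the embedded migrations; any failure
	// is fatal and exits the process
	db := database.Load(env)
	defer db.Close()
}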

View File

@@ -0,0 +1,132 @@
-- +migrate Up
CREATE TABLE users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(500)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(500)
,repo_name VARCHAR(500)
,repo_full_name VARCHAR(1000)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_owner, repo_name)
);
CREATE TABLE stars (
star_id INTEGER PRIMARY KEY AUTO_INCREMENT
,star_repo_id INTEGER
,star_user_id INTEGER
,UNIQUE(star_repo_id, star_user_id)
);
CREATE INDEX ix_star_user ON stars (star_user_id);
CREATE TABLE keys (
key_id INTEGER PRIMARY KEY AUTO_INCREMENT
,key_repo_id INTEGER
,key_public MEDIUMBLOB
,key_private MEDIUMBLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id INTEGER PRIMARY KEY AUTO_INCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(500)
,job_exit_code INTEGER
,job_started INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
,log_job_id INTEGER
,log_data MEDIUMBLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTO_INCREMENT
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert MEDIUMBLOB
,node_key MEDIUMBLOB
,node_ca MEDIUMBLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;

View File

@@ -0,0 +1,132 @@
-- +migrate Up
CREATE TABLE users (
user_id SERIAL PRIMARY KEY
,user_login VARCHAR(500)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id SERIAL PRIMARY KEY
,repo_user_id INTEGER
,repo_owner VARCHAR(500)
,repo_name VARCHAR(500)
,repo_full_name VARCHAR(1000)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_owner, repo_name)
);
CREATE TABLE stars (
star_id SERIAL PRIMARY KEY
,star_repo_id INTEGER
,star_user_id INTEGER
,UNIQUE(star_repo_id, star_user_id)
);
CREATE INDEX ix_star_user ON stars (star_user_id);
CREATE TABLE keys (
key_id SERIAL PRIMARY KEY
,key_repo_id INTEGER
,key_public BYTEA
,key_private BYTEA
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id SERIAL PRIMARY KEY
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id SERIAL PRIMARY KEY
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(500)
,job_exit_code INTEGER
,job_started INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id SERIAL PRIMARY KEY
,log_job_id INTEGER
,log_data BYTEA
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id SERIAL PRIMARY KEY
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert BYTEA
,node_key BYTEA
,node_ca BYTEA
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;

32
shared/database/rebind.go Normal file
View File

@@ -0,0 +1,32 @@
package database
import (
"strconv"
"github.com/russross/meddler"
)
// Rebind is a helper function that changes the sql
// bind type from ? to $ for postgres queries.
func Rebind(query string) string {
if meddler.Default != meddler.PostgreSQL {
return query
}
qb := []byte(query)
// Add enough space for 5 params before we have to allocate
rqb := make([]byte, 0, len(qb)+5)
j := 1
for _, b := range qb {
if b == '?' {
rqb = append(rqb, '$')
for _, b := range strconv.Itoa(j) {
rqb = append(rqb, byte(b))
}
j++
} else {
rqb = append(rqb, b)
}
}
return string(rqb)
}
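An illustrative example-style test (editorial, not part of the commit) showing the placeholder rewrite; it assumes meddler.Default has been pointed at the postgres dialect:
package database

import (
	"fmt"

	"github.com/russross/meddler"
)

func ExampleRebind() {
	// switch meddler into postgres mode so Rebind rewrites the placeholders
	meddler.Default = meddler.PostgreSQL

	fmt.Println(Rebind("SELECT * FROM repos WHERE repo_owner = ? AND repo_name = ?"))
	// Output: SELECT * FROM repos WHERE repo_owner = $1 AND repo_name = $2
}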

View File

@@ -0,0 +1,131 @@
-- +migrate Up
CREATE TABLE users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,UNIQUE(repo_owner, repo_name)
);
CREATE TABLE stars (
star_id INTEGER PRIMARY KEY AUTOINCREMENT
,star_repo_id INTEGER
,star_user_id INTEGER
,UNIQUE(star_repo_id, star_user_id)
);
CREATE INDEX ix_star_user ON stars (star_user_id);
CREATE TABLE keys (
key_id INTEGER PRIMARY KEY AUTOINCREMENT
,key_repo_id INTEGER
,key_public BLOB
,key_private BLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id INTEGER PRIMARY KEY AUTOINCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status TEXT
,job_exit_code INTEGER
,job_started INTEGER
,job_finished INTEGER
,job_environment TEXT
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTOINCREMENT
,node_addr TEXT
,node_arch TEXT
,node_cert BLOB
,node_key BLOB
,node_ca BLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;

View File

@@ -1,124 +0,0 @@
package docker
import (
"encoding/binary"
"errors"
"io"
)
const (
StdWriterPrefixLen = 8
StdWriterFdIndex = 0
StdWriterSizeIndex = 4
)
type StdType [StdWriterPrefixLen]byte
var (
Stdin StdType = StdType{0: 0}
Stdout StdType = StdType{0: 1}
Stderr StdType = StdType{0: 2}
)
type StdWriter struct {
io.Writer
prefix StdType
sizeBuf []byte
}
var ErrInvalidStdHeader = errors.New("Unrecognized input header")
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non-nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < StdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Check the first byte to know where to write
switch buf[StdWriterFdIndex] {
case 0:
fallthrough
case 1:
// Write on stdout
out = dstout
case 2:
// Write on stderr
out = dsterr
default:
return 0, ErrInvalidStdHeader
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+StdWriterPrefixLen > bufLen {
buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+StdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
if ew != nil {
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+StdWriterPrefixLen:])
// Move the index
nr -= frameSize + StdWriterPrefixLen
}
}
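For reference, a small editorial sketch of the frame format the removed StdCopy expected, written as an in-package example that builds one stdout frame by hand:
package docker

import (
	"bytes"
	"encoding/binary"
	"os"
)

func ExampleStdCopy() {
	// build a single stdout frame by hand: an 8-byte header (fd byte,
	// three padding bytes, big-endian uint32 payload size) plus the payload
	payload := []byte("hello from the container\n")
	header := make([]byte, StdWriterPrefixLen)
	header[StdWriterFdIndex] = 1 // 1 selects stdout
	binary.BigEndian.PutUint32(header[StdWriterSizeIndex:], uint32(len(payload)))

	// demultiplex the framed stream onto the local stdout and stderr
	src := bytes.NewReader(append(header, payload...))
	StdCopy(os.Stdout, os.Stderr, src)
	// Output: hello from the container
}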

109
shared/docker/docker.go Normal file
View File

@@ -0,0 +1,109 @@
package docker
import (
"io"
"io/ioutil"
"github.com/samalba/dockerclient"
)
var (
LogOpts = &dockerclient.LogOptions{
Stdout: true,
Stderr: true,
}
LogOptsTail = &dockerclient.LogOptions{
Follow: true,
Stdout: true,
Stderr: true,
}
)
// Run creates the docker container, pulling images if necessary, starts
// the container and blocks until the container exits, returning the exit
// information.
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, name string) (*dockerclient.ContainerInfo, error) {
info, err := RunDaemon(client, conf, name)
if err != nil {
return nil, err
}
return Wait(client, info.Id)
}
// RunDaemon creates the docker container, pulling images if necessary, starts
// the container and returns the container information. It does not wait for
// the container to exit.
func RunDaemon(client dockerclient.Client, conf *dockerclient.ContainerConfig, name string) (*dockerclient.ContainerInfo, error) {
// attempt to create the container
id, err := client.CreateContainer(conf, name)
if err != nil {
// if that fails, pull the image and try creating again
err = client.PullImage(conf.Image, nil)
if err != nil {
return nil, err
}
id, err = client.CreateContainer(conf, name)
if err != nil {
client.RemoveContainer(id, true, true)
return nil, err
}
}
// fetches the container information
info, err := client.InspectContainer(id)
if err != nil {
client.RemoveContainer(id, true, true)
return nil, err
}
// starts the container
err = client.StartContainer(id, &conf.HostConfig)
if err != nil {
client.RemoveContainer(id, true, true)
return nil, err
}
return info, err
}
// Wait blocks until the named container exits, returning the exit information.
func Wait(client dockerclient.Client, name string) (*dockerclient.ContainerInfo, error) {
defer func() {
client.StopContainer(name, 5)
client.KillContainer(name, "9")
}()
errc := make(chan error, 1)
infoc := make(chan *dockerclient.ContainerInfo, 1)
go func() {
// blocks and waits for the container to finish
// by streaming the logs (to /dev/null). Ideally
// we could use the `wait` function instead
rc, err := client.ContainerLogs(name, LogOptsTail)
if err != nil {
errc <- err
return
}
io.Copy(ioutil.Discard, rc)
rc.Close()
info, err := client.InspectContainer(name)
if err != nil {
errc <- err
return
}
infoc <- info
}()
select {
case info := <-infoc:
return info, nil
case err := <-errc:
return nil, err
}
}
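A rough usage sketch (editorial, not part of the commit); the daemon address, image, and container name are hypothetical:
package main

import (
	"fmt"

	"github.com/drone/drone/shared/docker"
	"github.com/samalba/dockerclient"
)

func main() {
	// hypothetical daemon address; TLS is omitted for brevity
	client, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		panic(err)
	}

	// hypothetical image and command
	conf := &dockerclient.ContainerConfig{
		Image: "golang:1.5",
		Cmd:   []string{"go", "version"},
	}

	// create the container (pulling the image if needed), start it,
	// and block until it exits
	info, err := docker.Run(client, conf, "drone_example")
	if err != nil {
		panic(err)
	}
	fmt.Println("exit code:", info.State.ExitCode)
}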

View File

@@ -0,0 +1,117 @@
package envconfig
import (
"bufio"
"errors"
"os"
"strconv"
"strings"
)
type Env map[string]string
// Get returns the value of the environment variable named by the key.
func (env Env) Get(key string) string {
return env[key]
}
// String returns the string value of the environment variable named by the
// key. If the variable is not present, the default value is returned.
func (env Env) String(key, value string) string {
got, ok := env[key]
if ok {
value = got
}
return value
}
// Bool returns the boolean value of the environment variable named by the key.
// If the variable is not present, the default value is returned.
func (env Env) Bool(name string, value bool) bool {
got, ok := env[name]
if ok {
value, _ = strconv.ParseBool(got)
}
return value
}
// Int returns the integer value of the environment variable named by the key.
// If the variable is not present, the default value is returned.
func (env Env) Int(name string, value int) int {
got, ok := env[name]
if ok {
value, _ = strconv.Atoi(got)
}
return value
}
// Load reads the environment file and parses variables in "key=value" format.
// It then reads the system environment variables and returns the combined
// results in a key-value map.
func Load(filepath string) Env {
var envs = map[string]string{}
// load the environment file
f, err := os.Open(filepath)
if err == nil {
defer f.Close()
r := bufio.NewReader(f)
for {
line, _, err := r.ReadLine()
if err != nil {
break
}
key, val, err := parseln(string(line))
if err != nil {
continue
}
os.Setenv(key, val)
}
}
// load the environment variables
for _, env := range os.Environ() {
key, val, err := parseln(env)
if err != nil {
continue
}
envs[key] = val
}
return Env(envs)
}
// helper function to parse a "key=value" environment variable string.
func parseln(line string) (key string, val string, err error) {
line = removeComments(line)
if len(line) == 0 {
return
}
splits := strings.SplitN(line, "=", 2)
if len(splits) < 2 {
err = errors.New("missing delimiter '='")
return
}
key = strings.Trim(splits[0], " ")
val = strings.Trim(splits[1], ` "'`)
return
}
// helper function to trim comments and whitespace from a string.
func removeComments(s string) (_ string) {
if len(s) == 0 || string(s[0]) == "#" {
return
} else {
index := strings.Index(s, " #")
if index > -1 {
s = strings.TrimSpace(s[0:index])
}
}
return s
}
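A short usage sketch of the new package (editorial); the .env path and the variable names other than DATABASE_DRIVER are hypothetical:
package main

import (
	"fmt"

	"github.com/drone/drone/shared/envconfig"
)

func main() {
	// merge variables from a (hypothetical) .env file with the process
	// environment; a line such as `DATABASE_DRIVER=mysql # comment` is
	// parsed with the trailing comment stripped
	env := envconfig.Load(".env")

	driver := env.String("DATABASE_DRIVER", "sqlite3")
	debug := env.Bool("DEBUG", false)
	timeout := env.Int("TIMEOUT", 60)

	fmt.Println(driver, debug, timeout)
}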

View File

@@ -92,6 +92,7 @@ func SetCookie(w http.ResponseWriter, r *http.Request, name, value string) {
Domain: r.URL.Host,
HttpOnly: true,
Secure: IsHttps(r),
MaxAge: 2147483647, // the cookie value (token) is responsible for expiration
}
http.SetCookie(w, &cookie)

471
shared/oauth2/oauth2.go Normal file
View File

@@ -0,0 +1,471 @@
// Copyright 2011 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package oauth supports making OAuth2-authenticated HTTP requests.
//
// Example usage:
//
// // Specify your configuration. (typically as a global variable)
// var config = &oauth.Config{
// ClientId: YOUR_CLIENT_ID,
// ClientSecret: YOUR_CLIENT_SECRET,
// Scope: "https://www.googleapis.com/auth/buzz",
// AuthURL: "https://accounts.google.com/o/oauth2/auth",
// TokenURL: "https://accounts.google.com/o/oauth2/token",
// RedirectURL: "http://you.example.org/handler",
// }
//
// // A landing page redirects to the OAuth provider to get the auth code.
// func landing(w http.ResponseWriter, r *http.Request) {
// http.Redirect(w, r, config.AuthCodeURL("foo"), http.StatusFound)
// }
//
// // The user will be redirected back to this handler, that takes the
// // "code" query parameter and Exchanges it for an access token.
// func handler(w http.ResponseWriter, r *http.Request) {
// t := &oauth.Transport{Config: config}
// t.Exchange(r.FormValue("code"))
// // The Transport now has a valid Token. Create an *http.Client
// // with which we can make authenticated API requests.
// c := t.Client()
// c.Post(...)
// // ...
// // btw, r.FormValue("state") == "foo"
// }
//
package oauth2
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
)
// OAuthError is the error type returned by many operations.
//
// In retrospect it should not exist. Don't depend on it.
type OAuthError struct {
prefix string
msg string
}
func (oe OAuthError) Error() string {
return "OAuthError: " + oe.prefix + ": " + oe.msg
}
// Cache specifies the methods that implement a Token cache.
type Cache interface {
Token() (*Token, error)
PutToken(*Token) error
}
// CacheFile implements Cache. Its value is the name of the file in which
// the Token is stored in JSON format.
type CacheFile string
func (f CacheFile) Token() (*Token, error) {
file, err := os.Open(string(f))
if err != nil {
return nil, OAuthError{"CacheFile.Token", err.Error()}
}
defer file.Close()
tok := &Token{}
if err := json.NewDecoder(file).Decode(tok); err != nil {
return nil, OAuthError{"CacheFile.Token", err.Error()}
}
return tok, nil
}
func (f CacheFile) PutToken(tok *Token) error {
file, err := os.OpenFile(string(f), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return OAuthError{"CacheFile.PutToken", err.Error()}
}
if err := json.NewEncoder(file).Encode(tok); err != nil {
file.Close()
return OAuthError{"CacheFile.PutToken", err.Error()}
}
if err := file.Close(); err != nil {
return OAuthError{"CacheFile.PutToken", err.Error()}
}
return nil
}
// Config is the configuration of an OAuth consumer.
type Config struct {
// ClientId is the OAuth client identifier used when communicating with
// the configured OAuth provider.
ClientId string
// ClientSecret is the OAuth client secret used when communicating with
// the configured OAuth provider.
ClientSecret string
// Scope identifies the level of access being requested. Multiple scope
// values should be provided as a space-delimited string.
Scope string
// AuthURL is the URL the user will be directed to in order to grant
// access.
AuthURL string
// TokenURL is the URL used to retrieve OAuth tokens.
TokenURL string
// RedirectURL is the URL to which the user will be returned after
// granting (or denying) access.
RedirectURL string
// TokenCache allows tokens to be cached for subsequent requests.
TokenCache Cache
// AccessType is an OAuth extension that gets sent as the
// "access_type" field in the URL from AuthCodeURL.
// See https://developers.google.com/accounts/docs/OAuth2WebServer.
// It may be "online" (the default) or "offline".
// If your application needs to refresh access tokens when the
// user is not present at the browser, then use offline. This
// will result in your application obtaining a refresh token
// the first time your application exchanges an authorization
// code for a user.
AccessType string
// ApprovalPrompt indicates whether the user should be
// re-prompted for consent. If set to "auto" (default) the
// user will be prompted only if they haven't previously
// granted consent and the code can only be exchanged for an
// access token.
// If set to "force" the user will always be prompted, and the
// code can be exchanged for a refresh token.
ApprovalPrompt string
}
// Token contains an end-user's tokens.
// This is the data you must store to persist authentication.
type Token struct {
AccessToken string
RefreshToken string
Expiry time.Time // If zero the token has no (known) expiry time.
// Extra optionally contains extra metadata from the server
// when updating a token. The only current key that may be
// populated is "id_token". It may be nil and will be
// initialized as needed.
Extra map[string]string
}
// Expired reports whether the token has expired or is invalid.
func (t *Token) Expired() bool {
if t.AccessToken == "" {
return true
}
if t.Expiry.IsZero() {
return false
}
return t.Expiry.Before(time.Now())
}
// Transport implements http.RoundTripper. When configured with a valid
// Config and Token it can be used to make authenticated HTTP requests.
//
// t := &oauth.Transport{config}
// t.Exchange(code)
// // t now contains a valid Token
// r, _, err := t.Client().Get("http://example.org/url/requiring/auth")
//
// It will automatically refresh the Token if it can,
// updating the supplied Token in place.
type Transport struct {
*Config
*Token
// mu guards modifying the token.
mu sync.Mutex
// Transport is the HTTP transport to use when making requests.
// It will default to http.DefaultTransport if nil.
// (It should never be an oauth.Transport.)
Transport http.RoundTripper
}
// Client returns an *http.Client that makes OAuth-authenticated requests.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
func (t *Transport) transport() http.RoundTripper {
if t.Transport != nil {
return t.Transport
}
return http.DefaultTransport
}
// AuthCodeURL returns a URL that the end-user should be redirected to,
// so that they may obtain an authorization code.
func (c *Config) AuthCodeURL(state string) string {
url_, err := url.Parse(c.AuthURL)
if err != nil {
panic("AuthURL malformed: " + err.Error())
}
q := url.Values{
"response_type": {"code"},
"client_id": {c.ClientId},
"state": condVal(state),
"scope": condVal(c.Scope),
"redirect_uri": condVal(c.RedirectURL),
"access_type": condVal(c.AccessType),
"approval_prompt": condVal(c.ApprovalPrompt),
}.Encode()
if url_.RawQuery == "" {
url_.RawQuery = q
} else {
url_.RawQuery += "&" + q
}
return url_.String()
}
func condVal(v string) []string {
if v == "" {
return nil
}
return []string{v}
}
// Exchange takes a code and gets access Token from the remote server.
func (t *Transport) Exchange(code string) (*Token, error) {
if t.Config == nil {
return nil, OAuthError{"Exchange", "no Config supplied"}
}
// If the transport or the cache already has a token, it is
// passed to `updateToken` to preserve existing refresh token.
tok := t.Token
if tok == nil && t.TokenCache != nil {
tok, _ = t.TokenCache.Token()
}
if tok == nil {
tok = new(Token)
}
err := t.updateToken(tok, url.Values{
"grant_type": {"authorization_code"},
"redirect_uri": {t.RedirectURL},
"scope": {t.Scope},
"code": {code},
})
if err != nil {
return nil, err
}
t.Token = tok
if t.TokenCache != nil {
return tok, t.TokenCache.PutToken(tok)
}
return tok, nil
}
// RoundTrip executes a single HTTP transaction using the Transport's
// Token as authorization headers.
//
// This method will attempt to renew the Token if it has expired and may return
// an error related to that Token renewal before attempting the client request.
// If the Token cannot be renewed a non-nil os.Error value will be returned.
// If the Token is invalid callers should expect HTTP-level errors,
// as indicated by the Response's StatusCode.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
accessToken, err := t.getAccessToken()
if err != nil {
return nil, err
}
// To set the Authorization header, we must make a copy of the Request
// so that we don't modify the Request we were given.
// This is required by the specification of http.RoundTripper.
req = cloneRequest(req)
req.Header.Set("Authorization", "Bearer "+accessToken)
// Make the HTTP request.
return t.transport().RoundTrip(req)
}
func (t *Transport) getAccessToken() (string, error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.Token == nil {
if t.Config == nil {
return "", OAuthError{"RoundTrip", "no Config supplied"}
}
if t.TokenCache == nil {
return "", OAuthError{"RoundTrip", "no Token supplied"}
}
var err error
t.Token, err = t.TokenCache.Token()
if err != nil {
return "", err
}
}
// Refresh the Token if it has expired.
if t.Expired() {
if err := t.Refresh(); err != nil {
return "", err
}
}
if t.AccessToken == "" {
return "", errors.New("no access token obtained from refresh")
}
return t.AccessToken, nil
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
// Refresh renews the Transport's AccessToken using its RefreshToken.
func (t *Transport) Refresh() error {
if t.Token == nil {
return OAuthError{"Refresh", "no existing Token"}
}
if t.RefreshToken == "" {
return OAuthError{"Refresh", "Token expired; no Refresh Token"}
}
if t.Config == nil {
return OAuthError{"Refresh", "no Config supplied"}
}
err := t.updateToken(t.Token, url.Values{
"grant_type": {"refresh_token"},
"refresh_token": {t.RefreshToken},
})
if err != nil {
return err
}
if t.TokenCache != nil {
return t.TokenCache.PutToken(t.Token)
}
return nil
}
// AuthenticateClient gets an access Token using the client_credentials grant
// type.
func (t *Transport) AuthenticateClient() error {
if t.Config == nil {
return OAuthError{"Exchange", "no Config supplied"}
}
if t.Token == nil {
t.Token = &Token{}
}
return t.updateToken(t.Token, url.Values{"grant_type": {"client_credentials"}})
}
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
// implements the OAuth2 spec correctly.
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
// In summary:
// - Reddit only accepts client secret in the Authorization header
// - Dropbox accepts either it in URL param or Auth header, but not both.
// - Google only accepts URL param (not spec compliant?), not Auth header
func providerAuthHeaderWorks(tokenURL string) bool {
if strings.HasPrefix(tokenURL, "https://accounts.google.com/") ||
strings.HasPrefix(tokenURL, "https://github.com/") ||
strings.HasPrefix(tokenURL, "https://api.instagram.com/") ||
strings.HasPrefix(tokenURL, "https://www.douban.com/") {
// Some sites fail to implement the OAuth2 spec fully.
return false
}
// Assume the provider implements the spec properly
// otherwise. We can add more exceptions as they're
// discovered. We will _not_ be adding configurable hooks
// to this package to let users select server bugs.
return true
}
// updateToken mutates both tok and v.
func (t *Transport) updateToken(tok *Token, v url.Values) error {
v.Set("client_id", t.ClientId)
v.Set("client_secret", t.ClientSecret)
client := &http.Client{Transport: t.transport()}
req, err := http.NewRequest("POST", t.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.SetBasicAuth(t.ClientId, t.ClientSecret)
r, err := client.Do(req)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != 200 {
return OAuthError{"updateToken", "Unexpected HTTP status " + r.Status}
}
var b struct {
Access string `json:"access_token"`
Refresh string `json:"refresh_token"`
ExpiresIn int64 `json:"expires_in"` // seconds
Id string `json:"id_token"`
}
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
if err != nil {
return err
}
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
vals, err := url.ParseQuery(string(body))
if err != nil {
return err
}
b.Access = vals.Get("access_token")
b.Refresh = vals.Get("refresh_token")
b.ExpiresIn, _ = strconv.ParseInt(vals.Get("expires_in"), 10, 64)
b.Id = vals.Get("id_token")
default:
if err = json.Unmarshal(body, &b); err != nil {
return fmt.Errorf("got bad response from server: %q", body)
}
}
if b.Access == "" {
return errors.New("received empty access token from authorization server")
}
tok.AccessToken = b.Access
// Don't overwrite `RefreshToken` with an empty value
if b.Refresh != "" {
tok.RefreshToken = b.Refresh
}
if b.ExpiresIn == 0 {
tok.Expiry = time.Time{}
} else {
tok.Expiry = time.Now().Add(time.Duration(b.ExpiresIn) * time.Second)
}
if b.Id != "" {
if tok.Extra == nil {
tok.Extra = make(map[string]string)
}
tok.Extra["id_token"] = b.Id
}
return nil
}
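The package comment above already walks through the exchange flow; as a complementary editorial sketch (with hypothetical provider URLs and credentials), a CacheFile can back the Transport so refreshed tokens survive restarts:
package main

import (
	"fmt"

	"github.com/drone/drone/shared/oauth2"
)

func main() {
	// hypothetical provider settings; the token is cached on disk so a
	// later run can reuse and refresh it without re-prompting the user
	config := &oauth2.Config{
		ClientId:     "client-id",
		ClientSecret: "client-secret",
		AuthURL:      "https://example.com/oauth/authorize",
		TokenURL:     "https://example.com/oauth/token",
		RedirectURL:  "http://localhost:8000/authorize",
		TokenCache:   oauth2.CacheFile("token.json"),
	}

	t := &oauth2.Transport{Config: config}
	if tok, err := t.TokenCache.Token(); err == nil {
		// reuse the cached token; RoundTrip refreshes it when it expires
		t.Token = tok
	}

	// URL the end-user visits to grant access
	fmt.Println(config.AuthCodeURL("random-state"))
}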

36
shared/server/server.go Normal file
View File

@@ -0,0 +1,36 @@
package server
import (
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/drone/drone/shared/envconfig"
)
type Server struct {
Addr string
Cert string
Key string
}
func Load(env envconfig.Env) *Server {
return &Server{
Addr: env.String("SERVER_ADDR", ":8000"),
Cert: env.String("SERVER_CERT", ""),
Key: env.String("SERVER_KEY", ""),
}
}
func (s *Server) Run(handler http.Handler) {
log.Infof("starting server %s", s.Addr)
if len(s.Cert) != 0 {
log.Fatal(
http.ListenAndServeTLS(s.Addr, s.Cert, s.Key, handler),
)
} else {
log.Fatal(
http.ListenAndServe(s.Addr, handler),
)
}
}
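A minimal sketch (editorial, not part of the commit) wiring the server to a handler; the .env file and the /healthz route are hypothetical:
package main

import (
	"net/http"

	"github.com/drone/drone/shared/envconfig"
	"github.com/drone/drone/shared/server"
)

func main() {
	// SERVER_ADDR, SERVER_CERT and SERVER_KEY come from the environment;
	// with no cert configured the server falls back to plain HTTP
	env := envconfig.Load(".env")

	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	server.Load(env).Run(mux)
}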

View File

@@ -1,9 +1,10 @@
package token
import (
"fmt"
"net/http"
"github.com/drone/drone/Godeps/_workspace/src/github.com/dgrijalva/jwt-go"
"github.com/dgrijalva/jwt-go"
)
type SecretFunc func(*Token) (string, error)
@@ -12,6 +13,7 @@ const (
UserToken = "user"
SessToken = "sess"
HookToken = "hook"
CsrfToken = "csrf"
)
// Default algorithm used to sign JWT tokens.
@@ -22,7 +24,6 @@ type Token struct {
Text string
}
// Parse parses
func Parse(raw string, fn SecretFunc) (*Token, error) {
token := &Token{}
parsed, err := jwt.Parse(raw, keyFunc(token, fn))
@@ -34,15 +35,46 @@ func Parse(raw string, fn SecretFunc) (*Token, error) {
return token, nil
}
func ParseRequest(req *http.Request, fn SecretFunc) (*Token, error) {
token := &Token{}
parsed, err := jwt.ParseFromRequest(req, keyFunc(token, fn))
func ParseRequest(r *http.Request, fn SecretFunc) (*Token, error) {
var token = r.Header.Get("Authorization")
// first we attempt to get the token from the
// authorization header.
if len(token) != 0 {
token = r.Header.Get("Authorization")
fmt.Sscanf(token, "Bearer %s", &token)
return Parse(token, fn)
}
// then we attempt to get the token from the
// access_token url query parameter
token = r.FormValue("access_token")
if len(token) != 0 {
return Parse(token, fn)
}
// and finally we attempt to get the token from
// the user session cookie
cookie, err := r.Cookie("user_sess")
if err != nil {
return nil, err
} else if !parsed.Valid {
return nil, jwt.ValidationError{}
}
return token, nil
return Parse(cookie.Value, fn)
}
func CheckCsrf(r *http.Request, fn SecretFunc) error {
// GET and OPTIONS requests are always
// allowed, without CSRF checks.
switch r.Method {
case "GET", "OPTIONS":
return nil
}
// parse the raw CSRF token value and validate
raw := r.Header.Get("X-CSRF-TOKEN")
_, err := Parse(raw, fn)
return err
}
func New(kind, text string) *Token {

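As an editorial sketch of how a handler might consume the new ParseRequest and CheckCsrf helpers (the shared secret below is a hypothetical stand-in for the real per-token secret lookup):
package token

import "net/http"

// authorize is a hypothetical handler illustrating the new helpers.
func authorize(w http.ResponseWriter, r *http.Request) {
	secretFn := func(t *Token) (string, error) {
		return "shared-secret", nil
	}

	// reject state-changing requests without a valid X-CSRF-TOKEN header
	if err := CheckCsrf(r, secretFn); err != nil {
		http.Error(w, "invalid csrf token", http.StatusForbidden)
		return
	}

	// resolve the caller from the Authorization header, the access_token
	// query parameter, or the user_sess cookie, in that order
	t, err := ParseRequest(r, secretFn)
	if err != nil {
		http.Error(w, "authorization required", http.StatusUnauthorized)
		return
	}
	_ = t // look up the user associated with the token and continue
}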
View File

@@ -1 +0,0 @@
package token