Didn't realize gin supports net/context. Changed to support the Context pattern!

This commit is contained in:
Brad Rydzewski
2015-10-21 16:14:02 -07:00
parent af2ef2347a
commit cfdfbcfd3b
56 changed files with 1495 additions and 1051 deletions
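
For context, a minimal sketch of the Context pattern this commit moves toward, assuming a gin middleware that stores shared resources (here, the database handle) on the gin.Context so handlers can pull them back out. The "database" key, the route, and the handler are illustrative only, not taken from this commit:

package main

import (
	"database/sql"
	"net/http"

	"github.com/gin-gonic/gin"
	_ "github.com/mattn/go-sqlite3"
)

// SetDatabase returns middleware that stores the database handle in the
// gin.Context for downstream handlers. The "database" key is an assumption
// for this sketch; the key drone actually uses may differ.
func SetDatabase(db *sql.DB) gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Set("database", db)
		c.Next()
	}
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}

	r := gin.Default()
	r.Use(SetDatabase(db))
	r.GET("/ping", func(c *gin.Context) {
		// Retrieve the handle injected by the middleware.
		db := c.MustGet("database").(*sql.DB)
		if err := db.Ping(); err != nil {
			c.String(http.StatusInternalServerError, err.Error())
			return
		}
		c.String(http.StatusOK, "pong")
	})
	r.Run(":8080")
}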


@@ -1,71 +0,0 @@
package database

//go:generate go-bindata -pkg database -o database_gen.go sqlite3/ mysql/ postgres/

import (
	"database/sql"
	"os"

	"github.com/drone/drone/shared/envconfig"

	log "github.com/Sirupsen/logrus"
	_ "github.com/go-sql-driver/mysql"
	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
	"github.com/rubenv/sql-migrate"
	"github.com/russross/meddler"
)

// Load opens a database connection using the driver and connection string
// defined in the environment, falling back to a local sqlite3 database.
func Load(env envconfig.Env) *sql.DB {
	var (
		driver = env.String("DATABASE_DRIVER", "sqlite3")
		config = env.String("DATABASE_CONFIG", "drone.sqlite")
	)

	log.Infof("using database driver %s", driver)
	log.Infof("using database config %s", config)

	return Open(driver, config)
}

// Open opens a database connection, runs the database migrations, and returns
// the database connection. Any errors connecting to the database or executing
// migrations will cause the application to exit.
func Open(driver, config string) *sql.DB {
	var db, err = sql.Open(driver, config)
	if err != nil {
		log.Errorln(err)
		log.Fatalln("database connection failed")
	}

	switch driver {
	case "mysql":
		meddler.Default = meddler.MySQL
	case "postgres":
		meddler.Default = meddler.PostgreSQL
	}

	var migrations = &migrate.AssetMigrationSource{
		Asset:    Asset,
		AssetDir: AssetDir,
		Dir:      driver,
	}
	_, err = migrate.Exec(db, driver, migrations, migrate.Up)
	if err != nil {
		log.Errorln(err)
		log.Fatalln("migration failed")
	}

	return db
}

// OpenTest opens a database connection for testing, defaulting to an
// in-memory sqlite3 database unless overridden by the environment.
func OpenTest() *sql.DB {
	var (
		driver = "sqlite3"
		config = ":memory:"
	)
	if os.Getenv("DATABASE_DRIVER") != "" {
		driver = os.Getenv("DATABASE_DRIVER")
		config = os.Getenv("DATABASE_CONFIG")
	}
	return Open(driver, config)
}
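
A usage sketch (not part of the diff), assuming a caller outside the package wires the connection at startup; the driver and connection string are illustrative:

// Open the database and run migrations; Open exits the process on failure,
// so no error check is needed here.
db := database.Open("sqlite3", "drone.sqlite")
defer db.Close()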


@@ -1,125 +0,0 @@
-- +migrate Up
CREATE TABLE users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(500)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(255)
,repo_name VARCHAR(255)
,repo_full_name VARCHAR(511)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_full_name)
);
CREATE TABLE `keys` (
key_id INTEGER PRIMARY KEY AUTO_INCREMENT
,key_repo_id INTEGER
,key_public MEDIUMBLOB
,key_private MEDIUMBLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id INTEGER PRIMARY KEY AUTO_INCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(500)
,job_exit_code INTEGER
,job_started INTEGER
,job_enqueued INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
,log_job_id INTEGER
,log_data MEDIUMBLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTO_INCREMENT
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert MEDIUMBLOB
,node_key MEDIUMBLOB
,node_ca MEDIUMBLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE `keys`;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;
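
For orientation, a sketch of how a Go struct could map onto the users table above through meddler struct tags. The struct below is an assumption for illustration; drone's actual model package may name fields and tags differently:

// User is an illustrative struct mapped to the users table via meddler tags.
type User struct {
	ID     int64  `meddler:"user_id,pk"`
	Login  string `meddler:"user_login"`
	Token  string `meddler:"user_token"`
	Secret string `meddler:"user_secret"`
	Expiry int64  `meddler:"user_expiry"`
	Email  string `meddler:"user_email"`
	Avatar string `meddler:"user_avatar"`
	Active bool   `meddler:"user_active"`
	Admin  bool   `meddler:"user_admin"`
	Hash   string `meddler:"user_hash"`
}

// Reading a row into the struct, with the dialect already configured by Open:
//   var user User
//   err := meddler.QueryRow(db, &user, "SELECT * FROM users WHERE user_login = ?", "octocat")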


@@ -1,126 +0,0 @@
-- +migrate Up
CREATE TABLE users (
user_id SERIAL PRIMARY KEY
,user_login VARCHAR(40)
,user_token VARCHAR(128)
,user_secret VARCHAR(128)
,user_expiry INTEGER
,user_email VARCHAR(256)
,user_avatar VARCHAR(256)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(128)
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id SERIAL PRIMARY KEY
,repo_user_id INTEGER
,repo_owner VARCHAR(255)
,repo_name VARCHAR(255)
,repo_full_name VARCHAR(511)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_full_name)
);
CREATE TABLE keys (
key_id SERIAL PRIMARY KEY
,key_repo_id INTEGER
,key_public BYTEA
,key_private BYTEA
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id SERIAL PRIMARY KEY
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(25)
,build_status VARCHAR(25)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(40)
,build_branch VARCHAR(256)
,build_ref VARCHAR(512)
,build_refspec VARCHAR(512)
,build_remote VARCHAR(512)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(40)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id SERIAL PRIMARY KEY
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(25)
,job_exit_code INTEGER
,job_started INTEGER
,job_enqueued INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id SERIAL PRIMARY KEY
,log_job_id INTEGER
,log_data BYTEA
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id SERIAL PRIMARY KEY
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert BYTEA
,node_key BYTEA
,node_ca BYTEA
);
INSERT INTO nodes (node_addr, node_arch, node_cert, node_key, node_ca) VALUES
('unix:///var/run/docker.sock', 'linux_amd64', '', '', ''),
('unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;


@@ -1,35 +0,0 @@
package database

import (
	"strconv"

	"github.com/russross/meddler"
)

// Rebind is a helper function that changes the sql
// bind type from ? to $ for postgres queries.
func Rebind(query string) string {
	if meddler.Default != meddler.PostgreSQL {
		return query
	}

	qb := []byte(query)

	// Add enough space for 5 params before we have to allocate.
	rqb := make([]byte, 0, len(qb)+5)
	j := 1

	for _, b := range qb {
		switch b {
		case '?':
			// Replace each ? placeholder with a numbered $N placeholder.
			rqb = append(rqb, '$')
			for _, b := range strconv.Itoa(j) {
				rqb = append(rqb, byte(b))
			}
			j++
		case '`':
			// Strip MySQL-style backtick quoting, which postgres rejects.
			rqb = append(rqb, ' ')
		default:
			rqb = append(rqb, b)
		}
	}
	return string(rqb)
}
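
A quick usage sketch for Rebind, assuming meddler.Default has been set to meddler.PostgreSQL by Open; the query itself is illustrative:

// ? placeholders become numbered $N placeholders and backticks are
// replaced with spaces, so the rewritten query is valid for postgres.
query := Rebind("SELECT * FROM `users` WHERE user_login = ? AND user_active = ?")
// query == "SELECT * FROM  users  WHERE user_login = $1 AND user_active = $2"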


@@ -1,135 +0,0 @@
-- +migrate Up
CREATE TABLE users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_expiry INTEGER
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,UNIQUE(repo_full_name)
);
CREATE TABLE stars (
star_id INTEGER PRIMARY KEY AUTOINCREMENT
,star_repo_id INTEGER
,star_user_id INTEGER
,UNIQUE(star_repo_id, star_user_id)
);
CREATE INDEX ix_star_user ON stars (star_user_id);
CREATE TABLE keys (
key_id INTEGER PRIMARY KEY AUTOINCREMENT
,key_repo_id INTEGER
,key_public BLOB
,key_private BLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE INDEX ix_build_author ON builds (build_author);
CREATE TABLE jobs (
job_id INTEGER PRIMARY KEY AUTOINCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status TEXT
,job_exit_code INTEGER
,job_enqueued INTEGER
,job_started INTEGER
,job_finished INTEGER
,job_environment TEXT
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTOINCREMENT
,node_addr TEXT
,node_arch TEXT
,node_cert BLOB
,node_key BLOB
,node_ca BLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;