Merge pull request #2040 from bradrydzewski/database-migration

Generated migration, remove Makefile
This commit is contained in:
Brad Rydzewski 2017-05-14 19:14:31 +02:00 committed by GitHub
commit bf04c791c0
106 changed files with 2321 additions and 5899 deletions

View file

@ -6,10 +6,27 @@ pipeline:
test:
image: golang:1.8
commands:
- make deps gen
- make test test_postgres test_mysql
- go get -u github.com/drone/drone-ui/dist
- go get -u golang.org/x/tools/cmd/cover
- go test -cover $(go list ./... | grep -v /vendor/)
dist:
test_postgres:
image: golang:1.8
environment:
- DATABASE_DRIVER=postgres
- DATABASE_CONFIG=host=postgres user=postgres dbname=postgres sslmode=disable
commands:
- go test github.com/drone/drone/store/datastore
test_mysql:
image: golang:1.8
environment:
- DATABASE_DRIVER=mysql
- DATABASE_CONFIG=root@tcp(mysql:3306)/test?parseTime=true
commands:
- go test github.com/drone/drone/store/datastore
build:
image: golang:1.8
commands:
- ./ci.sh
@ -54,7 +71,7 @@ pipeline:
services:
postgres:
image: postgres:9.4.5
image: postgres:9.6
environment:
- POSTGRES_USER=postgres
mysql:

22
.gitignore vendored
View file

@ -1,31 +1,9 @@
drone/drone
*.sqlite
*_gen.go
!store/datastore/sql/sqlite/sql_gen.go
!store/datastore/sql/mysql/sql_gen.go
!store/datastore/sql/postgres/sql_gen.go
!server/template/template_gen.go
#*.css
*.txt
*.zip
*.gz
*.out
*.min.js
*.deb
.env
temp/
extras/
release/
server/frontend/bower_components
server/frontend/build
server/swagger/files/*.json
# vendored repositories that we don't actually need
# to vendor. so exclude them
vendor/google.golang.org/cloud
vendor/github.com/bugagazavr
# IDE/Editor stuff
.idea

View file

@ -1,67 +0,0 @@
[org.core]
people = [
"bradrydzewski"
]
[org.plugins]
people = [
"bradrydzewski",
"Bugagazavr",
"donny-dont",
"jackspirou",
"msteinert",
"nlf",
"tboerger",
]
[org.python]
people = [
"bradrydzewski",
"msteinert",
"gtaylor",
]
[org.node]
people = [
"bradrydzewski",
"nlf",
]
[people]
[people.bradrydzewski]
name = "Brad Rydzewski"
email = "brad@drone.io"
login = "bradrydzewski"
[people.Bugagazavr]
login = "Bugagazavr"
[people.donny-dont]
name = "Don Olmstead"
login = "donny-dont"
[people.gtaylor]
name = "Greg Taylor"
login = "gtaylor"
[people.jackspirou]
name = "Jack Spirou"
login = "jackspirou"
email = "jack@spirou.io"
[people.msteinert]
name = "Mike Steinert"
login = "msteinert"
[people.nlf]
name = "Nathan LaFreniere"
login = "nlf"
[people.tboerger]
name = "Thomas Boerger"
login = "tboerger"

View file

@ -1,70 +0,0 @@
.PHONY: build
PACKAGES = $(shell go list ./... | grep -v /vendor/)
ifneq ($(shell uname), Darwin)
EXTLDFLAGS = -extldflags "-static" $(null)
else
EXTLDFLAGS =
endif
all: gen build_static
deps: deps_backend deps_frontend
deps_frontend:
go get -u github.com/drone/drone-ui/dist
deps_backend:
go get -u golang.org/x/tools/cmd/cover
go get -u github.com/jteeuwen/go-bindata/...
go get -u github.com/elazarl/go-bindata-assetfs/...
gen: gen_migrations
gen_migrations:
go generate github.com/drone/drone/store/datastore/ddl
test:
go test -cover $(PACKAGES)
# docker run --publish=3306:3306 -e MYSQL_DATABASE=test -e MYSQL_ALLOW_EMPTY_PASSWORD=yes mysql:5.6.27
test_mysql:
DATABASE_DRIVER="mysql" DATABASE_CONFIG="root@tcp(mysql:3306)/test?parseTime=true" go test github.com/drone/drone/store/datastore
# docker run --publish=5432:5432 postgres:9.4.5
test_postgres:
DATABASE_DRIVER="postgres" DATABASE_CONFIG="host=postgres user=postgres dbname=postgres sslmode=disable" go test github.com/drone/drone/store/datastore
# build the release files
build: build_static build_cross build_tar build_sha
build_static:
go install -ldflags '${EXTLDFLAGS}-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' github.com/drone/drone/drone
mkdir -p release
cp $(GOPATH)/bin/drone release/
# TODO this is getting moved to a shell script, do not alter
build_cross:
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' -o release/linux/amd64/drone github.com/drone/drone/drone
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -ldflags '-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' -o release/linux/arm64/drone github.com/drone/drone/drone
GOOS=linux GOARCH=arm CGO_ENABLED=0 go build -ldflags '-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' -o release/linux/arm/drone github.com/drone/drone/drone
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' -o release/windows/amd64/drone github.com/drone/drone/drone
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-X github.com/drone/drone/version.VersionDev=build.$(DRONE_BUILD_NUMBER)' -o release/darwin/amd64/drone github.com/drone/drone/drone
# TODO this is getting moved to a shell script, do not alter
build_tar:
tar -cvzf release/linux/amd64/drone.tar.gz -C release/linux/amd64 drone
tar -cvzf release/linux/arm64/drone.tar.gz -C release/linux/arm64 drone
tar -cvzf release/linux/arm/drone.tar.gz -C release/linux/arm drone
tar -cvzf release/windows/amd64/drone.tar.gz -C release/windows/amd64 drone
tar -cvzf release/darwin/amd64/drone.tar.gz -C release/darwin/amd64 drone
# TODO this is getting moved to a shell script, do not alter
build_sha:
sha256sum release/linux/amd64/drone.tar.gz > release/linux/amd64/drone.sha256
sha256sum release/linux/arm64/drone.tar.gz > release/linux/arm64/drone.sha256
sha256sum release/linux/arm/drone.tar.gz > release/linux/arm/drone.sha256
sha256sum release/windows/amd64/drone.tar.gz > release/windows/amd64/drone.sha256
sha256sum release/darwin/amd64/drone.tar.gz > release/darwin/amd64/drone.sha256

View file

@ -1,3 +0,0 @@
package ddl
//go:generate go-bindata -pkg ddl -o ddl_gen.go sqlite3/ mysql/ postgres/

View file

@ -0,0 +1,105 @@
package ddl
import (
"database/sql"
"errors"
"github.com/drone/drone/store/datastore/ddl/mysql"
"github.com/drone/drone/store/datastore/ddl/postgres"
"github.com/drone/drone/store/datastore/ddl/sqlite"
)
// Supported database drivers
const (
DriverSqlite = "sqlite3"
DriverMysql = "mysql"
DriverPostgres = "postgres"
)
// Migrate performs the database migration. If the migration fails
// an error is returned.
func Migrate(driver string, db *sql.DB) error {
switch driver {
case DriverMysql:
if err := checkPriorMigration(db); err != nil {
return err
}
return mysql.Migrate(db)
case DriverPostgres:
return postgres.Migrate(db)
default:
return sqlite.Migrate(db)
}
}
// we need to check and see if there was a previous migration
// for drone 0.6 or prior and migrate to the new migration
// system. Attempting to migrate from 0.5 or below to 0.7 or
// above will result in an error.
//
// this can be removed once we get to 1.0 with the reasonable
// expectation that people are no longer using 0.5.
func checkPriorMigration(db *sql.DB) error {
var none int
if err := db.QueryRow(legacyMigrationsExist).Scan(&none); err != nil {
// if no legacy migrations exist, this is a fresh install
// and we can proceed as normal.
return nil
}
if err := db.QueryRow(legacyMigrationsCurrent).Scan(&none); err != nil {
// this indicates an attempted upgrade from 0.5 or lower to
// version 0.7 or higher and will fail.
return errors.New("Please upgrade to 0.6 before upgrading to 0.7+")
}
if _, err := db.Exec(createMigrationsTable); err != nil {
return err
}
if _, err := db.Exec(legacyMigrationsImport); err != nil {
return err
}
return nil
}
var legacyMigrationsExist = `
SELECT 1
FROM gorp_migrations
LIMIT 1
`
var legacyMigrationsCurrent = `
SELECT 1
FROM gorp_migrations
WHERE id = '16.sql'
LIMIT 1
`
var legacyMigrationsImport = `
INSERT IGNORE INTO migrations (name) VALUES
('create-table-users')
,('create-table-repos')
,('create-table-builds')
,('create-index-builds-repo')
,('create-index-builds-author')
,('create-table-procs')
,('create-index-procs-build')
,('create-table-logs')
,('create-table-files')
,('create-index-files-builds')
,('create-index-files-procs')
,('create-table-secrets')
,('create-index-secrets-repo')
,('create-table-registry')
,('create-index-registry-repo')
,('create-table-config')
,('create-table-tasks')
,('create-table-agents')
,('create-table-senders')
,('create-index-sender-repos')
`
var createMigrationsTable = `
CREATE TABLE IF NOT EXISTS migrations (
name VARCHAR(512)
,UNIQUE(name)
)
`

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
UPDATE secrets SET secret_conceal = false;
UPDATE team_secrets SET team_secret_conceal = false;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_conceal;
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;

View file

@ -1,8 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_error VARCHAR(500);
UPDATE builds SET build_error = '';
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_error;

View file

@ -1,18 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_config_path VARCHAR(255);
ALTER TABLE builds ADD COLUMN build_sender VARCHAR(255);
ALTER TABLE builds ADD COLUMN build_reviewer VARCHAR(255);
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
UPDATE repos SET repo_config_path = '.drone.yml';
UPDATE builds SET build_reviewer = '';
UPDATE builds SET build_reviewed = 0;
UPDATE builds SET build_sender = '';
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_config_path;
ALTER TABLE builds DROP COLUMN build_sender;
ALTER TABLE builds DROP COLUMN build_reviewer;
ALTER TABLE builds DROP COLUMN build_reviewed;

View file

@ -1,45 +0,0 @@
-- +migrate Up
CREATE TABLE procs (
proc_id INTEGER PRIMARY KEY AUTO_INCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
CREATE INDEX proc_build_ix ON procs (proc_build_id);
CREATE TABLE files (
file_id INTEGER PRIMARY KEY AUTO_INCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data MEDIUMBLOB
,UNIQUE(file_proc_id,file_name)
);
CREATE INDEX file_build_ix ON files (file_build_id);
CREATE INDEX file_proc_ix ON files (file_proc_id);
-- +migrate Down
DROP INDEX file_build_ix;
DROP INDEX file_proc_ix;
DROP TABLE files;
DROP INDEX proc_build_ix;
DROP TABLE procs;

View file

@ -1,11 +0,0 @@
-- +migrate Up
CREATE TABLE tasks (
task_id VARCHAR(255) PRIMARY KEY
,task_data MEDIUMBLOB
,task_labels MEDIUMBLOB
);
-- +migrate Down
DROP TABLE tasks;

View file

@ -1,124 +0,0 @@
-- +migrate Up
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(255)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(255)
,repo_name VARCHAR(255)
,repo_full_name VARCHAR(255)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_full_name)
);
CREATE TABLE IF NOT EXISTS `keys` (
key_id INTEGER PRIMARY KEY AUTO_INCREMENT
,key_repo_id INTEGER
,key_public MEDIUMBLOB
,key_private MEDIUMBLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE IF NOT EXISTS jobs (
job_id INTEGER PRIMARY KEY AUTO_INCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(500)
,job_exit_code INTEGER
,job_started INTEGER
,job_enqueued INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
,log_job_id INTEGER
,log_data MEDIUMBLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTO_INCREMENT
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert MEDIUMBLOB
,node_key MEDIUMBLOB
,node_ca MEDIUMBLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE `keys`;
DROP TABLE repos;
DROP TABLE users;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_scm VARCHAR(25);
ALTER TABLE builds ADD COLUMN build_deploy VARCHAR(500);
UPDATE repos SET repo_scm = 'git' WHERE repo_scm = null;
UPDATE builds SET build_deploy = '' WHERE build_deploy = null;
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_scm;
ALTER TABLE builds DROP COLUMN build_deploy;

View file

@ -1,32 +0,0 @@
-- +migrate Up
CREATE TABLE secrets (
secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
,secret_repo_id INTEGER
,secret_name VARCHAR(255)
,secret_value MEDIUMBLOB
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,UNIQUE(secret_name, secret_repo_id)
);
CREATE TABLE registry (
registry_id INTEGER PRIMARY KEY AUTO_INCREMENT
,registry_repo_id INTEGER
,registry_addr VARCHAR(255)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
-- +migrate Down
DROP INDEX ix_secrets_repo;
DROP INDEX ix_registry_repo;

View file

@ -1,9 +0,0 @@
-- +migrate Up
ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
UPDATE jobs SET job_error = '' WHERE job_error = null;
-- +migrate Down
ALTER TABLE jobs DROP COLUMN job_error;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
UPDATE builds SET build_signed = false;
UPDATE builds SET build_verified = false;
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_signed;
ALTER TABLE builds DROP COLUMN build_verified;

View file

@ -1,19 +0,0 @@
-- +migrate Up
CREATE TABLE team_secrets (
team_secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
,team_secret_key VARCHAR(255)
,team_secret_name VARCHAR(255)
,team_secret_value MEDIUMBLOB
,team_secret_images VARCHAR(2000)
,team_secret_events VARCHAR(2000)
,UNIQUE(team_secret_name, team_secret_key)
);
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
-- +migrate Down
DROP INDEX ix_team_secrets_key;
DROP TABLE team_secrets;

View file

@ -1,7 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_parent;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
UPDATE secrets SET secret_skip_verify = false;
UPDATE team_secrets SET team_secret_skip_verify = false;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;

View file

@ -0,0 +1,3 @@
package mysql
//go:generate togo ddl -package mysql -dialect mysql

View file

@ -0,0 +1,444 @@
package mysql
import (
"database/sql"
)
var migrations = []struct {
name string
stmt string
}{
{
name: "create-table-users",
stmt: createTableUsers,
},
{
name: "create-table-repos",
stmt: createTableRepos,
},
{
name: "create-table-builds",
stmt: createTableBuilds,
},
{
name: "create-index-builds-repo",
stmt: createIndexBuildsRepo,
},
{
name: "create-index-builds-author",
stmt: createIndexBuildsAuthor,
},
{
name: "create-table-procs",
stmt: createTableProcs,
},
{
name: "create-index-procs-build",
stmt: createIndexProcsBuild,
},
{
name: "create-table-logs",
stmt: createTableLogs,
},
{
name: "create-table-files",
stmt: createTableFiles,
},
{
name: "create-index-files-builds",
stmt: createIndexFilesBuilds,
},
{
name: "create-index-files-procs",
stmt: createIndexFilesProcs,
},
{
name: "create-table-secrets",
stmt: createTableSecrets,
},
{
name: "create-index-secrets-repo",
stmt: createIndexSecretsRepo,
},
{
name: "create-table-registry",
stmt: createTableRegistry,
},
{
name: "create-index-registry-repo",
stmt: createIndexRegistryRepo,
},
{
name: "create-table-config",
stmt: createTableConfig,
},
{
name: "create-table-tasks",
stmt: createTableTasks,
},
{
name: "create-table-agents",
stmt: createTableAgents,
},
{
name: "create-table-senders",
stmt: createTableSenders,
},
{
name: "create-index-sender-repos",
stmt: createIndexSenderRepos,
},
}
// Migrate performs the database migration. If the migration fails
// an error is returned.
func Migrate(db *sql.DB) error {
if err := createTable(db); err != nil {
return err
}
completed, err := selectCompleted(db)
if err != nil && err != sql.ErrNoRows {
return err
}
for _, migration := range migrations {
if _, ok := completed[migration.name]; ok {
continue
}
if _, err := db.Exec(migration.stmt); err != nil {
return err
}
if err := insertMigration(db, migration.name); err != nil {
return err
}
}
return nil
}
func createTable(db *sql.DB) error {
_, err := db.Exec(migrationTableCreate)
return err
}
func insertMigration(db *sql.DB, name string) error {
_, err := db.Exec(migrationInsert, name)
return err
}
func selectCompleted(db *sql.DB) (map[string]struct{}, error) {
migrations := map[string]struct{}{}
rows, err := db.Query(migrationSelect)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var name string
if err := rows.Scan(&name); err != nil {
return nil, err
}
migrations[name] = struct{}{}
}
return migrations, nil
}
//
// migration table ddl and sql
//
var migrationTableCreate = `
CREATE TABLE IF NOT EXISTS migrations (
name VARCHAR(512)
,UNIQUE(name)
)
`
var migrationInsert = `
INSERT INTO migrations (name) VALUES (?)
`
var migrationSelect = `
SELECT name FROM migrations
`
//
// 001_create_table_users.sql
//
var createTableUsers = `
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(250)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
`
//
// 002_create_table_repos.sql
//
var createTableRepos = `
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(250)
,repo_name VARCHAR(250)
,repo_full_name VARCHAR(250)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,repo_scm VARCHAR(50)
,repo_config_path VARCHAR(500)
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);
`
//
// 003_create_table_builds.sql
//
var createTableBuilds = `
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,build_deploy VARCHAR(500)
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error VARCHAR(500)
,build_reviewer VARCHAR(250)
,build_reviewed INTEGER
,build_sender VARCHAR(250)
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
`
var createIndexBuildsRepo = `
CREATE INDEX ix_build_repo ON builds (build_repo_id);
`
var createIndexBuildsAuthor = `
CREATE INDEX ix_build_author ON builds (build_author);
`
//
// 004_create_table_procs.sql
//
var createTableProcs = `
CREATE TABLE IF NOT EXISTS procs (
proc_id INTEGER PRIMARY KEY AUTO_INCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
`
var createIndexProcsBuild = `
CREATE INDEX proc_build_ix ON procs (proc_build_id);
`
//
// 005_create_table_logs.sql
//
var createTableLogs = `
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
,log_job_id INTEGER
,log_data MEDIUMBLOB
,UNIQUE(log_job_id)
);
`
//
// 006_create_table_files.sql
//
var createTableFiles = `
CREATE TABLE IF NOT EXISTS files (
file_id INTEGER PRIMARY KEY AUTO_INCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data MEDIUMBLOB
,UNIQUE(file_proc_id,file_name)
);
`
var createIndexFilesBuilds = `
CREATE INDEX file_build_ix ON files (file_build_id);
`
var createIndexFilesProcs = `
CREATE INDEX file_proc_ix ON files (file_proc_id);
`
//
// 007_create_table_secrets.sql
//
var createTableSecrets = `
CREATE TABLE IF NOT EXISTS secrets (
secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
,secret_repo_id INTEGER
,secret_name VARCHAR(250)
,secret_value MEDIUMBLOB
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
`
var createIndexSecretsRepo = `
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
`
//
// 008_create_table_registry.sql
//
var createTableRegistry = `
CREATE TABLE IF NOT EXISTS registry (
registry_id INTEGER PRIMARY KEY AUTO_INCREMENT
,registry_repo_id INTEGER
,registry_addr VARCHAR(250)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
`
var createIndexRegistryRepo = `
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
`
//
// 009_create_table_config.sql
//
var createTableConfig = `
CREATE TABLE IF NOT EXISTS config (
config_id INTEGER PRIMARY KEY AUTO_INCREMENT
,config_repo_id INTEGER
,config_hash VARCHAR(250)
,config_data MEDIUMBLOB
,UNIQUE(config_hash, config_repo_id)
);
`
//
// 010_create_table_tasks.sql
//
var createTableTasks = `
CREATE TABLE IF NOT EXISTS tasks (
task_id VARCHAR(250) PRIMARY KEY
,task_data MEDIUMBLOB
,task_labels MEDIUMBLOB
);
`
//
// 011_create_table_agents.sql
//
var createTableAgents = `
CREATE TABLE IF NOT EXISTS agents (
agent_id INTEGER PRIMARY KEY AUTO_INCREMENT
,agent_addr VARCHAR(250)
,agent_platform VARCHAR(500)
,agent_capacity INTEGER
,agent_created INTEGER
,agent_updated INTEGER
,UNIQUE(agent_addr)
);
`
//
// 012_create_table_senders.sql
//
var createTableSenders = `
CREATE TABLE IF NOT EXISTS senders (
sender_id INTEGER PRIMARY KEY AUTO_INCREMENT
,sender_repo_id INTEGER
,sender_login VARCHAR(250)
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
`
var createIndexSenderRepos = `
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
`

View file

@ -0,0 +1,16 @@
-- name: create-table-users
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTO_INCREMENT
,user_login VARCHAR(250)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);

View file

@ -0,0 +1,26 @@
-- name: create-table-repos
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTO_INCREMENT
,repo_user_id INTEGER
,repo_owner VARCHAR(250)
,repo_name VARCHAR(250)
,repo_full_name VARCHAR(250)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,repo_scm VARCHAR(50)
,repo_config_path VARCHAR(500)
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);

View file

@ -0,0 +1,44 @@
-- name: create-table-builds
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTO_INCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,build_deploy VARCHAR(500)
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error VARCHAR(500)
,build_reviewer VARCHAR(250)
,build_reviewed INTEGER
,build_sender VARCHAR(250)
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
-- name: create-index-builds-repo
CREATE INDEX ix_build_repo ON builds (build_repo_id);
-- name: create-index-builds-author
CREATE INDEX ix_build_author ON builds (build_author);

View file

@ -0,0 +1,24 @@
-- name: create-table-procs
CREATE TABLE IF NOT EXISTS procs (
proc_id INTEGER PRIMARY KEY AUTO_INCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
-- name: create-index-procs-build
CREATE INDEX proc_build_ix ON procs (proc_build_id);

View file

@ -0,0 +1,9 @@
-- name: create-table-logs
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTO_INCREMENT
,log_job_id INTEGER
,log_data MEDIUMBLOB
,UNIQUE(log_job_id)
);

View file

@ -0,0 +1,22 @@
-- name: create-table-files
CREATE TABLE IF NOT EXISTS files (
file_id INTEGER PRIMARY KEY AUTO_INCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data MEDIUMBLOB
,UNIQUE(file_proc_id,file_name)
);
-- name: create-index-files-builds
CREATE INDEX file_build_ix ON files (file_build_id);
-- name: create-index-files-procs
CREATE INDEX file_proc_ix ON files (file_proc_id);

View file

@ -0,0 +1,18 @@
-- name: create-table-secrets
CREATE TABLE IF NOT EXISTS secrets (
secret_id INTEGER PRIMARY KEY AUTO_INCREMENT
,secret_repo_id INTEGER
,secret_name VARCHAR(250)
,secret_value MEDIUMBLOB
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
-- name: create-index-secrets-repo
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);

View file

@ -0,0 +1,17 @@
-- name: create-table-registry
CREATE TABLE IF NOT EXISTS registry (
registry_id INTEGER PRIMARY KEY AUTO_INCREMENT
,registry_repo_id INTEGER
,registry_addr VARCHAR(250)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
-- name: create-index-registry-repo
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);

View file

@ -1,6 +1,6 @@
-- +migrate Up
-- name: create-table-config
CREATE TABLE config (
CREATE TABLE IF NOT EXISTS config (
config_id INTEGER PRIMARY KEY AUTO_INCREMENT
,config_repo_id INTEGER
,config_hash VARCHAR(250)
@ -8,10 +8,3 @@ CREATE TABLE config (
,UNIQUE(config_hash, config_repo_id)
);
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
UPDATE builds set build_config_id = 0;
-- +migrate Down
DROP TABLE config;

View file

@ -0,0 +1,7 @@
-- name: create-table-tasks
CREATE TABLE IF NOT EXISTS tasks (
task_id VARCHAR(250) PRIMARY KEY
,task_data MEDIUMBLOB
,task_labels MEDIUMBLOB
);

View file

@ -1,8 +1,8 @@
-- +migrate Up
-- name: create-table-agents
CREATE TABLE agents (
CREATE TABLE IF NOT EXISTS agents (
agent_id INTEGER PRIMARY KEY AUTO_INCREMENT
,agent_addr VARCHAR(255)
,agent_addr VARCHAR(250)
,agent_platform VARCHAR(500)
,agent_capacity INTEGER
,agent_created INTEGER
@ -10,8 +10,3 @@ CREATE TABLE agents (
,UNIQUE(agent_addr)
);
-- +migrate Down
DROP TABLE agents;

View file

@ -1,9 +1,6 @@
-- +migrate Up
-- name: create-table-senders
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
UPDATE repos SET repo_gated = false;
CREATE TABLE senders (
CREATE TABLE IF NOT EXISTS senders (
sender_id INTEGER PRIMARY KEY AUTO_INCREMENT
,sender_repo_id INTEGER
,sender_login VARCHAR(250)
@ -13,10 +10,6 @@ CREATE TABLE senders (
,UNIQUE(sender_repo_id,sender_login)
);
-- name: create-index-sender-repos
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_gated;
DROP INDEX sender_repo_ix;
DROP TABLE senders;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
UPDATE secrets SET secret_conceal = false;
UPDATE team_secrets SET team_secret_conceal = false;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_conceal;
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;

View file

@ -1,8 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_error VARCHAR(500);
UPDATE builds SET build_error = '';
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_error;

View file

@ -1,18 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_config_path VARCHAR(255);
ALTER TABLE builds ADD COLUMN build_reviewer VARCHAR(255);
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
ALTER TABLE builds ADD COLUMN build_sender VARCHAR(255);
UPDATE repos SET repo_config_path = '.drone.yml';
UPDATE builds SET build_reviewer = '';
UPDATE builds SET build_reviewed = 0;
UPDATE builds SET build_sender = '';
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_config_path;
ALTER TABLE builds DROP COLUMN build_reviewer;
ALTER TABLE builds DROP COLUMN build_reviewed;
ALTER TABLE builds DROP COLUMN build_sender;

View file

@ -1,47 +0,0 @@
-- +migrate Up
CREATE TABLE procs (
proc_id SERIAL PRIMARY KEY
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
CREATE INDEX proc_build_ix ON procs (proc_build_id);
CREATE TABLE files (
file_id SERIAL PRIMARY KEY
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data BYTEA
,UNIQUE(file_proc_id,file_name)
);
CREATE INDEX file_build_ix ON files (file_build_id);
CREATE INDEX file_proc_ix ON files (file_proc_id);
-- +migrate Down
DROP INDEX file_build_ix;
DROP INDEX file_proc_ix;
DROP TABLE files;
DROP INDEX proc_build_ix;
DROP TABLE procs;

View file

@ -1,22 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
UPDATE repos SET repo_gated = false;
CREATE TABLE senders (
sender_id SERIAL PRIMARY KEY
,sender_repo_id INTEGER
,sender_login VARCHAR(250)
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_gated;
DROP INDEX sender_repo_ix;
DROP TABLE senders;

View file

@ -1,11 +0,0 @@
-- +migrate Up
CREATE TABLE tasks (
task_id VARCHAR(255) PRIMARY KEY
,task_data BYTEA
,task_labels BYTEA
);
-- +migrate Down
DROP TABLE tasks;

View file

@ -1,17 +0,0 @@
-- +migrate Up
CREATE TABLE config (
config_id SERIAL PRIMARY KEY
,config_repo_id INTEGER
,config_hash VARCHAR(250)
,config_data BYTEA
,UNIQUE(config_hash, config_repo_id)
);
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
UPDATE builds set build_config_id = 0;
-- +migrate Down
DROP TABLE config;

View file

@ -1,126 +0,0 @@
-- +migrate Up
CREATE TABLE users (
user_id SERIAL PRIMARY KEY
,user_login VARCHAR(40)
,user_token VARCHAR(128)
,user_secret VARCHAR(128)
,user_expiry INTEGER
,user_email VARCHAR(256)
,user_avatar VARCHAR(256)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(128)
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id SERIAL PRIMARY KEY
,repo_user_id INTEGER
,repo_owner VARCHAR(255)
,repo_name VARCHAR(255)
,repo_full_name VARCHAR(511)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,UNIQUE(repo_full_name)
);
CREATE TABLE keys (
key_id SERIAL PRIMARY KEY
,key_repo_id INTEGER
,key_public BYTEA
,key_private BYTEA
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id SERIAL PRIMARY KEY
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(25)
,build_status VARCHAR(25)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(40)
,build_branch VARCHAR(256)
,build_ref VARCHAR(512)
,build_refspec VARCHAR(512)
,build_remote VARCHAR(512)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(40)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE TABLE jobs (
job_id SERIAL PRIMARY KEY
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status VARCHAR(25)
,job_exit_code INTEGER
,job_started INTEGER
,job_enqueued INTEGER
,job_finished INTEGER
,job_environment VARCHAR(2000)
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id SERIAL PRIMARY KEY
,log_job_id INTEGER
,log_data BYTEA
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id SERIAL PRIMARY KEY
,node_addr VARCHAR(1024)
,node_arch VARCHAR(50)
,node_cert BYTEA
,node_key BYTEA
,node_ca BYTEA
);
INSERT INTO nodes (node_addr, node_arch, node_cert, node_key, node_ca) VALUES
('unix:///var/run/docker.sock', 'linux_amd64', '', '', ''),
('unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_scm VARCHAR(25);
ALTER TABLE builds ADD COLUMN build_deploy VARCHAR(500);
UPDATE repos SET repo_scm = 'git';
UPDATE builds SET build_deploy = '';
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_scm;
ALTER TABLE builds DROP COLUMN build_deploy;

View file

@ -1,32 +0,0 @@
-- +migrate Up
CREATE TABLE secrets (
secret_id SERIAL PRIMARY KEY
,secret_repo_id INTEGER
,secret_name VARCHAR(500)
,secret_value BYTEA
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,UNIQUE(secret_name, secret_repo_id)
);
CREATE TABLE registry (
registry_id SERIAL PRIMARY KEY
,registry_repo_id INTEGER
,registry_addr VARCHAR(500)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
-- +migrate Down
DROP INDEX ix_secrets_repo;
DROP INDEX ix_registry_repo;

View file

@ -1,9 +0,0 @@
-- +migrate Up
ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
UPDATE jobs SET job_error = '';
-- +migrate Down
ALTER TABLE jobs DROP COLUMN job_error;

View file

@ -1,16 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
UPDATE builds SET build_signed = false;
UPDATE builds SET build_verified = false;
CREATE INDEX ix_build_status_running ON builds (build_status)
WHERE build_status IN ('pending', 'running');
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_signed;
ALTER TABLE builds DROP COLUMN build_verified;
DROP INDEX ix_build_status_running;

View file

@ -1,19 +0,0 @@
-- +migrate Up
CREATE TABLE team_secrets (
team_secret_id SERIAL PRIMARY KEY
,team_secret_key VARCHAR(255)
,team_secret_name VARCHAR(255)
,team_secret_value BYTEA
,team_secret_images VARCHAR(2000)
,team_secret_events VARCHAR(2000)
,UNIQUE(team_secret_name, team_secret_key)
);
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
-- +migrate Down
DROP INDEX ix_team_secrets_key;
DROP TABLE team_secrets;

View file

@ -1,7 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_parent;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
UPDATE secrets SET secret_skip_verify = false;
UPDATE team_secrets SET team_secret_skip_verify = false;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;

View file

@ -0,0 +1,3 @@
package postgres
//go:generate togo ddl -package postgres -dialect postgres

View file

@ -0,0 +1,444 @@
package postgres
import (
"database/sql"
)
// migrations lists every schema migration in the order it must be
// applied. Each entry pairs a unique name — recorded in the
// migrations bookkeeping table once applied — with its DDL statement.
// NOTE(review): this file is generated by togo (see setup_gen.go);
// edits here will be lost on regeneration.
var migrations = []struct {
	name string
	stmt string
}{
	{
		name: "create-table-users",
		stmt: createTableUsers,
	},
	{
		name: "create-table-repos",
		stmt: createTableRepos,
	},
	{
		name: "create-table-builds",
		stmt: createTableBuilds,
	},
	{
		name: "create-index-builds-repo",
		stmt: createIndexBuildsRepo,
	},
	{
		name: "create-index-builds-author",
		stmt: createIndexBuildsAuthor,
	},
	{
		name: "create-table-procs",
		stmt: createTableProcs,
	},
	{
		name: "create-index-procs-build",
		stmt: createIndexProcsBuild,
	},
	{
		name: "create-table-logs",
		stmt: createTableLogs,
	},
	{
		name: "create-table-files",
		stmt: createTableFiles,
	},
	{
		name: "create-index-files-builds",
		stmt: createIndexFilesBuilds,
	},
	{
		name: "create-index-files-procs",
		stmt: createIndexFilesProcs,
	},
	{
		name: "create-table-secrets",
		stmt: createTableSecrets,
	},
	{
		name: "create-index-secrets-repo",
		stmt: createIndexSecretsRepo,
	},
	{
		name: "create-table-registry",
		stmt: createTableRegistry,
	},
	{
		name: "create-index-registry-repo",
		stmt: createIndexRegistryRepo,
	},
	{
		name: "create-table-config",
		stmt: createTableConfig,
	},
	{
		name: "create-table-tasks",
		stmt: createTableTasks,
	},
	{
		name: "create-table-agents",
		stmt: createTableAgents,
	},
	{
		name: "create-table-senders",
		stmt: createTableSenders,
	},
	{
		name: "create-index-sender-repos",
		stmt: createIndexSenderRepos,
	},
}
// Migrate brings the database schema up to date. It creates the
// bookkeeping table if needed, then executes, in order, every
// migration statement that has not yet been recorded as applied.
// If any step fails an error is returned.
func Migrate(db *sql.DB) error {
	if err := createTable(db); err != nil {
		return err
	}
	completed, err := selectCompleted(db)
	if err != nil && err != sql.ErrNoRows {
		return err
	}
	for _, m := range migrations {
		// Skip statements recorded as applied by a previous run.
		if _, done := completed[m.name]; done {
			continue
		}
		if _, err := db.Exec(m.stmt); err != nil {
			return err
		}
		// Record success so the statement is never re-run.
		if err := insertMigration(db, m.name); err != nil {
			return err
		}
	}
	return nil
}
// createTable creates the migrations bookkeeping table if it does
// not already exist (the DDL uses IF NOT EXISTS, so it is safe to
// call on every startup).
func createTable(db *sql.DB) error {
	_, err := db.Exec(migrationTableCreate)
	return err
}
// insertMigration records the named migration as applied by
// inserting a row into the migrations table.
func insertMigration(db *sql.DB, name string) error {
	_, err := db.Exec(migrationInsert, name)
	return err
}
// selectCompleted returns the set of migration names already applied,
// keyed by name for O(1) membership tests in Migrate.
//
// An error is returned if the query, a row scan, or row iteration
// fails; callers treat sql.ErrNoRows as an empty set.
func selectCompleted(db *sql.DB) (map[string]struct{}, error) {
	migrations := map[string]struct{}{}
	rows, err := db.Query(migrationSelect)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		migrations[name] = struct{}{}
	}
	// rows.Next returns false on both exhaustion and error; surface an
	// iteration error so a partial result is not mistaken for the full
	// set of applied migrations.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return migrations, nil
}
//
// migration table ddl and sql
//
var migrationTableCreate = `
CREATE TABLE IF NOT EXISTS migrations (
name VARCHAR(512)
,UNIQUE(name)
)
`
var migrationInsert = `
INSERT INTO migrations (name) VALUES ($1)
`
var migrationSelect = `
SELECT name FROM migrations
`
//
// 001_create_table_users.sql
//
var createTableUsers = `
CREATE TABLE IF NOT EXISTS users (
user_id SERIAL PRIMARY KEY
,user_login VARCHAR(250)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);
`
//
// 002_create_table_repos.sql
//
var createTableRepos = `
CREATE TABLE IF NOT EXISTS repos (
repo_id SERIAL PRIMARY KEY
,repo_user_id INTEGER
,repo_owner VARCHAR(250)
,repo_name VARCHAR(250)
,repo_full_name VARCHAR(250)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,repo_scm VARCHAR(50)
,repo_config_path VARCHAR(500)
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);
`
//
// 003_create_table_builds.sql
//
var createTableBuilds = `
CREATE TABLE IF NOT EXISTS builds (
build_id SERIAL PRIMARY KEY
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,build_deploy VARCHAR(500)
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error VARCHAR(500)
,build_reviewer VARCHAR(250)
,build_reviewed INTEGER
,build_sender VARCHAR(250)
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
`
var createIndexBuildsRepo = `
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
`
var createIndexBuildsAuthor = `
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
`
//
// 004_create_table_procs.sql
//
var createTableProcs = `
CREATE TABLE IF NOT EXISTS procs (
proc_id SERIAL PRIMARY KEY
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
`
var createIndexProcsBuild = `
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);
`
//
// 005_create_table_logs.sql
//
var createTableLogs = `
CREATE TABLE IF NOT EXISTS logs (
log_id SERIAL PRIMARY KEY
,log_job_id INTEGER
,log_data BYTEA
,UNIQUE(log_job_id)
);
`
//
// 006_create_table_files.sql
//
var createTableFiles = `
CREATE TABLE IF NOT EXISTS files (
file_id SERIAL PRIMARY KEY
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data BYTEA
,UNIQUE(file_proc_id,file_name)
);
`
var createIndexFilesBuilds = `
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
`
var createIndexFilesProcs = `
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);
`
//
// 007_create_table_secets.sql
//
var createTableSecrets = `
CREATE TABLE IF NOT EXISTS secrets (
secret_id SERIAL PRIMARY KEY
,secret_repo_id INTEGER
,secret_name VARCHAR(250)
,secret_value BYTEA
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
`
var createIndexSecretsRepo = `
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);
`
//
// 008_create_table_registry.sql
//
var createTableRegistry = `
CREATE TABLE IF NOT EXISTS registry (
registry_id SERIAL PRIMARY KEY
,registry_repo_id INTEGER
,registry_addr VARCHAR(250)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
`
var createIndexRegistryRepo = `
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);
`
//
// 009_create_table_config.sql
//
var createTableConfig = `
CREATE TABLE IF NOT EXISTS config (
config_id SERIAL PRIMARY KEY
,config_repo_id INTEGER
,config_hash VARCHAR(250)
,config_data BYTEA
,UNIQUE(config_hash, config_repo_id)
);
`
//
// 010_create_table_tasks.sql
//
var createTableTasks = `
CREATE TABLE IF NOT EXISTS tasks (
task_id VARCHAR(250) PRIMARY KEY
,task_data BYTEA
,task_labels BYTEA
);
`
//
// 011_create_table_agents.sql
//
var createTableAgents = `
CREATE TABLE IF NOT EXISTS agents (
agent_id SERIAL PRIMARY KEY
,agent_addr VARCHAR(250)
,agent_platform VARCHAR(500)
,agent_capacity INTEGER
,agent_created INTEGER
,agent_updated INTEGER
,UNIQUE(agent_addr)
);
`
//
// 012_create_table_senders.sql
//
var createTableSenders = `
CREATE TABLE IF NOT EXISTS senders (
sender_id SERIAL PRIMARY KEY
,sender_repo_id INTEGER
,sender_login VARCHAR(250)
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
`
var createIndexSenderRepos = `
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
`

View file

@ -0,0 +1,16 @@
-- name: create-table-users
CREATE TABLE IF NOT EXISTS users (
user_id SERIAL PRIMARY KEY
,user_login VARCHAR(250)
,user_token VARCHAR(500)
,user_secret VARCHAR(500)
,user_expiry INTEGER
,user_email VARCHAR(500)
,user_avatar VARCHAR(500)
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash VARCHAR(500)
,UNIQUE(user_login)
);

View file

@ -0,0 +1,26 @@
-- name: create-table-repos
CREATE TABLE IF NOT EXISTS repos (
repo_id SERIAL PRIMARY KEY
,repo_user_id INTEGER
,repo_owner VARCHAR(250)
,repo_name VARCHAR(250)
,repo_full_name VARCHAR(250)
,repo_avatar VARCHAR(500)
,repo_link VARCHAR(1000)
,repo_clone VARCHAR(1000)
,repo_branch VARCHAR(500)
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash VARCHAR(500)
,repo_scm VARCHAR(50)
,repo_config_path VARCHAR(500)
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);

View file

@ -0,0 +1,44 @@
-- name: create-table-builds
CREATE TABLE IF NOT EXISTS builds (
build_id SERIAL PRIMARY KEY
,build_repo_id INTEGER
,build_number INTEGER
,build_event VARCHAR(500)
,build_status VARCHAR(500)
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit VARCHAR(500)
,build_branch VARCHAR(500)
,build_ref VARCHAR(500)
,build_refspec VARCHAR(1000)
,build_remote VARCHAR(500)
,build_title VARCHAR(1000)
,build_message VARCHAR(2000)
,build_timestamp INTEGER
,build_author VARCHAR(500)
,build_avatar VARCHAR(1000)
,build_email VARCHAR(500)
,build_link VARCHAR(1000)
,build_deploy VARCHAR(500)
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error VARCHAR(500)
,build_reviewer VARCHAR(250)
,build_reviewed INTEGER
,build_sender VARCHAR(250)
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
-- name: create-index-builds-repo
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
-- name: create-index-builds-author
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);

View file

@ -0,0 +1,24 @@
-- name: create-table-procs
CREATE TABLE IF NOT EXISTS procs (
proc_id SERIAL PRIMARY KEY
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name VARCHAR(250)
,proc_state VARCHAR(250)
,proc_error VARCHAR(500)
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine VARCHAR(250)
,proc_platform VARCHAR(250)
,proc_environ VARCHAR(2000)
,UNIQUE(proc_build_id, proc_pid)
);
-- name: create-index-procs-build
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);

View file

@ -0,0 +1,9 @@
-- name: create-table-logs
CREATE TABLE IF NOT EXISTS logs (
log_id SERIAL PRIMARY KEY
,log_job_id INTEGER
,log_data BYTEA
,UNIQUE(log_job_id)
);

View file

@ -0,0 +1,22 @@
-- name: create-table-files
CREATE TABLE IF NOT EXISTS files (
file_id SERIAL PRIMARY KEY
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name VARCHAR(250)
,file_mime VARCHAR(250)
,file_size INTEGER
,file_time INTEGER
,file_data BYTEA
,UNIQUE(file_proc_id,file_name)
);
-- name: create-index-files-builds
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
-- name: create-index-files-procs
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);

View file

@ -0,0 +1,18 @@
-- name: create-table-secrets
CREATE TABLE IF NOT EXISTS secrets (
secret_id SERIAL PRIMARY KEY
,secret_repo_id INTEGER
,secret_name VARCHAR(250)
,secret_value BYTEA
,secret_images VARCHAR(2000)
,secret_events VARCHAR(2000)
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
-- name: create-index-secrets-repo
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);

View file

@ -0,0 +1,17 @@
-- name: create-table-registry
CREATE TABLE IF NOT EXISTS registry (
registry_id SERIAL PRIMARY KEY
,registry_repo_id INTEGER
,registry_addr VARCHAR(250)
,registry_email VARCHAR(500)
,registry_username VARCHAR(2000)
,registry_password VARCHAR(2000)
,registry_token VARCHAR(2000)
,UNIQUE(registry_addr, registry_repo_id)
);
-- name: create-index-registry-repo
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);

View file

@ -0,0 +1,10 @@
-- name: create-table-config
CREATE TABLE IF NOT EXISTS config (
config_id SERIAL PRIMARY KEY
,config_repo_id INTEGER
,config_hash VARCHAR(250)
,config_data BYTEA
,UNIQUE(config_hash, config_repo_id)
);

View file

@ -0,0 +1,7 @@
-- name: create-table-tasks
CREATE TABLE IF NOT EXISTS tasks (
task_id VARCHAR(250) PRIMARY KEY
,task_data BYTEA
,task_labels BYTEA
);

View file

@ -1,8 +1,8 @@
-- +migrate Up
-- name: create-table-agents
CREATE TABLE agents (
CREATE TABLE IF NOT EXISTS agents (
agent_id SERIAL PRIMARY KEY
,agent_addr VARCHAR(500)
,agent_addr VARCHAR(250)
,agent_platform VARCHAR(500)
,agent_capacity INTEGER
,agent_created INTEGER
@ -10,8 +10,3 @@ CREATE TABLE agents (
,UNIQUE(agent_addr)
);
-- +migrate Down
DROP TABLE agents;

View file

@ -0,0 +1,15 @@
-- name: create-table-senders
CREATE TABLE IF NOT EXISTS senders (
sender_id SERIAL PRIMARY KEY
,sender_repo_id INTEGER
,sender_login VARCHAR(250)
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
-- name: create-index-sender-repos
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);

View file

@ -0,0 +1,3 @@
package sqlite
//go:generate togo ddl -package sqlite -dialect sqlite3

View file

@ -0,0 +1,445 @@
package sqlite
import (
"database/sql"
)
// migrations lists every schema migration in the order it must be
// applied. Each entry pairs a unique name — recorded in the
// migrations bookkeeping table once applied — with its DDL statement.
// NOTE(review): this file is generated by togo (see setup_gen.go);
// edits here will be lost on regeneration.
var migrations = []struct {
	name string
	stmt string
}{
	{
		name: "create-table-users",
		stmt: createTableUsers,
	},
	{
		name: "create-table-repos",
		stmt: createTableRepos,
	},
	{
		name: "create-table-builds",
		stmt: createTableBuilds,
	},
	{
		name: "create-index-builds-repo",
		stmt: createIndexBuildsRepo,
	},
	{
		name: "create-index-builds-author",
		stmt: createIndexBuildsAuthor,
	},
	{
		name: "create-index-builds-status",
		stmt: createIndexBuildsStatus,
	},
	{
		name: "create-table-procs",
		stmt: createTableProcs,
	},
	{
		name: "create-index-procs-build",
		stmt: createIndexProcsBuild,
	},
	{
		name: "create-table-logs",
		stmt: createTableLogs,
	},
	{
		name: "create-table-files",
		stmt: createTableFiles,
	},
	{
		name: "create-index-files-builds",
		stmt: createIndexFilesBuilds,
	},
	{
		name: "create-index-files-procs",
		stmt: createIndexFilesProcs,
	},
	{
		name: "create-table-secrets",
		stmt: createTableSecrets,
	},
	{
		name: "create-index-secrets-repo",
		stmt: createIndexSecretsRepo,
	},
	{
		name: "create-table-registry",
		stmt: createTableRegistry,
	},
	{
		name: "create-index-registry-repo",
		stmt: createIndexRegistryRepo,
	},
	{
		name: "create-table-config",
		stmt: createTableConfig,
	},
	{
		name: "create-table-tasks",
		stmt: createTableTasks,
	},
	{
		name: "create-table-agents",
		stmt: createTableAgents,
	},
	{
		name: "create-table-senders",
		stmt: createTableSenders,
	},
	{
		name: "create-index-sender-repos",
		stmt: createIndexSenderRepos,
	},
}
// Migrate brings the database schema up to date. It creates the
// bookkeeping table if needed, then executes, in order, every
// migration statement that has not yet been recorded as applied.
// If any step fails an error is returned.
func Migrate(db *sql.DB) error {
	if err := createTable(db); err != nil {
		return err
	}
	completed, err := selectCompleted(db)
	if err != nil && err != sql.ErrNoRows {
		return err
	}
	for _, m := range migrations {
		// Skip statements recorded as applied by a previous run.
		if _, done := completed[m.name]; done {
			continue
		}
		if _, err := db.Exec(m.stmt); err != nil {
			return err
		}
		// Record success so the statement is never re-run.
		if err := insertMigration(db, m.name); err != nil {
			return err
		}
	}
	return nil
}
// createTable creates the migrations bookkeeping table if it does
// not already exist (the DDL uses IF NOT EXISTS, so it is safe to
// call on every startup).
func createTable(db *sql.DB) error {
	_, err := db.Exec(migrationTableCreate)
	return err
}
// insertMigration records the named migration as applied by
// inserting a row into the migrations table.
func insertMigration(db *sql.DB, name string) error {
	_, err := db.Exec(migrationInsert, name)
	return err
}
// selectCompleted returns the set of migration names already applied,
// keyed by name for O(1) membership tests in Migrate.
//
// An error is returned if the query, a row scan, or row iteration
// fails; callers treat sql.ErrNoRows as an empty set.
func selectCompleted(db *sql.DB) (map[string]struct{}, error) {
	migrations := map[string]struct{}{}
	rows, err := db.Query(migrationSelect)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		migrations[name] = struct{}{}
	}
	// rows.Next returns false on both exhaustion and error; surface an
	// iteration error so a partial result is not mistaken for the full
	// set of applied migrations.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return migrations, nil
}
//
// migration table ddl and sql
//
var migrationTableCreate = `
CREATE TABLE IF NOT EXISTS migrations (
name VARCHAR(512)
,UNIQUE(name)
)
`
var migrationInsert = `
INSERT INTO migrations (name) VALUES (?)
`
var migrationSelect = `
SELECT name FROM migrations
`
//
// 001_create_table_users.sql
//
var createTableUsers = `
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_expiry INTEGER
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);
`
//
// 002_create_table_repos.sql
//
var createTableRepos = `
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,repo_scm TEXT
,repo_config_path TEXT
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);
`
//
// 003_create_table_builds.sql
//
var createTableBuilds = `
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,build_deploy TEXT
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error TEXT
,build_reviewer TEXT
,build_reviewed INTEGER
,build_sender TEXT
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
`
var createIndexBuildsRepo = `
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
`
var createIndexBuildsAuthor = `
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
`
var createIndexBuildsStatus = `
CREATE INDEX IF NOT EXISTS ix_build_status_running ON builds (build_status)
WHERE build_status IN ('pending', 'running');
`
//
// 004_create_table_procs.sql
//
var createTableProcs = `
CREATE TABLE IF NOT EXISTS procs (
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name TEXT
,proc_state TEXT
,proc_error TEXT
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine TEXT
,proc_platform TEXT
,proc_environ TEXT
,UNIQUE(proc_build_id, proc_pid)
);
`
var createIndexProcsBuild = `
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);
`
//
// 005_create_table_logs.sql
//
var createTableLogs = `
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);
`
//
// 006_create_table_files.sql
//
var createTableFiles = `
CREATE TABLE IF NOT EXISTS files (
file_id INTEGER PRIMARY KEY AUTOINCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name TEXT
,file_mime TEXT
,file_size INTEGER
,file_time INTEGER
,file_data BLOB
,UNIQUE(file_proc_id,file_name)
);
`
var createIndexFilesBuilds = `
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
`
var createIndexFilesProcs = `
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);
`
//
// 007_create_table_secets.sql
//
var createTableSecrets = `
CREATE TABLE IF NOT EXISTS secrets (
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
,secret_repo_id INTEGER
,secret_name TEXT
,secret_value TEXT
,secret_images TEXT
,secret_events TEXT
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
`
var createIndexSecretsRepo = `
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);
`
//
// 008_create_table_registry.sql
//
var createTableRegistry = `
CREATE TABLE IF NOT EXISTS registry (
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
,registry_repo_id INTEGER
,registry_addr TEXT
,registry_username TEXT
,registry_password TEXT
,registry_email TEXT
,registry_token TEXT
,UNIQUE(registry_addr, registry_repo_id)
);
`
var createIndexRegistryRepo = `
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);
`
//
// 009_create_table_config.sql
//
var createTableConfig = `
CREATE TABLE IF NOT EXISTS config (
config_id INTEGER PRIMARY KEY AUTOINCREMENT
,config_repo_id INTEGER
,config_hash TEXT
,config_data BLOB
,UNIQUE(config_hash, config_repo_id)
);
`
//
// 010_create_table_tasks.sql
//
var createTableTasks = `
CREATE TABLE IF NOT EXISTS tasks (
task_id TEXT PRIMARY KEY
,task_data BLOB
,task_labels BLOB
);
`
//
// 011_create_table_agents.sql
//
var createTableAgents = `
CREATE TABLE IF NOT EXISTS agents (
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
,agent_addr TEXT
,agent_platform TEXT
,agent_capacity INTEGER
,agent_created INTEGER
,agent_updated INTEGER
,UNIQUE(agent_addr)
);
`
//
// 012_create_table_senders.sql
//
var createTableSenders = `
CREATE TABLE IF NOT EXISTS senders (
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
,sender_repo_id INTEGER
,sender_login TEXT
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
`
var createIndexSenderRepos = `
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
`

View file

@ -0,0 +1,15 @@
-- name: create-table-users
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_expiry INTEGER
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);

View file

@ -0,0 +1,25 @@
-- name: create-table-repos
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,repo_scm TEXT
,repo_config_path TEXT
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);

View file

@ -0,0 +1,48 @@
-- name: create-table-builds
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,build_deploy TEXT
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error TEXT
,build_reviewer TEXT
,build_reviewed INTEGER
,build_sender TEXT
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
-- name: create-index-builds-repo
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
-- name: create-index-builds-author
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
-- name: create-index-builds-status
CREATE INDEX IF NOT EXISTS ix_build_status_running ON builds (build_status)
WHERE build_status IN ('pending', 'running');

View file

@ -0,0 +1,23 @@
-- name: create-table-procs
CREATE TABLE IF NOT EXISTS procs (
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name TEXT
,proc_state TEXT
,proc_error TEXT
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine TEXT
,proc_platform TEXT
,proc_environ TEXT
,UNIQUE(proc_build_id, proc_pid)
);
-- name: create-index-procs-build
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);

View file

@ -0,0 +1,8 @@
-- name: create-table-logs
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);

View file

@ -0,0 +1,21 @@
-- name: create-table-files
CREATE TABLE IF NOT EXISTS files (
file_id INTEGER PRIMARY KEY AUTOINCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name TEXT
,file_mime TEXT
,file_size INTEGER
,file_time INTEGER
,file_data BLOB
,UNIQUE(file_proc_id,file_name)
);
-- name: create-index-files-builds
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
-- name: create-index-files-procs
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);

View file

@ -0,0 +1,17 @@
-- name: create-table-secrets
-- Per-repository secrets, unique per (name, repo). images/events are
-- TEXT columns; skip_verify and conceal are per-secret flags.
CREATE TABLE IF NOT EXISTS secrets (
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
,secret_repo_id INTEGER
,secret_name TEXT
,secret_value TEXT
,secret_images TEXT
,secret_events TEXT
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
-- name: create-index-secrets-repo
-- speeds up secret lookups by repository
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);

View file

@ -0,0 +1,17 @@
-- name: create-table-registry
CREATE TABLE IF NOT EXISTS registry (
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
,registry_repo_id INTEGER
,registry_addr TEXT
,registry_username TEXT
,registry_password TEXT
,registry_email TEXT
,registry_token TEXT
,UNIQUE(registry_addr, registry_repo_id)
);
-- name: create-index-registry-repo
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);

View file

@ -0,0 +1,9 @@
-- name: create-table-config
-- Stored configuration blobs, deduplicated per repository by hash
-- (UNIQUE(config_hash, config_repo_id)).
CREATE TABLE IF NOT EXISTS config (
config_id INTEGER PRIMARY KEY AUTOINCREMENT
,config_repo_id INTEGER
,config_hash TEXT
,config_data BLOB
,UNIQUE(config_hash, config_repo_id)
);

View file

@ -0,0 +1,7 @@
-- name: create-table-tasks
-- Task queue entries keyed by a TEXT primary key; data and labels are
-- opaque blobs.
CREATE TABLE IF NOT EXISTS tasks (
task_id TEXT PRIMARY KEY
,task_data BLOB
,task_labels BLOB
);

View file

@ -1,6 +1,6 @@
-- +migrate Up
-- name: create-table-agents
CREATE TABLE agents (
CREATE TABLE IF NOT EXISTS agents (
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
,agent_addr TEXT
,agent_platform TEXT
@ -10,8 +10,3 @@ CREATE TABLE agents (
,UNIQUE(agent_addr)
);
-- +migrate Down
DROP TABLE agents;

View file

@ -0,0 +1,15 @@
-- name: create-table-senders
-- Allow/block list of senders per repository, unique per
-- (repo, login). Note sender_login is correctly declared TEXT here.
CREATE TABLE IF NOT EXISTS senders (
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
,sender_repo_id INTEGER
,sender_login TEXT
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
-- name: create-index-sender-repos
-- speeds up sender lookups by repository
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);

View file

@ -0,0 +1,242 @@
-- name: create-table-users
-- One row per login (UNIQUE(user_login)). IF NOT EXISTS added for
-- consistency with the per-table DDL files and the other statements
-- in this file, keeping the migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_expiry INTEGER
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);
--------------------------------------------------------------------------------
-- name: create-table-repos
-- One row per repository (UNIQUE(repo_full_name)). IF NOT EXISTS
-- added for consistency with the per-table DDL files, keeping the
-- migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,repo_scm TEXT
,repo_config_path TEXT
,repo_gated BOOLEAN
,UNIQUE(repo_full_name)
);
--------------------------------------------------------------------------------
-- name: create-table-secrets
-- Per-repository secrets, unique per (name, repo). IF NOT EXISTS
-- added on table and index for consistency with the per-table DDL
-- files, keeping the migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS secrets (
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
,secret_repo_id INTEGER
,secret_name TEXT
,secret_value TEXT
,secret_images TEXT
,secret_events TEXT
,secret_skip_verify BOOLEAN
,secret_conceal BOOLEAN
,UNIQUE(secret_name, secret_repo_id)
);
-- name: create-index-secrets-repo
CREATE INDEX IF NOT EXISTS ix_secrets_repo ON secrets (secret_repo_id);
--------------------------------------------------------------------------------
-- name: create-table-registry
-- Per-repository registry credentials, unique per (address, repo).
-- IF NOT EXISTS added on table and index for consistency with the
-- per-table DDL files, keeping the migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS registry (
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
,registry_repo_id INTEGER
,registry_addr TEXT
,registry_username TEXT
,registry_password TEXT
,registry_email TEXT
,registry_token TEXT
,UNIQUE(registry_addr, registry_repo_id)
);
-- name: create-index-registry-repo
CREATE INDEX IF NOT EXISTS ix_registry_repo ON registry (registry_repo_id);
--------------------------------------------------------------------------------
-- name: create-table-builds
-- One row per build, numbered per repository
-- (UNIQUE(build_number, build_repo_id)). IF NOT EXISTS added on the
-- table and all three indexes for consistency with the per-table DDL
-- files, keeping the migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,build_deploy TEXT
,build_signed BOOLEAN
,build_verified BOOLEAN
,build_parent INTEGER
,build_error TEXT
,build_reviewer TEXT
,build_reviewed INTEGER
,build_sender TEXT
,build_config_id INTEGER
,UNIQUE(build_number, build_repo_id)
);
-- name: create-index-builds-repo
CREATE INDEX IF NOT EXISTS ix_build_repo ON builds (build_repo_id);
-- name: create-index-builds-author
CREATE INDEX IF NOT EXISTS ix_build_author ON builds (build_author);
-- name: create-index-builds-status
-- partial index covering only in-flight builds ('pending', 'running')
CREATE INDEX IF NOT EXISTS ix_build_status_running ON builds (build_status)
WHERE build_status IN ('pending', 'running');
--------------------------------------------------------------------------------
-- name: create-table-procs
-- One proc row per executed process of a build, unique per
-- (build id, pid). IF NOT EXISTS added on table and index for
-- consistency with the per-table DDL files, keeping the migration
-- idempotent on re-run.
CREATE TABLE IF NOT EXISTS procs (
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name TEXT
,proc_state TEXT
,proc_error TEXT
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine TEXT
,proc_platform TEXT
,proc_environ TEXT
,UNIQUE(proc_build_id, proc_pid)
);
-- name: create-index-procs-build
CREATE INDEX IF NOT EXISTS proc_build_ix ON procs (proc_build_id);
--------------------------------------------------------------------------------
-- name: create-table-logs
-- One log blob per job; UNIQUE(log_job_id) enforces at most one row
-- per job id.
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);
--------------------------------------------------------------------------------
-- name: create-table-files
-- File metadata and contents, unique per (proc, name); rows cascade
-- on deletion of their parent proc (effective only when SQLite
-- foreign-key enforcement is enabled). IF NOT EXISTS added on the
-- indexes for consistency with the per-table DDL files, keeping the
-- migration idempotent on re-run.
CREATE TABLE IF NOT EXISTS files (
file_id INTEGER PRIMARY KEY AUTOINCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name TEXT
,file_mime TEXT
,file_size INTEGER
,file_time INTEGER
,file_data BLOB
,UNIQUE(file_proc_id,file_name)
,FOREIGN KEY(file_proc_id) REFERENCES procs (proc_id) ON DELETE CASCADE
);
-- name: create-index-files-builds
CREATE INDEX IF NOT EXISTS file_build_ix ON files (file_build_id);
-- name: create-index-files-procs
CREATE INDEX IF NOT EXISTS file_proc_ix ON files (file_proc_id);
--------------------------------------------------------------------------------
-- name: create-table-senders
-- Allow/block list of senders per repository, unique per
-- (repo, login).
-- FIX: sender_login was declared BOOLEAN, inconsistent with the
-- per-table DDL file which declares it TEXT; a login is a name.
-- SQLite's type affinity masked the error. IF NOT EXISTS also added
-- on the index for consistency with the per-table DDL files.
CREATE TABLE IF NOT EXISTS senders (
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
,sender_repo_id INTEGER
,sender_login TEXT
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
-- name: create-index-sender-repos
CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
--------------------------------------------------------------------------------
-- name: create-table-config
-- Stored configuration blobs, deduplicated per repository by hash
-- (UNIQUE(config_hash, config_repo_id)).
CREATE TABLE IF NOT EXISTS config (
config_id INTEGER PRIMARY KEY AUTOINCREMENT
,config_repo_id INTEGER
,config_hash TEXT
,config_data BLOB
,UNIQUE(config_hash, config_repo_id)
);
--------------------------------------------------------------------------------
-- name: create-table-tasks
-- Task queue entries keyed by a TEXT primary key; data and labels are
-- opaque blobs.
CREATE TABLE IF NOT EXISTS tasks (
task_id TEXT PRIMARY KEY
,task_data BLOB
,task_labels BLOB
);
--------------------------------------------------------------------------------
-- name: create-table-agents
-- One row per registered agent, unique by address; created/updated
-- are INTEGER timestamps.
CREATE TABLE IF NOT EXISTS agents (
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
,agent_addr TEXT
,agent_platform TEXT
,agent_capacity INTEGER
,agent_created INTEGER
,agent_updated INTEGER
,UNIQUE(agent_addr)
);

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_conceal BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_conceal BOOLEAN;
UPDATE secrets SET secret_conceal = 0;
UPDATE team_secrets SET team_secret_conceal = 0;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_conceal;
ALTER TABLE team_secrets DROP COLUMN team_secret_conceal;

View file

@ -1,8 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_error TEXT;
UPDATE builds SET build_error = '';
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_error;

View file

@ -1,18 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_config_path TEXT;
ALTER TABLE builds ADD COLUMN build_reviewer TEXT;
ALTER TABLE builds ADD COLUMN build_reviewed INTEGER;
ALTER TABLE builds ADD COLUMN build_sender TEXT;
UPDATE repos SET repo_config_path = '.drone.yml';
UPDATE builds SET build_reviewer = '';
UPDATE builds SET build_reviewed = 0;
UPDATE builds SET build_sender = '';
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_config_path;
ALTER TABLE builds DROP COLUMN build_reviewer;
ALTER TABLE builds DROP COLUMN build_reviewed;
ALTER TABLE builds DROP COLUMN build_sender;

View file

@ -1,46 +0,0 @@
-- +migrate Up
CREATE TABLE procs (
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
,proc_build_id INTEGER
,proc_pid INTEGER
,proc_ppid INTEGER
,proc_pgid INTEGER
,proc_name TEXT
,proc_state TEXT
,proc_error TEXT
,proc_exit_code INTEGER
,proc_started INTEGER
,proc_stopped INTEGER
,proc_machine TEXT
,proc_platform TEXT
,proc_environ TEXT
,UNIQUE(proc_build_id, proc_pid)
);
CREATE INDEX proc_build_ix ON procs (proc_build_id);
CREATE TABLE files (
file_id INTEGER PRIMARY KEY AUTOINCREMENT
,file_build_id INTEGER
,file_proc_id INTEGER
,file_name TEXT
,file_mime TEXT
,file_size INTEGER
,file_time INTEGER
,file_data BLOB
,UNIQUE(file_proc_id,file_name)
,FOREIGN KEY(file_proc_id) REFERENCES procs (proc_id) ON DELETE CASCADE
);
CREATE INDEX file_build_ix ON files (file_build_id);
CREATE INDEX file_proc_ix ON files (file_proc_id);
-- +migrate Down
DROP INDEX file_build_ix;
DROP INDEX file_proc_ix;
DROP TABLE files;
DROP INDEX proc_build_ix;
DROP TABLE procs;

View file

@ -1,22 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_gated BOOLEAN;
UPDATE repos SET repo_gated = 0;
CREATE TABLE senders (
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
,sender_repo_id INTEGER
,sender_login BOOLEAN
,sender_allow BOOLEAN
,sender_block BOOLEAN
,UNIQUE(sender_repo_id,sender_login)
);
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_gated;
DROP INDEX sender_repo_ix;
DROP TABLE senders;

View file

@ -1,11 +0,0 @@
-- +migrate Up
CREATE TABLE tasks (
task_id TEXT PRIMARY KEY
,task_data BLOB
,task_labels BLOB
);
-- +migrate Down
DROP TABLE tasks;

View file

@ -1,17 +0,0 @@
-- +migrate Up
CREATE TABLE config (
config_id INTEGER PRIMARY KEY AUTOINCREMENT
,config_repo_id INTEGER
,config_hash TEXT
,config_data BLOB
,UNIQUE(config_hash, config_repo_id)
);
ALTER TABLE builds ADD COLUMN build_config_id INTEGER;
UPDATE builds set build_config_id = 0;
-- +migrate Down
DROP TABLE config;

View file

@ -1,135 +0,0 @@
-- +migrate Up
CREATE TABLE users (
user_id INTEGER PRIMARY KEY AUTOINCREMENT
,user_login TEXT
,user_token TEXT
,user_secret TEXT
,user_expiry INTEGER
,user_email TEXT
,user_avatar TEXT
,user_active BOOLEAN
,user_admin BOOLEAN
,user_hash TEXT
,UNIQUE(user_login)
);
CREATE TABLE repos (
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
,repo_user_id INTEGER
,repo_owner TEXT
,repo_name TEXT
,repo_full_name TEXT
,repo_avatar TEXT
,repo_link TEXT
,repo_clone TEXT
,repo_branch TEXT
,repo_timeout INTEGER
,repo_private BOOLEAN
,repo_trusted BOOLEAN
,repo_allow_pr BOOLEAN
,repo_allow_push BOOLEAN
,repo_allow_deploys BOOLEAN
,repo_allow_tags BOOLEAN
,repo_hash TEXT
,UNIQUE(repo_full_name)
);
CREATE TABLE stars (
star_id INTEGER PRIMARY KEY AUTOINCREMENT
,star_repo_id INTEGER
,star_user_id INTEGER
,UNIQUE(star_repo_id, star_user_id)
);
CREATE INDEX ix_star_user ON stars (star_user_id);
CREATE TABLE keys (
key_id INTEGER PRIMARY KEY AUTOINCREMENT
,key_repo_id INTEGER
,key_public BLOB
,key_private BLOB
,UNIQUE(key_repo_id)
);
CREATE TABLE builds (
build_id INTEGER PRIMARY KEY AUTOINCREMENT
,build_repo_id INTEGER
,build_number INTEGER
,build_event TEXT
,build_status TEXT
,build_enqueued INTEGER
,build_created INTEGER
,build_started INTEGER
,build_finished INTEGER
,build_commit TEXT
,build_branch TEXT
,build_ref TEXT
,build_refspec TEXT
,build_remote TEXT
,build_title TEXT
,build_message TEXT
,build_timestamp INTEGER
,build_author TEXT
,build_avatar TEXT
,build_email TEXT
,build_link TEXT
,UNIQUE(build_number, build_repo_id)
);
CREATE INDEX ix_build_repo ON builds (build_repo_id);
CREATE INDEX ix_build_author ON builds (build_author);
CREATE TABLE jobs (
job_id INTEGER PRIMARY KEY AUTOINCREMENT
,job_node_id INTEGER
,job_build_id INTEGER
,job_number INTEGER
,job_status TEXT
,job_exit_code INTEGER
,job_enqueued INTEGER
,job_started INTEGER
,job_finished INTEGER
,job_environment TEXT
,UNIQUE(job_build_id, job_number)
);
CREATE INDEX ix_job_build ON jobs (job_build_id);
CREATE INDEX ix_job_node ON jobs (job_node_id);
CREATE TABLE IF NOT EXISTS logs (
log_id INTEGER PRIMARY KEY AUTOINCREMENT
,log_job_id INTEGER
,log_data BLOB
,UNIQUE(log_job_id)
);
CREATE TABLE IF NOT EXISTS nodes (
node_id INTEGER PRIMARY KEY AUTOINCREMENT
,node_addr TEXT
,node_arch TEXT
,node_cert BLOB
,node_key BLOB
,node_ca BLOB
);
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
INSERT INTO nodes VALUES(null, 'unix:///var/run/docker.sock', 'linux_amd64', '', '', '');
-- +migrate Down
DROP TABLE nodes;
DROP TABLE logs;
DROP TABLE jobs;
DROP TABLE builds;
DROP TABLE keys;
DROP TABLE stars;
DROP TABLE repos;
DROP TABLE users;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE repos ADD COLUMN repo_scm TEXT;
ALTER TABLE builds ADD COLUMN build_deploy TEXT;
UPDATE repos SET repo_scm = 'git';
UPDATE builds SET build_deploy = '';
-- +migrate Down
ALTER TABLE repos DROP COLUMN repo_scm;
ALTER TABLE builds DROP COLUMN build_deploy;

View file

@ -1,34 +0,0 @@
-- +migrate Up
CREATE TABLE secrets (
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
,secret_repo_id INTEGER
,secret_name TEXT
,secret_value TEXT
,secret_images TEXT
,secret_events TEXT
,UNIQUE(secret_name, secret_repo_id)
);
CREATE TABLE registry (
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
,registry_repo_id INTEGER
,registry_addr TEXT
,registry_username TEXT
,registry_password TEXT
,registry_email TEXT
,registry_token TEXT
,UNIQUE(registry_addr, registry_repo_id)
);
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
-- +migrate Down
DROP INDEX ix_secrets_repo;
DROP INDEX ix_registry_repo;
DROP TABLE secrets;
DROP TABLE registry;

View file

@ -1,9 +0,0 @@
-- +migrate Up
ALTER TABLE jobs ADD COLUMN job_error TEXT;
UPDATE jobs SET job_error = '';
-- +migrate Down
ALTER TABLE jobs DROP COLUMN job_error;

View file

@ -1,16 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_signed BOOLEAN;
ALTER TABLE builds ADD COLUMN build_verified BOOLEAN;
UPDATE builds SET build_signed = 0;
UPDATE builds SET build_verified = 0;
CREATE INDEX ix_build_status_running ON builds (build_status)
WHERE build_status IN ('pending', 'running');
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_signed;
ALTER TABLE builds DROP COLUMN build_verified;
DROP INDEX ix_build_status_running;

View file

@ -1,19 +0,0 @@
-- +migrate Up
CREATE TABLE team_secrets (
team_secret_id INTEGER PRIMARY KEY AUTOINCREMENT
,team_secret_key TEXT
,team_secret_name TEXT
,team_secret_value TEXT
,team_secret_images TEXT
,team_secret_events TEXT
,UNIQUE(team_secret_name, team_secret_key)
);
CREATE INDEX ix_team_secrets_key ON team_secrets (team_secret_key);
-- +migrate Down
DROP INDEX ix_team_secrets_key;
DROP TABLE team_secrets;

View file

@ -1,7 +0,0 @@
-- +migrate Up
ALTER TABLE builds ADD COLUMN build_parent INTEGER DEFAULT 0;
-- +migrate Down
ALTER TABLE builds DROP COLUMN build_parent;

View file

@ -1,12 +0,0 @@
-- +migrate Up
ALTER TABLE secrets ADD COLUMN secret_skip_verify BOOLEAN;
ALTER TABLE team_secrets ADD COLUMN team_secret_skip_verify BOOLEAN;
UPDATE secrets SET secret_skip_verify = 0;
UPDATE team_secrets SET team_secret_skip_verify = 0;
-- +migrate Down
ALTER TABLE secrets DROP COLUMN secret_skip_verify;
ALTER TABLE team_secrets DROP COLUMN team_secret_skip_verify;

View file

@ -7,7 +7,6 @@ import (
"github.com/drone/drone/store"
"github.com/drone/drone/store/datastore/ddl"
"github.com/rubenv/sql-migrate"
"github.com/russross/meddler"
"github.com/Sirupsen/logrus"
@ -116,13 +115,7 @@ func pingDatabase(db *sql.DB) (err error) {
// helper function to setup the database by performing
// automated database migration steps.
func setupDatabase(driver string, db *sql.DB) error {
var migrations = &migrate.AssetMigrationSource{
Asset: ddl.Asset,
AssetDir: ddl.AssetDir,
Dir: driver,
}
_, err := migrate.Exec(db, driver, migrations, migrate.Up)
return err
return ddl.Migrate(driver, db)
}
// helper function to setup the meddler default driver

View file

@ -1,245 +0,0 @@
# sql-migrate
> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate)
Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
## Features
* Usable as a CLI tool or as a library
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
* Can embed migrations into your application
* Migrations are defined with SQL for full flexibility
* Atomic migrations
* Up/down migrations to allow rollback
* Supports multiple database types in one project
## Installation
To install the library and command line program, use the following:
```bash
go get github.com/rubenv/sql-migrate/...
```
## Usage
### As a standalone tool
```
$ sql-migrate --help
usage: sql-migrate [--version] [--help] <command> [<args>]
Available commands are:
down Undo a database migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
```
Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
```yml
development:
dialect: sqlite3
datasource: test.db
dir: migrations/sqlite3
production:
dialect: postgres
datasource: dbname=myapp sslmode=disable
dir: migrations/postgres
table: migrations
```
The `table` setting is optional and will default to `gorp_migrations`.
The environment that will be used can be specified with the `-env` flag (defaults to `development`).
Use the `--help` flag in combination with any of the commands to get an overview of its usage:
```
$ sql-migrate up --help
Usage: sql-migrate up [options] ...
Migrates the database to the most recent version available.
Options:
-config=config.yml Configuration file to use.
-env="development" Environment.
-limit=0 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
```
The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
Use the `status` command to see the state of the applied migrations:
```bash
$ sql-migrate status
+---------------+-----------------------------------------+
| MIGRATION | APPLIED |
+---------------+-----------------------------------------+
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
| 2_record.sql | no |
+---------------+-----------------------------------------+
```
### As a library
Import sql-migrate into your application:
```go
import "github.com/rubenv/sql-migrate"
```
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
```go
// Hardcoded strings in memory:
migrations := &migrate.MemoryMigrationSource{
Migrations: []*migrate.Migration{
&migrate.Migration{
Id: "123",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
},
}
// OR: Read migrations from a folder:
migrations := &migrate.FileMigrationSource{
Dir: "db/migrations",
}
// OR: Use migrations from bindata:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "migrations",
}
```
Then use the `Exec` function to upgrade your database:
```go
db, err := sql.Open("sqlite3", filename)
if err != nil {
// Handle errors!
}
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
if err != nil {
// Handle errors!
}
fmt.Printf("Applied %d migrations!\n", n)
```
Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
## Writing migrations
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
```sql
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied
CREATE TABLE people (id int);
-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back
DROP TABLE people;
```
You can put multiple statements in each block, as long as you end them with a semicolon (`;`).
If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries:
```sql
-- +migrate Up
CREATE TABLE people (id int);
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION do_something()
returns void AS $$
DECLARE
create_query text;
BEGIN
-- Do something here
END;
$$
language plpgsql;
-- +migrate StatementEnd
-- +migrate Down
DROP FUNCTION do_something();
DROP TABLE people;
```
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata)
If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Then use bindata to generate a `.go` file with the migrations embedded:
```bash
go-bindata -pkg myapp -o bindata.go db/migrations/
```
The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives).
Use the `AssetMigrationSource` in your application to find the migrations:
```go
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "db/migrations",
}
```
Both `Asset` and `AssetDir` are functions provided by bindata.
Then proceed as usual.
## Extending
Adding a new migration source means implementing `MigrationSource`.
```go
type MigrationSource interface {
FindMigrations() ([]*Migration, error)
}
```
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
## License
(The MIT License)
Copyright (C) 2014-2015 by Ruben Vermeersch <ruben@rocketeer.be>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -1,199 +0,0 @@
/*
SQL Schema migration tool for Go.
Key features:
* Usable as a CLI tool or as a library
* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
* Can embed migrations into your application
* Migrations are defined with SQL for full flexibility
* Atomic migrations
* Up/down migrations to allow rollback
* Supports multiple database types in one project
Installation
To install the library and command line program, use the following:
go get github.com/rubenv/sql-migrate/...
Command-line tool
The main command is called sql-migrate.
$ sql-migrate --help
usage: sql-migrate [--version] [--help] <command> [<args>]
Available commands are:
down Undo a database migration
redo Reapply the last migration
status Show migration status
up Migrates the database to the most recent version available
Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
development:
dialect: sqlite3
datasource: test.db
dir: migrations/sqlite3
production:
dialect: postgres
datasource: dbname=myapp sslmode=disable
dir: migrations/postgres
table: migrations
The `table` setting is optional and will default to `gorp_migrations`.
The environment that will be used can be specified with the -env flag (defaults to development).
Use the --help flag in combination with any of the commands to get an overview of its usage:
$ sql-migrate up --help
Usage: sql-migrate up [options] ...
Migrates the database to the most recent version available.
Options:
-config=config.yml Configuration file to use.
-env="development" Environment.
-limit=0 Limit the number of migrations (0 = unlimited).
-dryrun Don't apply migrations, just print them.
The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
Use the status command to see the state of the applied migrations:
$ sql-migrate status
+---------------+-----------------------------------------+
| MIGRATION | APPLIED |
+---------------+-----------------------------------------+
| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
| 2_record.sql | no |
+---------------+-----------------------------------------+
Library
Import sql-migrate into your application:
import "github.com/rubenv/sql-migrate"
Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later):
// Hardcoded strings in memory:
migrations := &migrate.MemoryMigrationSource{
Migrations: []*migrate.Migration{
&migrate.Migration{
Id: "123",
Up: []string{"CREATE TABLE people (id int)"},
Down: []string{"DROP TABLE people"},
},
},
}
// OR: Read migrations from a folder:
migrations := &migrate.FileMigrationSource{
Dir: "db/migrations",
}
// OR: Use migrations from bindata:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "migrations",
}
Then use the Exec function to upgrade your database:
db, err := sql.Open("sqlite3", filename)
if err != nil {
// Handle errors!
}
n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
if err != nil {
// Handle errors!
}
fmt.Printf("Applied %d migrations!\n", n)
Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
The full set of capabilities can be found in the API docs below.
Writing migrations
Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied
CREATE TABLE people (id int);
-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back
DROP TABLE people;
You can put multiple statements in each block, as long as you end them with a semicolon (;).
If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
-- +migrate Up
CREATE TABLE people (id int);
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION do_something()
returns void AS $$
DECLARE
create_query text;
BEGIN
-- Do something here
END;
$$
language plpgsql;
-- +migrate StatementEnd
-- +migrate Down
DROP FUNCTION do_something();
DROP TABLE people;
The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
Embedding migrations with bindata
If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files.
Just write your migration files as usual, as a set of SQL files in a folder.
Then use bindata to generate a .go file with the migrations embedded:
go-bindata -pkg myapp -o bindata.go db/migrations/
The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives).
Use the AssetMigrationSource in your application to find the migrations:
migrations := &migrate.AssetMigrationSource{
Asset: Asset,
AssetDir: AssetDir,
Dir: "db/migrations",
}
Both Asset and AssetDir are functions provided by bindata.
Then proceed as usual.
Extending
Adding a new migration source means implementing MigrationSource.
type MigrationSource interface {
FindMigrations() ([]*Migration, error)
}
The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
*/
package migrate

View file

@ -1,475 +0,0 @@
package migrate
import (
"bytes"
"database/sql"
"errors"
"fmt"
"io"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/rubenv/sql-migrate/sqlparse"
"gopkg.in/gorp.v1"
)
type MigrationDirection int
const (
Up MigrationDirection = iota
Down
)
var tableName = "gorp_migrations"
var schemaName = ""
var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
// Set the name of the table used to store migration info.
//
// Should be called before any other call such as (Exec, ExecMax, ...).
func SetTable(name string) {
if name != "" {
tableName = name
}
}
// SetSchema sets the name of a schema that the migration table be referenced.
func SetSchema(name string) {
if name != "" {
schemaName = name
}
}
func getTableName() string {
t := tableName
if schemaName != "" {
t = fmt.Sprintf("%s.%s", schemaName, t)
}
return t
}
type Migration struct {
Id string
Up []string
Down []string
}
func (m Migration) Less(other *Migration) bool {
switch {
case m.isNumeric() && other.isNumeric():
return m.VersionInt() < other.VersionInt()
case m.isNumeric() && !other.isNumeric():
return true
case !m.isNumeric() && other.isNumeric():
return false
default:
return m.Id < other.Id
}
}
func (m Migration) isNumeric() bool {
return len(m.NumberPrefixMatches()) > 0
}
func (m Migration) NumberPrefixMatches() []string {
return numberPrefixRegex.FindStringSubmatch(m.Id)
}
func (m Migration) VersionInt() int64 {
v := m.NumberPrefixMatches()[1]
value, err := strconv.ParseInt(v, 10, 64)
if err != nil {
panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
}
return value
}
// PlannedMigration is a Migration scheduled for execution together with
// the concrete queries (Up or Down) that will run.
type PlannedMigration struct {
*Migration
Queries []string
}
// byId sorts migrations using Migration.Less (numeric-aware Id order).
type byId []*Migration
func (b byId) Len() int { return len(b) }
func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
// MigrationRecord is a row of the migration bookkeeping table recording
// which migration ran and when it was applied.
type MigrationRecord struct {
Id string `db:"id"`
AppliedAt time.Time `db:"applied_at"`
}
// MigrationDialects maps database/sql driver names to the gorp dialect
// used to talk to that database.
var MigrationDialects = map[string]gorp.Dialect{
"sqlite3": gorp.SqliteDialect{},
"postgres": gorp.PostgresDialect{},
"mysql": gorp.MySQLDialect{"InnoDB", "UTF8"},
"mssql": gorp.SqlServerDialect{},
"oci8": gorp.OracleDialect{},
}
// MigrationSource supplies the set of migrations to be run.
type MigrationSource interface {
// Finds the migrations.
//
// The resulting slice of migrations should be sorted by Id.
FindMigrations() ([]*Migration, error)
}
// A hardcoded set of migrations, in-memory.
type MemoryMigrationSource struct {
Migrations []*Migration
}
// Compile-time check that MemoryMigrationSource satisfies MigrationSource.
var _ MigrationSource = (*MemoryMigrationSource)(nil)
// FindMigrations returns the configured migrations sorted by Id.
// Note: it sorts the underlying Migrations slice in place.
func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
// Make sure migrations are sorted
sort.Sort(byId(m.Migrations))
return m.Migrations, nil
}
// FileMigrationSource is a MigrationSource that loads migrations from
// the *.sql files found in a directory.
type FileMigrationSource struct {
	Dir string
}

var _ MigrationSource = (*FileMigrationSource)(nil)

// FindMigrations parses every *.sql file in Dir into a Migration and
// returns the result sorted by Id. Files without a .sql suffix are
// ignored.
func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
	migrations := make([]*Migration, 0)

	dir, err := os.Open(f.Dir)
	if err != nil {
		return nil, err
	}
	// BUG FIX: close the directory handle on every return path (the
	// original leaked it).
	defer dir.Close()

	files, err := dir.Readdir(0)
	if err != nil {
		return nil, err
	}

	for _, info := range files {
		if !strings.HasSuffix(info.Name(), ".sql") {
			continue
		}
		file, err := os.Open(path.Join(f.Dir, info.Name()))
		if err != nil {
			return nil, err
		}
		migration, err := ParseMigration(info.Name(), file)
		// BUG FIX: close each migration file once parsed; the original
		// left every handle open until process exit.
		file.Close()
		if err != nil {
			return nil, err
		}
		migrations = append(migrations, migration)
	}

	// Make sure migrations are sorted
	sort.Sort(byId(migrations))

	return migrations, nil
}
// AssetMigrationSource loads migrations from a bindata asset bundle.
type AssetMigrationSource struct {
	// Asset should return content of file in path if exists
	Asset func(path string) ([]byte, error)

	// AssetDir should return list of files in the path
	AssetDir func(path string) ([]string, error)

	// Path in the bindata to use.
	Dir string
}

var _ MigrationSource = (*AssetMigrationSource)(nil)

// FindMigrations parses every *.sql asset under Dir into a Migration
// and returns the result sorted by Id. Non-SQL assets are ignored.
func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
	migrations := make([]*Migration, 0)

	names, err := a.AssetDir(a.Dir)
	if err != nil {
		return nil, err
	}

	for _, name := range names {
		if !strings.HasSuffix(name, ".sql") {
			continue
		}
		content, err := a.Asset(path.Join(a.Dir, name))
		if err != nil {
			return nil, err
		}
		migration, err := ParseMigration(name, bytes.NewReader(content))
		if err != nil {
			return nil, err
		}
		migrations = append(migrations, migration)
	}

	// Keep the result in Id order for deterministic execution.
	sort.Sort(byId(migrations))
	return migrations, nil
}
// ParseMigration reads a migration script from r and splits it into its
// Up and Down statement lists, returning a Migration with the given id.
func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) {
	up, err := sqlparse.SplitSQLStatements(r, true)
	if err != nil {
		return nil, err
	}

	down, err := sqlparse.SplitSQLStatements(r, false)
	if err != nil {
		return nil, err
	}

	return &Migration{
		Id:   id,
		Up:   up,
		Down: down,
	}, nil
}
// Execute a set of migrations
//
// Returns the number of applied migrations.
func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) {
// A max of 0 means "no limit" — apply every pending migration.
return ExecMax(db, dialect, m, dir, 0)
}
// Execute a set of migrations
//
// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec).
//
// Returns the number of applied migrations.
func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) {
	migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max)
	if err != nil {
		return 0, err
	}

	// Apply each migration in its own transaction so a failure leaves
	// previously applied migrations committed.
	applied := 0
	for _, migration := range migrations {
		trans, err := dbMap.Begin()
		if err != nil {
			return applied, err
		}

		for _, stmt := range migration.Queries {
			if _, err := trans.Exec(stmt); err != nil {
				trans.Rollback()
				return applied, err
			}
		}

		switch dir {
		case Up:
			// Record the migration as applied.
			err = trans.Insert(&MigrationRecord{
				Id:        migration.Id,
				AppliedAt: time.Now(),
			})
			if err != nil {
				// BUG FIX: the original returned here without rolling
				// back, leaking the open transaction and its connection.
				trans.Rollback()
				return applied, err
			}
		case Down:
			// Remove the bookkeeping record of the reverted migration.
			_, err := trans.Delete(&MigrationRecord{
				Id: migration.Id,
			})
			if err != nil {
				// BUG FIX: roll back instead of leaking the transaction.
				trans.Rollback()
				return applied, err
			}
		default:
			panic("Not possible")
		}

		if err := trans.Commit(); err != nil {
			return applied, err
		}

		applied++
	}

	return applied, nil
}
// Plan a migration.
//
// Returns, in order, the migrations to execute for the requested
// direction — including "catch-up" migrations that sort before the
// last-run one but were never applied — plus the gorp DbMap to run
// them with.
func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) {
dbMap, err := getMigrationDbMap(db, dialect)
if err != nil {
return nil, nil, err
}
migrations, err := m.FindMigrations()
if err != nil {
return nil, nil, err
}
// Load the bookkeeping records of already-applied migrations.
var migrationRecords []MigrationRecord
_, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", getTableName()))
if err != nil {
return nil, nil, err
}
// Sort migrations that have been run by Id.
var existingMigrations []*Migration
for _, migrationRecord := range migrationRecords {
existingMigrations = append(existingMigrations, &Migration{
Id: migrationRecord.Id,
})
}
sort.Sort(byId(existingMigrations))
// Get last migration that was run
record := &Migration{}
if len(existingMigrations) > 0 {
record = existingMigrations[len(existingMigrations)-1]
}
result := make([]*PlannedMigration, 0)
// Add missing migrations up to the last run migration.
// This can happen for example when merges happened.
if len(existingMigrations) > 0 {
result = append(result, ToCatchup(migrations, existingMigrations, record)...)
}
// Figure out which migrations to apply
toApply := ToApply(migrations, record.Id, dir)
toApplyCount := len(toApply)
if max > 0 && max < toApplyCount {
toApplyCount = max
}
// Pair each selected migration with the queries for the requested
// direction.
for _, v := range toApply[0:toApplyCount] {
if dir == Up {
result = append(result, &PlannedMigration{
Migration: v,
Queries: v.Up,
})
} else if dir == Down {
result = append(result, &PlannedMigration{
Migration: v,
Queries: v.Down,
})
}
}
return result, dbMap, nil
}
// ToApply filters a slice of migrations down to the ones that should
// run, given the Id of the last-applied migration (`current`, empty
// when none) and the direction of travel.
func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration {
	// Locate the last-applied migration. index stays -1 when current is
	// empty; when current is set but absent, the scan deliberately stops
	// at the final element (preserving the original linear-search
	// behavior).
	index := -1
	if current != "" {
		for index < len(migrations)-1 {
			index++
			if migrations[index].Id == current {
				break
			}
		}
	}

	switch direction {
	case Up:
		// Everything after the last-applied migration.
		return migrations[index+1:]
	case Down:
		if index == -1 {
			return []*Migration{}
		}
		// The applied prefix, in reverse order.
		reversed := make([]*Migration, index+1)
		for i, migration := range migrations[:index+1] {
			reversed[index-i] = migration
		}
		return reversed
	}
	panic("Not possible")
}
// ToCatchup returns planned Up migrations for every known migration
// that sorts before lastRun yet has no record of having been applied —
// e.g. migrations merged in after later ones had already run.
func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration {
	// applied reports whether a migration Id is already recorded.
	applied := func(id string) bool {
		for _, existing := range existingMigrations {
			if existing.Id == id {
				return true
			}
		}
		return false
	}

	missing := make([]*PlannedMigration, 0)
	for _, migration := range migrations {
		if !applied(migration.Id) && migration.Less(lastRun) {
			missing = append(missing, &PlannedMigration{Migration: migration, Queries: migration.Up})
		}
	}
	return missing
}
// GetMigrationRecords returns every row of the migration bookkeeping
// table, ordered by migration id.
func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) {
	dbMap, err := getMigrationDbMap(db, dialect)
	if err != nil {
		return nil, err
	}

	var records []*MigrationRecord
	query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", getTableName())
	if _, err := dbMap.Select(&records, query); err != nil {
		return nil, err
	}
	return records, nil
}
// getMigrationDbMap builds the gorp DbMap used for migration
// bookkeeping, creating the migration table if it does not exist yet.
// Unknown dialect names yield an error.
func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) {
d, ok := MigrationDialects[dialect]
if !ok {
return nil, fmt.Errorf("Unknown dialect: %s", dialect)
}
// When using the mysql driver, make sure that the parseTime option is
// configured, otherwise it won't map time columns to time.Time. See
// https://github.com/rubenv/sql-migrate/issues/2
if dialect == "mysql" {
var out *time.Time
err := db.QueryRow("SELECT NOW()").Scan(&out)
if err != nil {
// NOTE(review): this matches the driver's error text verbatim,
// which is fragile across go-sql-driver/mysql versions — confirm
// it still fires with the vendored driver.
if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" {
return nil, errors.New(`Cannot parse dates.
Make sure that the parseTime option is supplied to your database connection.
Check https://github.com/go-sql-driver/mysql#parsetime for more info.`)
} else {
return nil, err
}
}
}
// Create migration database map
dbMap := &gorp.DbMap{Db: db, Dialect: d}
// Key off Id (non-autoincrement) so Insert/Delete of MigrationRecord work.
dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id")
//dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds))
err := dbMap.CreateTablesIfNotExists()
if err != nil {
return nil, err
}
return dbMap, nil
}
// TODO: Run migration + record insert in transaction.

View file

@ -1,28 +0,0 @@
# SQL migration parser
Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser.
## License
(The MIT License)
Copyright (C) 2014 by Ruben Vermeersch <ruben@rocketeer.be>
Copyright (C) 2012-2014 by Liam Staskawicz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -1,128 +0,0 @@
package sqlparse
import (
"bufio"
"bytes"
"errors"
"io"
"strings"
)
const sqlCmdPrefix = "-- +migrate "
// Checks the line to see if the line has a statement-ending semicolon
// or if the line contains a double-dash comment.
func endsWithSemicolon(line string) bool {
prev := ""
scanner := bufio.NewScanner(strings.NewReader(line))
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
word := scanner.Text()
if strings.HasPrefix(word, "--") {
break
}
prev = word
}
return strings.HasSuffix(prev, ";")
}
// Split the given sql script into individual statements.
//
// The base case is to simply split on semicolons, as these
// naturally terminate a statement.
//
// However, more complex cases like pl/pgsql can have semicolons
// within a statement. For these cases, we provide the explicit annotations
// 'StatementBegin' and 'StatementEnd' to allow the script to
// tell us to ignore semicolons.
func SplitSQLStatements(r io.ReadSeeker, direction bool) ([]string, error) {
_, err := r.Seek(0, 0)
if err != nil {
return nil, err
}
var buf bytes.Buffer
scanner := bufio.NewScanner(r)
// track the count of each section
// so we can diagnose scripts with no annotations
upSections := 0
downSections := 0
statementEnded := false
ignoreSemicolons := false
directionIsActive := false
stmts := make([]string, 0)
for scanner.Scan() {
line := scanner.Text()
// handle any migrate-specific commands
if strings.HasPrefix(line, sqlCmdPrefix) {
cmd := strings.TrimSpace(line[len(sqlCmdPrefix):])
switch cmd {
case "Up":
directionIsActive = (direction == true)
upSections++
break
case "Down":
directionIsActive = (direction == false)
downSections++
break
case "StatementBegin":
if directionIsActive {
ignoreSemicolons = true
}
break
case "StatementEnd":
if directionIsActive {
statementEnded = (ignoreSemicolons == true)
ignoreSemicolons = false
}
break
}
}
if !directionIsActive {
continue
}
if _, err := buf.WriteString(line + "\n"); err != nil {
return nil, err
}
// Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement
// Lines that end with semicolon that are in a statement block
// do not conclude statement.
if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded {
statementEnded = false
stmts = append(stmts, buf.String())
buf.Reset()
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
// diagnose likely migration script errors
if ignoreSemicolons {
return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'")
}
if upSections == 0 && downSections == 0 {
return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed.
See https://github.com/rubenv/sql-migrate for details.`)
}
return stmts, nil
}

22
vendor/gopkg.in/gorp.v1/LICENSE generated vendored
View file

@ -1,22 +0,0 @@
(The MIT License)
Copyright (c) 2012 James Cooper <james@bitmechanic.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

6
vendor/gopkg.in/gorp.v1/Makefile generated vendored
View file

@ -1,6 +0,0 @@
# Legacy pre-`go tool` build file: pulls in the Make-based build
# machinery that shipped with early Go releases under $(GOROOT)/src.
include $(GOROOT)/src/Make.inc
# Import path the package installs under.
TARG = github.com/coopernurse/gorp
# Go sources compiled into the package.
GOFILES = gorp.go dialect.go
include $(GOROOT)/src/Make.pkg

Some files were not shown because too many files have changed in this diff Show more