From fdc9647ae0148e8c6eb5fc5531ee9d201c475b9d Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Wed, 14 Dec 2022 12:00:21 +0530 Subject: [PATCH 01/25] refactor: use pgx in project repo --- config/config_server.go | 2 +- go.mod | 16 +- go.sum | 31 ++- internal/errors/errors.go | 8 + .../000041_update_project_table.down.sql | 4 + .../000041_update_project_table.up.sql | 18 ++ internal/store/postgres/pgx.go | 34 +++ internal/store/postgres/postgres.go | 6 +- .../postgres/tenant/project_repository.go | 96 ++++----- .../tenant/project_repository_test.go | 12 +- internal/store/postgres/tracer.go | 198 ++++++++++++++++++ server/optimus.go | 10 +- tests/setup/database.go | 41 +++- 13 files changed, 381 insertions(+), 95 deletions(-) create mode 100644 internal/store/postgres/migrations/000041_update_project_table.down.sql create mode 100644 internal/store/postgres/migrations/000041_update_project_table.up.sql create mode 100644 internal/store/postgres/pgx.go create mode 100644 internal/store/postgres/tracer.go diff --git a/config/config_server.go b/config/config_server.go index 61281d7870..e718d5f03b 100644 --- a/config/config_server.go +++ b/config/config_server.go @@ -38,7 +38,7 @@ type Deployer struct { type DBConfig struct { DSN string `mapstructure:"dsn"` // data source name e.g.: postgres://user:password@host:123/database?sslmode=disable - MaxIdleConnection int `mapstructure:"max_idle_connection" default:"10"` // maximum allowed idle DB connections + MinOpenConnection int `mapstructure:"min_open_connection" default:"5"` // minimum open DB connections MaxOpenConnection int `mapstructure:"max_open_connection" default:"20"` // maximum allowed open DB connections } diff --git a/go.mod b/go.mod index 6660ec3edd..8ca540966c 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ require ( cloud.google.com/go/bigquery v1.44.0 github.com/AlecAivazis/survey/v2 v2.2.7 github.com/MakeNowJust/heredoc v1.0.0 - github.com/Masterminds/sprig/v3 v3.2.2 github.com/PagerDuty/go-pagerduty 
v1.5.1 github.com/briandowns/spinner v1.18.0 github.com/charmbracelet/bubbles v0.13.0 @@ -25,6 +24,7 @@ require ( github.com/hashicorp/go-getter v1.6.2 github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/go-plugin v1.4.1 + github.com/jackc/pgx/v5 v5.2.0 github.com/kushsharma/parallel v0.2.1 github.com/lib/pq v1.10.4 github.com/mattn/go-isatty v0.0.16 @@ -68,8 +68,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.1 // indirect cloud.google.com/go/iam v0.7.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/alecthomas/chroma v0.8.2 // indirect github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect github.com/atotto/clipboard v0.1.4 // indirect @@ -104,8 +102,6 @@ require ( github.com/hashicorp/go-version v1.3.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.11.0 // indirect @@ -115,6 +111,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect github.com/jackc/pgtype v1.10.0 // indirect github.com/jackc/pgx/v4 v4.15.0 // indirect + github.com/jackc/puddle/v2 v2.1.2 // indirect github.com/jeremywohl/flatten v1.0.1 // indirect github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e // indirect github.com/jinzhu/inflection v1.0.0 // indirect @@ -133,10 +130,8 @@ require ( github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/microcosm-cc/bluemonday v1.0.6 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect 
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/reflow v0.3.0 // indirect @@ -149,7 +144,7 @@ require ( github.com/prometheus/common v0.30.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -160,10 +155,11 @@ require ( github.com/yuin/goldmark-emoji v1.0.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v0.30.0 // indirect - go.uber.org/atomic v1.9.0 // indirect + go.uber.org/atomic v1.10.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 // indirect + golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect + golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.4.0 // indirect diff --git a/go.sum b/go.sum index a6b026b384..56e434097a 100644 --- a/go.sum +++ b/go.sum @@ -141,12 +141,8 @@ github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= 
-github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -904,9 +900,6 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -914,7 +907,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo 
v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -983,6 +975,8 @@ github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1r github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= +github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -990,6 +984,8 @@ github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg= +github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/jeremywohl/flatten v1.0.1 h1:LrsxmB3hfwJuE+ptGOijix1PIfOoKLJ3Uee/mzbgtrs= 
github.com/jeremywohl/flatten v1.0.1/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= @@ -1058,8 +1054,8 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1154,9 +1150,6 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1173,9 +1166,6 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -1358,6 +1348,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -1577,8 +1569,9 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= @@ -1619,7 +1612,6 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1635,8 +1627,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto 
v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1797,6 +1790,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/internal/errors/errors.go b/internal/errors/errors.go index 854a0e4fbb..6b54f98e6b 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -159,6 +159,14 @@ func Wrap(entity, msg string, err error) error { } } +func 
WrapIfErr(entity, msg string, err error) error { + if err == nil { + return nil + } + + return Wrap(entity, msg, err) +} + func GRPCErr(err error, msg string) error { code := codes.Internal var de *DomainError diff --git a/internal/store/postgres/migrations/000041_update_project_table.down.sql b/internal/store/postgres/migrations/000041_update_project_table.down.sql new file mode 100644 index 0000000000..ec40ef2f78 --- /dev/null +++ b/internal/store/postgres/migrations/000041_update_project_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS project; + +ALTER TABLE IF EXISTS project_old + RENAME TO project; \ No newline at end of file diff --git a/internal/store/postgres/migrations/000041_update_project_table.up.sql b/internal/store/postgres/migrations/000041_update_project_table.up.sql new file mode 100644 index 0000000000..82e1b4f3ce --- /dev/null +++ b/internal/store/postgres/migrations/000041_update_project_table.up.sql @@ -0,0 +1,18 @@ +ALTER TABLE IF EXISTS project + RENAME TO project_old; + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; +CREATE TABLE IF NOT EXISTS project ( + name VARCHAR(100) PRIMARY KEY, + id UUID NOT NULL UNIQUE DEFAULT uuid_generate_v4(), + + config JSONB, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + deleted_at TIMESTAMP WITH TIME ZONE +); + +INSERT INTO project (name, id, config, created_at, updated_at, deleted_at) +SELECT name, id, config, created_at, updated_at, deleted_at +FROM project_old +WHERE deleted_at IS NULL; \ No newline at end of file diff --git a/internal/store/postgres/pgx.go b/internal/store/postgres/pgx.go new file mode 100644 index 0000000000..936f05cd40 --- /dev/null +++ b/internal/store/postgres/pgx.go @@ -0,0 +1,34 @@ +package postgres + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/odpf/optimus/config" +) + +// Open will connect to the DB with custom configuration +func Open(config config.DBConfig) (*pgxpool.Pool, error) { + 
pgxConf, err := pgxpool.ParseConfig(config.DSN) + if err != nil { + return nil, err + } + + if config.MaxOpenConnection > 0 { + pgxConf.MaxConns = int32(config.MaxOpenConnection) + } + if config.MinOpenConnection > 0 { + pgxConf.MinConns = int32(config.MinOpenConnection) + } + + pgxConf.ConnConfig.Tracer = newTracer() + + dbPool, err := pgxpool.NewWithConfig(context.Background(), pgxConf) + if err != nil { + return nil, fmt.Errorf("unable to connect to database: %w", err) + } + + return dbPool, nil // cleanup to be done with dbPool.Close() +} diff --git a/internal/store/postgres/postgres.go b/internal/store/postgres/postgres.go index a830e668fb..5c96d62613 100644 --- a/internal/store/postgres/postgres.go +++ b/internal/store/postgres/postgres.go @@ -26,7 +26,7 @@ import ( const tracingSpanKey = "otel:span" -var tracer = otel.Tracer("optimus/store/postgres") +var tracerOtel = otel.Tracer("optimus/store/postgres") // Connect connect to the DB with custom configuration. func Connect(dbConf config.DBConfig, writer io.Writer) (*gorm.DB, error) { @@ -56,7 +56,7 @@ func Connect(dbConf config.DBConfig, writer io.Writer) (*gorm.DB, error) { if err != nil { return nil, err } - sqlDB.SetMaxIdleConns(dbConf.MaxIdleConnection) + //sqlDB.SetMaxIdleConns(dbConf.MaxIdleConnection) sqlDB.SetMaxOpenConns(dbConf.MaxOpenConnection) return db, nil } @@ -119,7 +119,7 @@ func beforeCallback(operation string) func(db *gorm.DB) { if !trace.SpanFromContext(db.Statement.Context).IsRecording() { return } - _, span := tracer.Start(db.Statement.Context, operation) + _, span := tracerOtel.Start(db.Statement.Context, operation) db.InstanceSet(tracingSpanKey, span) } } diff --git a/internal/store/postgres/tenant/project_repository.go b/internal/store/postgres/tenant/project_repository.go index e0616b8fb3..a118c9c3f6 100644 --- a/internal/store/postgres/tenant/project_repository.go +++ b/internal/store/postgres/tenant/project_repository.go @@ -2,19 +2,18 @@ package tenant import ( "context" - 
"encoding/json" "time" "github.com/google/uuid" - "gorm.io/datatypes" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" ) type ProjectRepository struct { - db *gorm.DB + pool *pgxpool.Pool } const ( @@ -22,71 +21,52 @@ const ( ) type Project struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` - Name string `gorm:"not null;unique"` - Config datatypes.JSON + ID uuid.UUID + Name string + Config map[string]string - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` - DeletedAt gorm.DeletedAt + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt time.Time } -func NewProject(spec *tenant.Project) (Project, error) { - jsonBytes, err := json.Marshal(spec.GetConfigs()) - if err != nil { - return Project{}, err - } - project := Project{ - Name: spec.Name().String(), - Config: jsonBytes, - } - return project, nil -} - -func (p Project) ToTenantProject() (*tenant.Project, error) { - var conf map[string]string - err := json.Unmarshal(p.Config, &conf) - if err != nil { - return nil, err - } - return tenant.NewProject(p.Name, conf) +func (p *Project) toTenantProject() (*tenant.Project, error) { + return tenant.NewProject(p.Name, p.Config) } func (repo ProjectRepository) Save(ctx context.Context, tenantProject *tenant.Project) error { - project, err := NewProject(tenantProject) + _, err := repo.get(ctx, tenantProject.Name()) if err != nil { - return err - } - - _, err = repo.get(ctx, tenantProject.Name()) - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - insertProjectQuery := `INSERT INTO project (name, config, created_at, updated_at) VALUES (?, ?, now(), now())` - return repo.db.WithContext(ctx).Exec(insertProjectQuery, project.Name, project.Config).Error + if errors.Is(err, pgx.ErrNoRows) { + insertProjectQuery := `INSERT INTO project (name, config, created_at, 
updated_at) VALUES ($1, $2, now(), now())` + _, err = repo.pool.Exec(ctx, insertProjectQuery, tenantProject.Name(), tenantProject.GetConfigs()) + return errors.WrapIfErr(tenant.EntityProject, "unable to save project", err) } return errors.Wrap(tenant.EntityProject, "unable to save project", err) } - updateProjectQuery := `UPDATE project SET config=?, updated_at=now() WHERE name=?` - return repo.db.WithContext(ctx).Exec(updateProjectQuery, project.Config, project.Name).Error + updateProjectQuery := `UPDATE project SET config=$1, updated_at=now() WHERE name=$2` + _, err = repo.pool.Exec(ctx, updateProjectQuery, tenantProject.GetConfigs(), tenantProject.Name()) + return errors.WrapIfErr(tenant.EntityProject, "unable to update project", err) } func (repo ProjectRepository) GetByName(ctx context.Context, name tenant.ProjectName) (*tenant.Project, error) { project, err := repo.get(ctx, name) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(tenant.EntityProject, "no record for "+name.String()) + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(tenant.EntityProject, "no project for "+name.String()) } return nil, errors.Wrap(tenant.EntityProject, "error while getting project", err) } - return project.ToTenantProject() + return project.toTenantProject() } func (repo ProjectRepository) get(ctx context.Context, name tenant.ProjectName) (Project, error) { var project Project - getProjectByNameQuery := `SELECT ` + projectColumns + ` FROM project WHERE name = ? AND deleted_at IS NULL` - err := repo.db.WithContext(ctx).Raw(getProjectByNameQuery, name.String()).First(&project).Error + getProjectByNameQuery := `SELECT ` + projectColumns + ` FROM project WHERE name = $1 AND deleted_at IS NULL` + err := repo.pool.QueryRow(ctx, getProjectByNameQuery, name.String()). 
+ Scan(&project.ID, &project.Name, &project.Config, &project.CreatedAt, &project.UpdatedAt) if err != nil { return Project{}, err } @@ -94,26 +74,34 @@ func (repo ProjectRepository) get(ctx context.Context, name tenant.ProjectName) } func (repo ProjectRepository) GetAll(ctx context.Context) ([]*tenant.Project, error) { - var projects []Project + var projects []*tenant.Project getAllProjects := `SELECT ` + projectColumns + ` FROM project WHERE deleted_at IS NULL` - if err := repo.db.WithContext(ctx).Raw(getAllProjects).Scan(&projects).Error; err != nil { + rows, err := repo.pool.Query(ctx, getAllProjects) + if err != nil { return nil, errors.Wrap(tenant.EntityProject, "error in GetAll", err) } + defer rows.Close() - var tenantProjects []*tenant.Project - for _, proj := range projects { - tenantProject, err := proj.ToTenantProject() + for rows.Next() { + var prj Project + err = rows.Scan(&prj.ID, &prj.Name, &prj.Config, &prj.CreatedAt, &prj.UpdatedAt) + if err != nil { + return nil, errors.Wrap(tenant.EntityProject, "error in GetAll", err) + } + + project, err := prj.toTenantProject() if err != nil { return nil, err } - tenantProjects = append(tenantProjects, tenantProject) + projects = append(projects, project) } - return tenantProjects, nil + + return projects, nil } -func NewProjectRepository(db *gorm.DB) *ProjectRepository { +func NewProjectRepository(pool *pgxpool.Pool) *ProjectRepository { return &ProjectRepository{ - db: db, + pool: pool, } } diff --git a/internal/store/postgres/tenant/project_repository_test.go b/internal/store/postgres/tenant/project_repository_test.go index 45e02cc31b..c2804eff01 100644 --- a/internal/store/postgres/tenant/project_repository_test.go +++ b/internal/store/postgres/tenant/project_repository_test.go @@ -6,8 +6,8 @@ import ( "context" "testing" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "github.com/odpf/optimus/core/tenant" postgres 
"github.com/odpf/optimus/internal/store/postgres/tenant" @@ -25,11 +25,11 @@ func TestPostgresProjectRepository(t *testing.T) { }) ctx := context.Background() - dbSetup := func() *gorm.DB { - dbConn := setup.TestDB() - setup.TruncateTables(dbConn) + dbSetup := func() *pgxpool.Pool { + dbPool := setup.TestPool() + setup.TruncateTablesWith(dbPool) - return dbConn + return dbPool } t.Run("Save", func(t *testing.T) { @@ -108,7 +108,7 @@ func TestPostgresProjectRepository(t *testing.T) { _, err := repo.GetByName(ctx, proj.Name()) assert.NotNil(t, err) - assert.EqualError(t, err, "not found for entity project: no record for t-optimus-1") + assert.EqualError(t, err, "not found for entity project: no project for t-optimus-1") }) t.Run("returns the saved project with same name", func(t *testing.T) { db := dbSetup() diff --git a/internal/store/postgres/tracer.go b/internal/store/postgres/tracer.go new file mode 100644 index 0000000000..b35d5b4f81 --- /dev/null +++ b/internal/store/postgres/tracer.go @@ -0,0 +1,198 @@ +package postgres + +import ( + "context" + + "github.com/jackc/pgx/v5" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.10.0" + "go.opentelemetry.io/otel/trace" +) + +// tracer is a wrapper around the pgx tracer interfaces which instrument +// queries. +type tracer struct { + tracer trace.Tracer + attrs []attribute.KeyValue + logSQLStatement bool +} + +// NewTracer returns a new Tracer. +func newTracer() *tracer { + return &tracer{ + tracer: otel.Tracer("store/postgres"), + attrs: []attribute.KeyValue{ + semconv.DBSystemPostgreSQL, + }, + } +} + +func recordError(span trace.Span, err error) { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } +} + +// TraceQueryStart is called at the beginning of Query, QueryRow, and Exec calls. +// The returned context is used for the rest of the call and will be passed to TraceQueryEnd. 
+func (t *tracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryStartData) context.Context { + if !trace.SpanFromContext(ctx).IsRecording() { + return ctx + } + + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(t.attrs...), + trace.WithAttributes(semconv.DBStatementKey.String(data.SQL)), + } + + spanName := "query " + data.SQL + ctx, _ = t.tracer.Start(ctx, spanName, opts...) + + return ctx +} + +// TraceQueryEnd is called at the end of Query, QueryRow, and Exec calls. +func (t *tracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) { + span := trace.SpanFromContext(ctx) + recordError(span, data.Err) + + span.End() +} + +// TraceCopyFromStart is called at the beginning of CopyFrom calls. The +// returned context is used for the rest of the call and will be passed to +// TraceCopyFromEnd. +func (t *tracer) TraceCopyFromStart(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context { + if !trace.SpanFromContext(ctx).IsRecording() { + return ctx + } + + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(t.attrs...), + trace.WithAttributes(attribute.String("db.table", data.TableName.Sanitize())), + } + + ctx, _ = t.tracer.Start(ctx, "copy_from "+data.TableName.Sanitize(), opts...) + + return ctx +} + +// TraceCopyFromEnd is called at the end of CopyFrom calls. +func (t *tracer) TraceCopyFromEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromEndData) { + span := trace.SpanFromContext(ctx) + recordError(span, data.Err) + + span.End() +} + +// TraceBatchStart is called at the beginning of SendBatch calls. The returned +// context is used for the rest of the call and will be passed to +// TraceBatchQuery and TraceBatchEnd. 
+func (t *tracer) TraceBatchStart(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchStartData) context.Context { + if !trace.SpanFromContext(ctx).IsRecording() { + return ctx + } + + var size int + if b := data.Batch; b != nil { + size = b.Len() + } + + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(t.attrs...), + trace.WithAttributes(attribute.Int("pgx.batch.size", size)), + } + + ctx, _ = t.tracer.Start(ctx, "batch start", opts...) + + return ctx +} + +// TraceBatchQuery is called at the after each query in a batch. +func (t *tracer) TraceBatchQuery(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchQueryData) { + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(t.attrs...), + trace.WithAttributes(semconv.DBStatementKey.String(data.SQL)), + } + + spanName := "batch query " + data.SQL + _, span := t.tracer.Start(ctx, spanName, opts...) + recordError(span, data.Err) + + span.End() +} + +// TraceBatchEnd is called at the end of SendBatch calls. +func (t *tracer) TraceBatchEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchEndData) { + span := trace.SpanFromContext(ctx) + recordError(span, data.Err) + + span.End() +} + +// TraceConnectStart is called at the beginning of Connect and ConnectConfig +// calls. The returned context is used for the rest of the call and will be +// passed to TraceConnectEnd. 
+func (t *tracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context { + if !trace.SpanFromContext(ctx).IsRecording() { + return ctx + } + + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(t.attrs...), + } + + if data.ConnConfig != nil { + opts = append(opts, + trace.WithAttributes(attribute.String("database.host", data.ConnConfig.Host)), + trace.WithAttributes(attribute.Int("database.port", int(data.ConnConfig.Port))), + trace.WithAttributes(attribute.String("database.user", data.ConnConfig.User))) + } + + ctx, _ = t.tracer.Start(ctx, "connect", opts...) + + return ctx +} + +// TraceConnectEnd is called at the end of Connect and ConnectConfig calls. +func (t *tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) { + span := trace.SpanFromContext(ctx) + recordError(span, data.Err) + + span.End() +} + +// TracePrepareStart is called at the beginning of Prepare calls. The returned +// context is used for the rest of the call and will be passed to +// TracePrepareEnd. +func (t *tracer) TracePrepareStart(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareStartData) context.Context { + if !trace.SpanFromContext(ctx).IsRecording() { + return ctx + } + + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(semconv.DBStatementKey.String(data.SQL)), + } + + spanName := "prepare " + data.Name + ctx, _ = t.tracer.Start(ctx, spanName, opts...) + + return ctx +} + +// TracePrepareEnd is called at the end of Prepare calls. 
+func (t *tracer) TracePrepareEnd(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareEndData) { + span := trace.SpanFromContext(ctx) + recordError(span, data.Err) + + span.End() +} diff --git a/server/optimus.go b/server/optimus.go index 06bf16b3e4..c363ec2a96 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -11,6 +11,7 @@ import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/hashicorp/go-hclog" hPlugin "github.com/hashicorp/go-plugin" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/salt/log" "github.com/prometheus/client_golang/prometheus" slackapi "github.com/slack-go/slack" @@ -54,6 +55,7 @@ type OptimusServer struct { conf config.ServerConfig logger log.Logger + dbPool *pgxpool.Pool dbConn *gorm.DB key *[keyLength]byte @@ -168,6 +170,12 @@ func (s *OptimusServer) setupDB() error { if err != nil { return fmt.Errorf("postgres.Connect: %w", err) } + + s.dbPool, err = postgres.Open(s.conf.Serve.DB) + if err != nil { + return fmt.Errorf("postgres.Open: %w", err) + } + return nil } @@ -236,7 +244,7 @@ func (s *OptimusServer) Shutdown() { func (s *OptimusServer) setupHandlers() error { // Tenant Bounded Context Setup - tProjectRepo := tenant.NewProjectRepository(s.dbConn) + tProjectRepo := tenant.NewProjectRepository(s.dbPool) tNamespaceRepo := tenant.NewNamespaceRepository(s.dbConn) tSecretRepo := tenant.NewSecretRepository(s.dbConn) diff --git a/tests/setup/database.go b/tests/setup/database.go index de5f06d1f9..f12119f9ad 100644 --- a/tests/setup/database.go +++ b/tests/setup/database.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/salt/log" "gorm.io/gorm" @@ -16,6 +17,7 @@ import ( var ( optimusDB *gorm.DB + dbPool *pgxpool.Pool initDBOnce sync.Once ) @@ -25,6 +27,12 @@ func TestDB() *gorm.DB { return optimusDB } +func TestPool() *pgxpool.Pool { + initDBOnce.Do(migrateDB) + + return dbPool +} + func mustReadDBConfig() string { dbURL, ok := 
os.LookupEnv("TEST_OPTIMUS_DB_URL") if ok { @@ -41,8 +49,8 @@ func migrateDB() { dbConf := config.DBConfig{ DSN: dbURL, - MaxIdleConnection: 1, - MaxOpenConnection: 1, + MinOpenConnection: 1, + MaxOpenConnection: 2, } dbConn, err := postgres.Connect(dbConf, os.Stdout) if err != nil { @@ -63,6 +71,12 @@ func migrateDB() { panic(err) } + pool, err := postgres.Open(dbConf) + if err != nil { + panic(err) + } + dbPool = pool + optimusDB = dbConn } @@ -126,3 +140,26 @@ func TruncateTables(db *gorm.DB) { db.Exec("TRUNCATE TABLE job_upstream CASCADE") } + +func TruncateTablesWith(pool *pgxpool.Pool) { + ctx := context.Background() + pool.Exec(ctx, "TRUNCATE TABLE backup_old, resource_old CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE backup CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE replay CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE resource CASCADE") + + pool.Exec(ctx, "TRUNCATE TABLE job_run CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE sensor_run CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE task_run CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE hook_run CASCADE") + + pool.Exec(ctx, "TRUNCATE TABLE job CASCADE") + + pool.Exec(ctx, "TRUNCATE TABLE secret CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE namespace CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE project CASCADE") + + pool.Exec(ctx, "TRUNCATE TABLE job_deployment CASCADE") + + pool.Exec(ctx, "TRUNCATE TABLE job_upstream CASCADE") +} From 74a29c8635b34a75a55097b0c4877841caefe04a Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Wed, 14 Dec 2022 16:09:52 +0530 Subject: [PATCH 02/25] refactor: migrate namespace repository --- .../000042_update_namespace_table.down.sql | 4 + .../000042_update_namespace_table.up.sql | 20 +++ .../postgres/tenant/namespace_repository.go | 116 ++++++++---------- .../tenant/namespace_repository_test.go | 14 +-- .../postgres/tenant/project_repository.go | 2 +- tests/setup/database.go | 3 + 6 files changed, 85 insertions(+), 74 deletions(-) create mode 100644 
internal/store/postgres/migrations/000042_update_namespace_table.down.sql create mode 100644 internal/store/postgres/migrations/000042_update_namespace_table.up.sql diff --git a/internal/store/postgres/migrations/000042_update_namespace_table.down.sql b/internal/store/postgres/migrations/000042_update_namespace_table.down.sql new file mode 100644 index 0000000000..14cfa4f7f0 --- /dev/null +++ b/internal/store/postgres/migrations/000042_update_namespace_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS namespace; + +ALTER TABLE IF EXISTS namespace_old + RENAME TO namespace; \ No newline at end of file diff --git a/internal/store/postgres/migrations/000042_update_namespace_table.up.sql b/internal/store/postgres/migrations/000042_update_namespace_table.up.sql new file mode 100644 index 0000000000..0b1423669d --- /dev/null +++ b/internal/store/postgres/migrations/000042_update_namespace_table.up.sql @@ -0,0 +1,20 @@ +ALTER TABLE IF EXISTS namespace + RENAME TO namespace_old; + +CREATE TABLE IF NOT EXISTS namespace ( + name VARCHAR(100) NOT NULL, + id UUID NOT NULL UNIQUE DEFAULT uuid_generate_v4(), + config JSONB, + project_name VARCHAR(100) NOT NULL REFERENCES project (name), + + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + deleted_at TIMESTAMP WITH TIME ZONE, + + PRIMARY KEY (project_name, name) +); + +INSERT INTO namespace (name, id, config, project_name, created_at, updated_at, deleted_at) +SELECT n.name, n.id, n.config, p.name, n.created_at, n.updated_at, n.deleted_at +FROM namespace_old n join project_old p ON n.project_id = p.id +WHERE n.deleted_at IS NULL; diff --git a/internal/store/postgres/tenant/namespace_repository.go b/internal/store/postgres/tenant/namespace_repository.go index 0da71aa5bd..f546c04650 100644 --- a/internal/store/postgres/tenant/namespace_repository.go +++ b/internal/store/postgres/tenant/namespace_repository.go @@ -2,108 +2,85 @@ package tenant import ( "context" - "encoding/json" + 
"fmt" "time" "github.com/google/uuid" - "gorm.io/datatypes" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" ) type NamespaceRepository struct { - db *gorm.DB + pool *pgxpool.Pool } const ( - namespaceColumns = `n.id, n.name, n.config, p.name as project_name, n.created_at, n.updated_at` + namespaceColumns = `id, name, config, project_name, created_at, updated_at` ) type Namespace struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` - Name string `gorm:"not null;unique"` - Config datatypes.JSON + ID uuid.UUID + Name string + Config map[string]string - ProjectName string `json:"project_name"` + ProjectName string - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` - DeletedAt gorm.DeletedAt + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt time.Time } -func NewNamespace(spec *tenant.Namespace) (Namespace, error) { - jsonBytes, err := json.Marshal(spec.GetConfigs()) - if err != nil { - return Namespace{}, err - } - namespace := Namespace{ - Name: spec.Name().String(), - ProjectName: spec.ProjectName().String(), - Config: jsonBytes, - } - return namespace, nil -} - -func (n Namespace) ToTenantNamespace() (*tenant.Namespace, error) { - var conf map[string]string - err := json.Unmarshal(n.Config, &conf) - if err != nil { - return nil, err - } +func (n *Namespace) toTenantNamespace() (*tenant.Namespace, error) { projName, err := tenant.ProjectNameFrom(n.ProjectName) if err != nil { return nil, err } - return tenant.NewNamespace(n.Name, projName, conf) + return tenant.NewNamespace(n.Name, projName, n.Config) } -func (n *NamespaceRepository) Save(ctx context.Context, tenantNamespace *tenant.Namespace) error { - namespace, err := NewNamespace(tenantNamespace) +func (n *NamespaceRepository) Save(ctx context.Context, namespace *tenant.Namespace) error { + _, err := 
n.get(ctx, namespace.ProjectName(), namespace.Name()) if err != nil { - return errors.Wrap(tenant.EntityNamespace, "not able to convert namespace", err) - } + fmt.Println(err) + if errors.Is(err, pgx.ErrNoRows) { + insertNamespace := `INSERT INTO namespace (name, config, project_name, created_at, updated_at) +VALUES ($1, $2, $3, now(), now())` - _, err = n.get(ctx, tenantNamespace.ProjectName(), tenantNamespace.Name()) - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - insertNamespace := `INSERT INTO namespace (name, config, project_id, updated_at, created_at) -SELECT ?, ?, id, now(), now() FROM project p WHERE p.name = ?;` - return n.db.WithContext(ctx). - Exec(insertNamespace, namespace.Name, namespace.Config, namespace.ProjectName).Error + _, err = n.pool.Exec(ctx, insertNamespace, namespace.Name(), namespace.GetConfigs(), namespace.ProjectName()) + return errors.WrapIfErr(tenant.EntityNamespace, "unable to save namespace", err) } - return errors.Wrap(tenant.EntityProject, "unable to save project", err) + return errors.Wrap(tenant.EntityNamespace, "unable to save namespace", err) } - if len(tenantNamespace.GetConfigs()) == 0 { + if len(namespace.GetConfigs()) == 0 { return errors.NewError(errors.ErrFailedPrecond, tenant.EntityNamespace, "empty config") } - updateNamespaceQuery := `UPDATE namespace SET config=?, updated_at=now() FROM namespace n -JOIN project p ON p.id = n.project_id WHERE p.name = ? AND n.name=?` - return n.db.WithContext(ctx). 
- Exec(updateNamespaceQuery, namespace.Config, namespace.ProjectName, namespace.Name).Error + updateNamespaceQuery := `UPDATE namespace n SET config=$1, updated_at=now() WHERE n.name = $2 AND n.project_name=$3` + _, err = n.pool.Exec(ctx, updateNamespaceQuery, namespace.GetConfigs(), namespace.Name(), namespace.ProjectName()) + return errors.WrapIfErr(tenant.EntityProject, "unable to update namespace", err) } func (n *NamespaceRepository) GetByName(ctx context.Context, projectName tenant.ProjectName, name tenant.NamespaceName) (*tenant.Namespace, error) { ns, err := n.get(ctx, projectName, name) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return nil, errors.NotFound(tenant.EntityNamespace, "no record for "+name.String()) } return nil, errors.Wrap(tenant.EntityNamespace, "error while getting project", err) } - return ns.ToTenantNamespace() + return ns.toTenantNamespace() } func (n *NamespaceRepository) get(ctx context.Context, projName tenant.ProjectName, name tenant.NamespaceName) (Namespace, error) { var namespace Namespace - getNamespaceByNameQuery := `SELECT ` + namespaceColumns + ` FROM namespace n -JOIN PROJECT p ON p.id = n.project_id WHERE p.name = ? AND n.name = ? AND n.deleted_at IS NULL` - err := n.db.WithContext(ctx).Raw(getNamespaceByNameQuery, projName.String(), name.String()). - First(&namespace).Error + getNamespaceByNameQuery := `SELECT ` + namespaceColumns + ` FROM namespace WHERE project_name = $1 AND name = $2 AND deleted_at IS NULL` + err := n.pool.QueryRow(ctx, getNamespaceByNameQuery, projName, name). + Scan(&namespace.ID, &namespace.Name, &namespace.Config, &namespace.ProjectName, &namespace.CreatedAt, &namespace.UpdatedAt) if err != nil { return Namespace{}, err } @@ -111,28 +88,35 @@ JOIN PROJECT p ON p.id = n.project_id WHERE p.name = ? AND n.name = ? 
AND n.dele } func (n *NamespaceRepository) GetAll(ctx context.Context, projectName tenant.ProjectName) ([]*tenant.Namespace, error) { - var namespaces []Namespace + var namespaces []*tenant.Namespace + getAllNamespaceInProject := `SELECT ` + namespaceColumns + ` FROM namespace n -JOIN project p ON p.id = n.project_id WHERE p.name = ? AND n.deleted_at IS NULL` - err := n.db.WithContext(ctx).Raw(getAllNamespaceInProject, projectName.String()). - Scan(&namespaces).Error +WHERE project_name = $1 AND deleted_at IS NULL` + rows, err := n.pool.Query(ctx, getAllNamespaceInProject, projectName) if err != nil { return nil, errors.Wrap(tenant.EntityNamespace, "error in GetAll", err) } + defer rows.Close() - var tenantNamespace []*tenant.Namespace - for _, ns := range namespaces { - tenantNS, err := ns.ToTenantNamespace() + for rows.Next() { + var ns Namespace + err = rows.Scan(&ns.ID, &ns.Name, &ns.Config, &ns.ProjectName, &ns.CreatedAt, &ns.UpdatedAt) + if err != nil { + return nil, errors.Wrap(tenant.EntityNamespace, "error in GetAll", err) + } + + namespace, err := ns.toTenantNamespace() if err != nil { return nil, err } - tenantNamespace = append(tenantNamespace, tenantNS) + namespaces = append(namespaces, namespace) } - return tenantNamespace, nil + + return namespaces, nil } -func NewNamespaceRepository(db *gorm.DB) *NamespaceRepository { +func NewNamespaceRepository(pool *pgxpool.Pool) *NamespaceRepository { return &NamespaceRepository{ - db: db, + pool: pool, } } diff --git a/internal/store/postgres/tenant/namespace_repository_test.go b/internal/store/postgres/tenant/namespace_repository_test.go index c861a1f943..b27f50dc21 100644 --- a/internal/store/postgres/tenant/namespace_repository_test.go +++ b/internal/store/postgres/tenant/namespace_repository_test.go @@ -6,8 +6,8 @@ import ( "context" "testing" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "github.com/odpf/optimus/core/tenant" postgres 
"github.com/odpf/optimus/internal/store/postgres/tenant" @@ -29,17 +29,17 @@ func TestPostgresNamespaceRepository(t *testing.T) { }) ctx := context.Background() - dbSetup := func() *gorm.DB { - dbConn := setup.TestDB() - setup.TruncateTables(dbConn) + dbSetup := func() *pgxpool.Pool { + pool := setup.TestPool() + setup.TruncateTablesWith(pool) - prjRepo := postgres.NewProjectRepository(dbConn) + prjRepo := postgres.NewProjectRepository(pool) err := prjRepo.Save(ctx, proj) if err != nil { panic(err) } - return dbConn + return pool } t.Run("Save", func(t *testing.T) { @@ -135,7 +135,7 @@ func TestPostgresNamespaceRepository(t *testing.T) { _, err := repo.GetByName(ctx, proj.Name(), ns.Name()) assert.NotNil(t, err) - assert.EqualError(t, err, "not found for entity namespace: no record for n-optimus-1") + assert.ErrorContains(t, err, "no record for n-optimus-1") }) t.Run("returns the saved namespace with same name", func(t *testing.T) { db := dbSetup() diff --git a/internal/store/postgres/tenant/project_repository.go b/internal/store/postgres/tenant/project_repository.go index a118c9c3f6..b0a3b37955 100644 --- a/internal/store/postgres/tenant/project_repository.go +++ b/internal/store/postgres/tenant/project_repository.go @@ -65,7 +65,7 @@ func (repo ProjectRepository) get(ctx context.Context, name tenant.ProjectName) var project Project getProjectByNameQuery := `SELECT ` + projectColumns + ` FROM project WHERE name = $1 AND deleted_at IS NULL` - err := repo.pool.QueryRow(ctx, getProjectByNameQuery, name.String()). + err := repo.pool.QueryRow(ctx, getProjectByNameQuery, name). 
Scan(&project.ID, &project.Name, &project.Config, &project.CreatedAt, &project.UpdatedAt) if err != nil { return Project{}, err diff --git a/tests/setup/database.go b/tests/setup/database.go index f12119f9ad..047083ffa8 100644 --- a/tests/setup/database.go +++ b/tests/setup/database.go @@ -101,7 +101,9 @@ func dropTables(db *gorm.DB) error { "resource", "resource_old", "namespace", + "namespace_old", "project", + "project_old", "migration_steps", } var errMsgs []string @@ -158,6 +160,7 @@ func TruncateTablesWith(pool *pgxpool.Pool) { pool.Exec(ctx, "TRUNCATE TABLE secret CASCADE") pool.Exec(ctx, "TRUNCATE TABLE namespace CASCADE") pool.Exec(ctx, "TRUNCATE TABLE project CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE project_old, namespace_old CASCADE") pool.Exec(ctx, "TRUNCATE TABLE job_deployment CASCADE") From 95c4c8dd70bb9977978a2e9d43eb32ef08d73c61 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Thu, 15 Dec 2022 12:01:13 +0530 Subject: [PATCH 03/25] refactor: move secret repository to pgx --- .../000043_update_secret_table.down.sql | 4 + .../000043_update_secret_table.up.sql | 27 +++ .../postgres/tenant/secret_repository.go | 216 +++++++++--------- .../postgres/tenant/secret_repository_test.go | 14 +- tests/setup/database.go | 3 +- 5 files changed, 144 insertions(+), 120 deletions(-) create mode 100644 internal/store/postgres/migrations/000043_update_secret_table.down.sql create mode 100644 internal/store/postgres/migrations/000043_update_secret_table.up.sql diff --git a/internal/store/postgres/migrations/000043_update_secret_table.down.sql b/internal/store/postgres/migrations/000043_update_secret_table.down.sql new file mode 100644 index 0000000000..cd85d4e867 --- /dev/null +++ b/internal/store/postgres/migrations/000043_update_secret_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS secret; + +ALTER TABLE IF EXISTS secret_old + RENAME TO secret; \ No newline at end of file diff --git a/internal/store/postgres/migrations/000043_update_secret_table.up.sql 
b/internal/store/postgres/migrations/000043_update_secret_table.up.sql new file mode 100644 index 0000000000..d865fe8997 --- /dev/null +++ b/internal/store/postgres/migrations/000043_update_secret_table.up.sql @@ -0,0 +1,27 @@ +ALTER TABLE IF EXISTS secret + RENAME TO secret_old; + +CREATE TABLE IF NOT EXISTS secret ( + name VARCHAR(100) NOT NULL, + id UUID NOT NULL UNIQUE DEFAULT uuid_generate_v4(), + value TEXT NOT NULL, + type VARCHAR(15) NOT NULL, + + project_name VARCHAR(100) NOT NULL, + namespace_name VARCHAR(100), + + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + deleted_at TIMESTAMP WITH TIME ZONE, + + PRIMARY KEY (project_name, name), + FOREIGN KEY (project_name) REFERENCES project (name), + FOREIGN KEY (project_name, namespace_name) REFERENCES namespace (project_name, name) +); + +INSERT INTO secret (name, id, value, type, project_name, namespace_name, created_at, updated_at, deleted_at) +SELECT s.name, s.id, s.value, s.type, p.name, n.name, s.created_at, s.updated_at, s.deleted_at +FROM secret_old s + LEFT JOIN namespace_old n ON n.id = s.id + JOIN project_old p on p.id = s.project_id +WHERE n.deleted_at IS NULL; diff --git a/internal/store/postgres/tenant/secret_repository.go b/internal/store/postgres/tenant/secret_repository.go index f0b4be83a3..05499e9aee 100644 --- a/internal/store/postgres/tenant/secret_repository.go +++ b/internal/store/postgres/tenant/secret_repository.go @@ -2,12 +2,15 @@ package tenant import ( "context" + "database/sql" "encoding/base64" "time" "github.com/google/uuid" "github.com/gtank/cryptopasta" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/core/tenant/dto" @@ -15,52 +18,45 @@ import ( ) type SecretRepository struct { - db *gorm.DB + pool *pgxpool.Pool } const ( - secretColumns = `s.id, s.name, s.value, s.type, p.name as project_name, n.name as 
namespace_name, s.created_at, s.updated_at` + secretColumns = `id, name, value, type, project_name, namespace_name, created_at, updated_at` getAllSecretsInProject = `SELECT ` + secretColumns + ` -FROM secret s -JOIN project p ON p.id = s.project_id -LEFT JOIN namespace n ON n.id = s.namespace_id -WHERE p.name = ?` - - secretCTE = `WITH cte_tenant AS ( -SELECT p.id AS project_id, p.name AS project_name, n.id AS namespace_id, n.name AS namespace_name -FROM project p - LEFT JOIN namespace n - ON p.id = n.project_id and n.name = ? -WHERE p.name = ? -) ` +FROM secret s WHERE project_name = $1` ) type Secret struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` + ID uuid.UUID - Name string `gorm:"not null;default:null"` + Name string Value string Type string - ProjectName string `json:"project_name"` - NamespaceName string `json:"namespace_name"` + ProjectName string + NamespaceName sql.NullString - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` + CreatedAt time.Time + UpdatedAt time.Time } func NewSecret(secret *tenant.Secret) Secret { // base64 for storing safely in db base64cipher := base64.StdEncoding.EncodeToString([]byte(secret.EncodedValue())) + nsName := sql.NullString{} + if secret.NamespaceName() != "" { + nsName = sql.NullString{String: secret.NamespaceName(), Valid: true} + } return Secret{ Name: secret.Name().String(), Value: base64cipher, Type: secret.Type().String(), ProjectName: secret.ProjectName().String(), - NamespaceName: secret.NamespaceName(), + NamespaceName: nsName, } } @@ -81,7 +77,12 @@ func (s Secret) ToTenantSecret() (*tenant.Secret, error) { return nil, err } - return tenant.NewSecret(s.Name, typ, string(encrypted), projName, s.NamespaceName) + nsName := "" + if s.NamespaceName.Valid { + nsName = s.NamespaceName.String + } + + return tenant.NewSecret(s.Name, typ, string(encrypted), projName, nsName) } func (s Secret) ToSecretInfo() (*dto.SecretInfo, 
error) { @@ -98,11 +99,16 @@ func (s Secret) ToSecretInfo() (*dto.SecretInfo, error) { return nil, err } + nsName := "" + if s.NamespaceName.Valid { + nsName = s.NamespaceName.String + } + return &dto.SecretInfo{ Name: s.Name, Digest: base64encoded, Type: typ, - Namespace: s.NamespaceName, + Namespace: nsName, UpdatedAt: s.UpdatedAt, }, nil } @@ -110,49 +116,41 @@ func (s Secret) ToSecretInfo() (*dto.SecretInfo, error) { func (s SecretRepository) Save(ctx context.Context, tenantSecret *tenant.Secret) error { secret := NewSecret(tenantSecret) - _, err := s.get(ctx, tenantSecret.ProjectName(), tenantSecret.Name()) + err := s.get(ctx, tenantSecret.ProjectName(), tenantSecret.Name()) if err == nil { return errors.NewError(errors.ErrAlreadyExists, tenant.EntitySecret, "secret already exists") } - if !errors.Is(err, gorm.ErrRecordNotFound) { + if !errors.Is(err, pgx.ErrNoRows) { return errors.Wrap(tenant.EntitySecret, "unable to save secret", err) } - insertSecret := secretCTE + `INSERT INTO secret (name, value, type, project_id, namespace_id, updated_at, created_at) -SELECT ?, ?, ?, t.project_id, t.namespace_id, NOW(), NOW() -FROM cte_tenant t` - - result := s.db.WithContext(ctx).Exec(insertSecret, secret.NamespaceName, secret.ProjectName, - secret.Name, secret.Value, secret.Type) + insertSecret := `INSERT INTO secret (name, value, type, project_name, namespace_name, created_at, updated_at) +VALUES ($1, $2, $3, $4, $5, NOW(), NOW())` + _, err = s.pool.Exec(ctx, insertSecret, secret.Name, secret.Value, secret.Type, secret.ProjectName, secret.NamespaceName) - if result.Error != nil { + if err != nil { return errors.Wrap(tenant.EntitySecret, "unable to save secret", err) } - if result.RowsAffected == 0 { - return errors.InternalError(tenant.EntitySecret, "unable to save, rows affected 0", nil) - } return nil } func (s SecretRepository) Update(ctx context.Context, tenantSecret *tenant.Secret) error { secret := NewSecret(tenantSecret) - _, err := s.get(ctx, 
tenantSecret.ProjectName(), tenantSecret.Name()) + err := s.get(ctx, tenantSecret.ProjectName(), tenantSecret.Name()) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return errors.NotFound(tenant.EntitySecret, "unable to update, secret not found for "+tenantSecret.Name().String()) } return errors.Wrap(tenant.EntitySecret, "unable to update secret", err) } - updateSecret := `UPDATE secret s -SET value=?, type=?, updated_at=NOW() -FROM project p -WHERE p.id = s.project_id AND p.name = ? AND s.name=?` + updateSecret := `UPDATE secret SET value=$1, type=$2, updated_at=NOW() +WHERE project_name = $3 AND name=$4` - err = s.db.WithContext(ctx).Exec(updateSecret, secret.Value, secret.Type, secret.ProjectName, secret.Name).Error + _, err = s.pool.Exec(ctx, updateSecret, secret.Value, secret.Type, secret.ProjectName, secret.Name) if err != nil { return errors.Wrap(tenant.EntitySecret, "unable to update secret", err) } @@ -163,69 +161,63 @@ WHERE p.id = s.project_id AND p.name = ? AND s.name=?` func (s SecretRepository) Get(ctx context.Context, projName tenant.ProjectName, nsName string, name tenant.SecretName) (*tenant.Secret, error) { var secret Secret - getSecretByNameQuery := secretCTE + `SELECT s.id, s.name, s.value, s.type, t.project_name, t.namespace_name, s.created_at, s.updated_at -FROM secret s - JOIN cte_tenant t - ON t.project_id = s.project_id - AND (t.namespace_id IS NULL OR s.namespace_id IS NULL OR t.namespace_id = s.namespace_id ) -WHERE s.name = ?` - err := s.db.WithContext(ctx).Raw(getSecretByNameQuery, nsName, projName.String(), name). - First(&secret).Error + getSecretByNameQuery := `SELECT ` + secretColumns + ` +FROM secret s WHERE name = $1 +AND project_name = $2 +AND (namespace_name IS NULL OR namespace_name = $3)` + + err := s.pool.QueryRow(ctx, getSecretByNameQuery, name, projName, nsName). 
+ Scan(&secret.ID, &secret.Name, &secret.Value, &secret.Type, + &secret.ProjectName, &secret.NamespaceName, &secret.CreatedAt, &secret.UpdatedAt) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return nil, errors.NotFound(tenant.EntitySecret, "no record for "+name.String()) } - return nil, errors.Wrap(tenant.EntitySecret, "error while getting project", err) + return nil, errors.Wrap(tenant.EntitySecret, "error while getting secret", err) } return secret.ToTenantSecret() } // get is scoped only at project level, used for db operations -func (s SecretRepository) get(ctx context.Context, projName tenant.ProjectName, name tenant.SecretName) (Secret, error) { // nolint: unparam - var secret Secret - - getSecretByNameAtProject := `SELECT s.name -FROM secret s -JOIN project p -ON p.id = s.project_id -WHERE s.name = ? -AND p.name = ? -` - err := s.db.WithContext(ctx).Raw(getSecretByNameAtProject, name.String(), projName.String()). - First(&secret).Error - - return secret, err +func (s SecretRepository) get(ctx context.Context, projName tenant.ProjectName, name tenant.SecretName) error { + var dummyName string + getSecretByNameAtProject := `SELECT s.name FROM secret s WHERE name = $1 AND project_name = $2` + err := s.pool.QueryRow(ctx, getSecretByNameAtProject, name, projName).Scan(&dummyName) + return err } func (s SecretRepository) GetAll(ctx context.Context, projName tenant.ProjectName, nsName string) ([]*tenant.Secret, error) { - var secrets []Secret var queryErr error + var rows pgx.Rows if nsName != "" { - getAllSecretsAvailableForNamespace := `SELECT ` + secretColumns + ` -FROM secret s - JOIN project p ON p.id = s.project_id - LEFT JOIN namespace n ON n.id = s.namespace_id -WHERE p.name = ? -AND (s.namespace_id IS NULL or n.name = ?)` - queryErr = s.db.WithContext(ctx).Raw(getAllSecretsAvailableForNamespace, projName.String(), nsName). 
- Scan(&secrets).Error + getAllSecretsAvailableForNamespace := `SELECT ` + secretColumns + ` FROM secret +WHERE project_name = $1 AND (namespace_name IS NULL or namespace_name = $2)` + rows, queryErr = s.pool.Query(ctx, getAllSecretsAvailableForNamespace, projName, nsName) } else { - queryErr = s.db.WithContext(ctx).Raw(getAllSecretsInProject, projName.String()). - Scan(&secrets).Error + rows, queryErr = s.pool.Query(ctx, getAllSecretsInProject, projName) } + if queryErr != nil { return nil, errors.Wrap(tenant.EntitySecret, "unable to get all secrets in scope", queryErr) } + defer rows.Close() - tenantSecrets := make([]*tenant.Secret, len(secrets)) - for i, secret := range secrets { - tenantSecret, err := secret.ToTenantSecret() + var tenantSecrets []*tenant.Secret + for rows.Next() { + var sec Secret + err := rows.Scan(&sec.ID, &sec.Name, &sec.Value, &sec.Type, + &sec.ProjectName, &sec.NamespaceName, &sec.CreatedAt, &sec.UpdatedAt) + if err != nil { + return nil, errors.Wrap(tenant.EntitySecret, "error in GetAll", err) + } + + secret, err := sec.ToTenantSecret() if err != nil { return nil, err } - tenantSecrets[i] = tenantSecret + tenantSecrets = append(tenantSecrets, secret) } return tenantSecrets, nil @@ -233,55 +225,55 @@ AND (s.namespace_id IS NULL or n.name = ?)` // Delete will not support soft delete, once deleted it has to be created again func (s SecretRepository) Delete(ctx context.Context, projName tenant.ProjectName, nsName string, name tenant.SecretName) error { - var result *gorm.DB + var result pgconn.CommandTag + var err error if nsName != "" { - deleteForNamespaceScope := secretCTE + `DELETE -FROM secret s -USING cte_tenant t -WHERE s.name = ? 
-AND s.project_id = t.project_id -AND s.namespace_id = t.namespace_id` - result = s.db.WithContext(ctx).Exec(deleteForNamespaceScope, nsName, projName.String(), name.String()) + deleteForNamespaceScope := `DELETE FROM secret +WHERE name = $1 AND project_name = $2 AND namespace_name = $3` + result, err = s.pool.Exec(ctx, deleteForNamespaceScope, name, projName, nsName) } else { - deleteForProjectScope := `DELETE -FROM secret s -USING project p -WHERE p.name = ? -AND s.name = ? -AND s.project_id = p.id -AND s.namespace_id IS NULL` - result = s.db.WithContext(ctx).Exec(deleteForProjectScope, projName.String(), name) + deleteForProjectScope := `DELETE FROM secret +WHERE project_name = $1 AND name = $2 AND namespace_name IS NULL` + result, err = s.pool.Exec(ctx, deleteForProjectScope, projName, name) } - if result.Error != nil { - return errors.Wrap(tenant.EntitySecret, "error during delete of secret", result.Error) + if err != nil { + return errors.Wrap(tenant.EntitySecret, "error during delete of secret", err) } - if result.RowsAffected == 0 { + if result.RowsAffected() == 0 { return errors.NotFound(tenant.EntitySecret, "secret to delete not found "+name.String()) } return nil } func (s SecretRepository) GetSecretsInfo(ctx context.Context, projName tenant.ProjectName) ([]*dto.SecretInfo, error) { - var secrets []Secret - if err := s.db.WithContext(ctx).Raw(getAllSecretsInProject, projName.String()). 
- Scan(&secrets).Error; err != nil { - return nil, errors.Wrap(tenant.EntitySecret, "unable to get information about secrets", err) + rows, err := s.pool.Query(ctx, getAllSecretsInProject, projName) + + if err != nil { + return nil, errors.Wrap(tenant.EntitySecret, "unable to get all secrets info", err) } + defer rows.Close() + + var secretInfo []*dto.SecretInfo + for rows.Next() { + var sec Secret + err := rows.Scan(&sec.ID, &sec.Name, &sec.Value, &sec.Type, + &sec.ProjectName, &sec.NamespaceName, &sec.CreatedAt, &sec.UpdatedAt) + if err != nil { + return nil, errors.Wrap(tenant.EntitySecret, "error in GetAll", err) + } - infos := make([]*dto.SecretInfo, len(secrets)) - for i, secret := range secrets { - info, err := secret.ToSecretInfo() + secret, err := sec.ToSecretInfo() if err != nil { return nil, err } - infos[i] = info + secretInfo = append(secretInfo, secret) } - return infos, nil + return secretInfo, nil } -func NewSecretRepository(db *gorm.DB) *SecretRepository { - return &SecretRepository{db: db} +func NewSecretRepository(pool *pgxpool.Pool) *SecretRepository { + return &SecretRepository{pool: pool} } diff --git a/internal/store/postgres/tenant/secret_repository_test.go b/internal/store/postgres/tenant/secret_repository_test.go index a5cff88b5c..cc46f39a93 100644 --- a/internal/store/postgres/tenant/secret_repository_test.go +++ b/internal/store/postgres/tenant/secret_repository_test.go @@ -6,8 +6,8 @@ import ( "context" "testing" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "github.com/odpf/optimus/core/tenant" postgres "github.com/odpf/optimus/internal/store/postgres/tenant" @@ -32,18 +32,18 @@ func TestPostgresSecretRepository(t *testing.T) { "bucket": "gs://ns_bucket", }) - dbSetup := func() *gorm.DB { - dbConn := setup.TestDB() - setup.TruncateTables(dbConn) + dbSetup := func() *pgxpool.Pool { + pool := setup.TestPool() + setup.TruncateTablesWith(pool) - projRepo := postgres.NewProjectRepository(dbConn) + 
projRepo := postgres.NewProjectRepository(pool) assert.Nil(t, projRepo.Save(ctx, proj)) - namespaceRepo := postgres.NewNamespaceRepository(dbConn) + namespaceRepo := postgres.NewNamespaceRepository(pool) assert.Nil(t, namespaceRepo.Save(ctx, namespace)) assert.Nil(t, namespaceRepo.Save(ctx, otherNamespace)) - return dbConn + return pool } t.Run("Save", func(t *testing.T) { diff --git a/tests/setup/database.go b/tests/setup/database.go index 047083ffa8..0dacaed2cb 100644 --- a/tests/setup/database.go +++ b/tests/setup/database.go @@ -91,6 +91,7 @@ func dropTables(db *gorm.DB) error { "backup", "backup_old", "secret", + "secret_old", "job_deployment", "job_source", "replay", @@ -160,7 +161,7 @@ func TruncateTablesWith(pool *pgxpool.Pool) { pool.Exec(ctx, "TRUNCATE TABLE secret CASCADE") pool.Exec(ctx, "TRUNCATE TABLE namespace CASCADE") pool.Exec(ctx, "TRUNCATE TABLE project CASCADE") - pool.Exec(ctx, "TRUNCATE TABLE project_old, namespace_old CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE project_old, namespace_old, secret_old CASCADE") pool.Exec(ctx, "TRUNCATE TABLE job_deployment CASCADE") From d005963ba2ee8da60538cbb1ea380a607d6ff02c Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Thu, 15 Dec 2022 12:08:49 +0530 Subject: [PATCH 04/25] fix: fix lint and test issues --- config/loader_test.go | 2 +- .../store/postgres/job/job_repository_test.go | 5 +++-- internal/store/postgres/postgres.go | 2 +- .../store/postgres/tenant/namespace_repository.go | 2 -- internal/store/postgres/tracer.go | 15 +++++++-------- server/optimus.go | 4 ++-- 6 files changed, 14 insertions(+), 16 deletions(-) diff --git a/config/loader_test.go b/config/loader_test.go index c3e2cd0e10..4607ed968b 100644 --- a/config/loader_test.go +++ b/config/loader_test.go @@ -271,7 +271,7 @@ func (s *ConfigTestSuite) initExpectedServerConfig() { s.expectedServerConfig.Serve.Deployer.QueueCapacity = 10 s.expectedServerConfig.Serve.DB = config.DBConfig{} s.expectedServerConfig.Serve.DB.DSN = 
"postgres://user:password@localhost:5432/database?sslmode=disable" - s.expectedServerConfig.Serve.DB.MaxIdleConnection = 5 + s.expectedServerConfig.Serve.DB.MinOpenConnection = 5 s.expectedServerConfig.Serve.DB.MaxOpenConnection = 10 s.expectedServerConfig.Scheduler = config.SchedulerConfig{} diff --git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index 9b5fdf5f02..dbd3f8c3f7 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -52,10 +52,11 @@ func TestPostgresJobRepository(t *testing.T) { dbConn := setup.TestDB() setup.TruncateTables(dbConn) - projRepo := tenantPostgres.NewProjectRepository(dbConn) + pool := setup.TestPool() + projRepo := tenantPostgres.NewProjectRepository(pool) assert.NoError(t, projRepo.Save(ctx, proj)) - namespaceRepo := tenantPostgres.NewNamespaceRepository(dbConn) + namespaceRepo := tenantPostgres.NewNamespaceRepository(pool) assert.NoError(t, namespaceRepo.Save(ctx, namespace)) assert.NoError(t, namespaceRepo.Save(ctx, otherNamespace)) diff --git a/internal/store/postgres/postgres.go b/internal/store/postgres/postgres.go index 5c96d62613..20eec53b73 100644 --- a/internal/store/postgres/postgres.go +++ b/internal/store/postgres/postgres.go @@ -56,7 +56,7 @@ func Connect(dbConf config.DBConfig, writer io.Writer) (*gorm.DB, error) { if err != nil { return nil, err } - //sqlDB.SetMaxIdleConns(dbConf.MaxIdleConnection) + // sqlDB.SetMaxIdleConns(dbConf.MaxIdleConnection) sqlDB.SetMaxOpenConns(dbConf.MaxOpenConnection) return db, nil } diff --git a/internal/store/postgres/tenant/namespace_repository.go b/internal/store/postgres/tenant/namespace_repository.go index f546c04650..6283035c83 100644 --- a/internal/store/postgres/tenant/namespace_repository.go +++ b/internal/store/postgres/tenant/namespace_repository.go @@ -2,7 +2,6 @@ package tenant import ( "context" - "fmt" "time" "github.com/google/uuid" @@ -45,7 +44,6 
@@ func (n *Namespace) toTenantNamespace() (*tenant.Namespace, error) { func (n *NamespaceRepository) Save(ctx context.Context, namespace *tenant.Namespace) error { _, err := n.get(ctx, namespace.ProjectName(), namespace.Name()) if err != nil { - fmt.Println(err) if errors.Is(err, pgx.ErrNoRows) { insertNamespace := `INSERT INTO namespace (name, config, project_name, created_at, updated_at) VALUES ($1, $2, $3, now(), now())` diff --git a/internal/store/postgres/tracer.go b/internal/store/postgres/tracer.go index b35d5b4f81..2a1184ee11 100644 --- a/internal/store/postgres/tracer.go +++ b/internal/store/postgres/tracer.go @@ -14,9 +14,8 @@ import ( // tracer is a wrapper around the pgx tracer interfaces which instrument // queries. type tracer struct { - tracer trace.Tracer - attrs []attribute.KeyValue - logSQLStatement bool + tracer trace.Tracer + attrs []attribute.KeyValue } // NewTracer returns a new Tracer. @@ -56,7 +55,7 @@ func (t *tracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, data pgx.Trac } // TraceQueryEnd is called at the end of Query, QueryRow, and Exec calls. -func (t *tracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) { +func (*tracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) { span := trace.SpanFromContext(ctx) recordError(span, data.Err) @@ -83,7 +82,7 @@ func (t *tracer) TraceCopyFromStart(ctx context.Context, _ *pgx.Conn, data pgx.T } // TraceCopyFromEnd is called at the end of CopyFrom calls. -func (t *tracer) TraceCopyFromEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromEndData) { +func (*tracer) TraceCopyFromEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromEndData) { span := trace.SpanFromContext(ctx) recordError(span, data.Err) @@ -130,7 +129,7 @@ func (t *tracer) TraceBatchQuery(ctx context.Context, _ *pgx.Conn, data pgx.Trac } // TraceBatchEnd is called at the end of SendBatch calls. 
-func (t *tracer) TraceBatchEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchEndData) { +func (*tracer) TraceBatchEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchEndData) { span := trace.SpanFromContext(ctx) recordError(span, data.Err) @@ -163,7 +162,7 @@ func (t *tracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectSta } // TraceConnectEnd is called at the end of Connect and ConnectConfig calls. -func (t *tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) { +func (*tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) { span := trace.SpanFromContext(ctx) recordError(span, data.Err) @@ -190,7 +189,7 @@ func (t *tracer) TracePrepareStart(ctx context.Context, _ *pgx.Conn, data pgx.Tr } // TracePrepareEnd is called at the end of Prepare calls. -func (t *tracer) TracePrepareEnd(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareEndData) { +func (*tracer) TracePrepareEnd(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareEndData) { span := trace.SpanFromContext(ctx) recordError(span, data.Err) diff --git a/server/optimus.go b/server/optimus.go index c363ec2a96..cb5a3ff2fa 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -245,8 +245,8 @@ func (s *OptimusServer) Shutdown() { func (s *OptimusServer) setupHandlers() error { // Tenant Bounded Context Setup tProjectRepo := tenant.NewProjectRepository(s.dbPool) - tNamespaceRepo := tenant.NewNamespaceRepository(s.dbConn) - tSecretRepo := tenant.NewSecretRepository(s.dbConn) + tNamespaceRepo := tenant.NewNamespaceRepository(s.dbPool) + tSecretRepo := tenant.NewSecretRepository(s.dbPool) tProjectService := tService.NewProjectService(tProjectRepo) tNamespaceService := tService.NewNamespaceService(tNamespaceRepo) From 809d47beba11feec6e5103da0569ab489a5e7385 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Thu, 15 Dec 2022 13:53:58 +0530 Subject: [PATCH 05/25] refactor: add foreign key for tenant in resource and job --- 
config.sample.yaml | 2 +- .../store/postgres/job/job_repository_test.go | 23 ++++++++++----- ...l => 000042_update_project_table.down.sql} | 0 ...sql => 000042_update_project_table.up.sql} | 0 ...=> 000043_update_namespace_table.down.sql} | 0 ...l => 000043_update_namespace_table.up.sql} | 0 ...ql => 000044_update_secret_table.down.sql} | 0 ....sql => 000044_update_secret_table.up.sql} | 0 .../000045_update_foreign_keys.down.sql | 12 ++++++++ .../000045_update_foreign_keys.up.sql | 12 ++++++++ .../resource/backup_repository_test.go | 2 +- .../postgres/resource/repository_test.go | 28 ++++++++++++++++++- 12 files changed, 69 insertions(+), 10 deletions(-) rename internal/store/postgres/migrations/{000041_update_project_table.down.sql => 000042_update_project_table.down.sql} (100%) rename internal/store/postgres/migrations/{000041_update_project_table.up.sql => 000042_update_project_table.up.sql} (100%) rename internal/store/postgres/migrations/{000042_update_namespace_table.down.sql => 000043_update_namespace_table.down.sql} (100%) rename internal/store/postgres/migrations/{000042_update_namespace_table.up.sql => 000043_update_namespace_table.up.sql} (100%) rename internal/store/postgres/migrations/{000043_update_secret_table.down.sql => 000044_update_secret_table.down.sql} (100%) rename internal/store/postgres/migrations/{000043_update_secret_table.up.sql => 000044_update_secret_table.up.sql} (100%) create mode 100644 internal/store/postgres/migrations/000045_update_foreign_keys.down.sql create mode 100644 internal/store/postgres/migrations/000045_update_foreign_keys.up.sql diff --git a/config.sample.yaml b/config.sample.yaml index 0542bd47a5..72d7714a63 100644 --- a/config.sample.yaml +++ b/config.sample.yaml @@ -30,7 +30,7 @@ log: # # database connection string # dsn: postgres://user:password@localhost:5432/database?sslmode=disable # -# max_idle_connection: 5 +# min_open_connection: 5 # max_open_connection: 10 # optimus supports multiple scheduler types diff 
--git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index dbd3f8c3f7..7df8e06c9c 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -27,26 +27,33 @@ func TestPostgresJobRepository(t *testing.T) { tenant.ProjectStoragePathKey: "gs://location", }) assert.NoError(t, err) + + otherProj, err := tenant.NewProject("test-other-proj", + map[string]string{ + "bucket": "gs://some_folder-3", + tenant.ProjectSchedulerHost: "host", + tenant.ProjectStoragePathKey: "gs://location", + }) + assert.NoError(t, err) + namespace, err := tenant.NewNamespace("test-ns", proj.Name(), map[string]string{ "bucket": "gs://ns_bucket", }) assert.NoError(t, err) + otherNamespace, err := tenant.NewNamespace("other-ns", proj.Name(), map[string]string{ "bucket": "gs://ns_bucket", }) - assert.NoError(t, err) - sampleTenant, err := tenant.NewTenant(proj.Name().String(), namespace.Name().String()) - assert.NoError(t, err) - otherProj, err := tenant.NewProject("test-other-proj", + otherNamespace2, err := tenant.NewNamespace("other-ns", otherProj.Name(), map[string]string{ - "bucket": "gs://some_folder-3", - tenant.ProjectSchedulerHost: "host", - tenant.ProjectStoragePathKey: "gs://location", + "bucket": "gs://ns_bucket", }) assert.NoError(t, err) + sampleTenant, err := tenant.NewTenant(proj.Name().String(), namespace.Name().String()) + assert.NoError(t, err) dbSetup := func() *gorm.DB { dbConn := setup.TestDB() @@ -55,10 +62,12 @@ func TestPostgresJobRepository(t *testing.T) { pool := setup.TestPool() projRepo := tenantPostgres.NewProjectRepository(pool) assert.NoError(t, projRepo.Save(ctx, proj)) + assert.NoError(t, projRepo.Save(ctx, otherProj)) namespaceRepo := tenantPostgres.NewNamespaceRepository(pool) assert.NoError(t, namespaceRepo.Save(ctx, namespace)) assert.NoError(t, namespaceRepo.Save(ctx, otherNamespace)) + assert.NoError(t, namespaceRepo.Save(ctx, 
otherNamespace2)) return dbConn } diff --git a/internal/store/postgres/migrations/000041_update_project_table.down.sql b/internal/store/postgres/migrations/000042_update_project_table.down.sql similarity index 100% rename from internal/store/postgres/migrations/000041_update_project_table.down.sql rename to internal/store/postgres/migrations/000042_update_project_table.down.sql diff --git a/internal/store/postgres/migrations/000041_update_project_table.up.sql b/internal/store/postgres/migrations/000042_update_project_table.up.sql similarity index 100% rename from internal/store/postgres/migrations/000041_update_project_table.up.sql rename to internal/store/postgres/migrations/000042_update_project_table.up.sql diff --git a/internal/store/postgres/migrations/000042_update_namespace_table.down.sql b/internal/store/postgres/migrations/000043_update_namespace_table.down.sql similarity index 100% rename from internal/store/postgres/migrations/000042_update_namespace_table.down.sql rename to internal/store/postgres/migrations/000043_update_namespace_table.down.sql diff --git a/internal/store/postgres/migrations/000042_update_namespace_table.up.sql b/internal/store/postgres/migrations/000043_update_namespace_table.up.sql similarity index 100% rename from internal/store/postgres/migrations/000042_update_namespace_table.up.sql rename to internal/store/postgres/migrations/000043_update_namespace_table.up.sql diff --git a/internal/store/postgres/migrations/000043_update_secret_table.down.sql b/internal/store/postgres/migrations/000044_update_secret_table.down.sql similarity index 100% rename from internal/store/postgres/migrations/000043_update_secret_table.down.sql rename to internal/store/postgres/migrations/000044_update_secret_table.down.sql diff --git a/internal/store/postgres/migrations/000043_update_secret_table.up.sql b/internal/store/postgres/migrations/000044_update_secret_table.up.sql similarity index 100% rename from 
internal/store/postgres/migrations/000043_update_secret_table.up.sql rename to internal/store/postgres/migrations/000044_update_secret_table.up.sql diff --git a/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql b/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql new file mode 100644 index 0000000000..65cedac9f6 --- /dev/null +++ b/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql @@ -0,0 +1,12 @@ +ALTER TABLE resource + DROP CONSTRAINT IF EXISTS pk_resource; + +ALTER TABLE resource + DROP CONSTRAINT IF EXISTS fk_resource_namespace; + +ALTER TABLE backup + DROP CONSTRAINT IF EXISTS fk_backup_namespace; + +ALTER TABLE job + DROP CONSTRAINT IF EXISTS fk_job_namespace; + diff --git a/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql b/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql new file mode 100644 index 0000000000..a37de65ce0 --- /dev/null +++ b/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql @@ -0,0 +1,12 @@ +ALTER TABLE resource +ADD CONSTRAINT pk_resource PRIMARY KEY (project_name, namespace_name, store, full_name); + +ALTER TABLE resource +ADD CONSTRAINT fk_resource_namespace FOREIGN KEY (project_name, namespace_name) REFERENCES namespace (project_name, name); + +ALTER TABLE backup +ADD CONSTRAINT fk_backup_namespace FOREIGN KEY (project_name, namespace_name) REFERENCES namespace (project_name, name); + +ALTER TABLE job +ADD CONSTRAINT fk_job_namespace FOREIGN KEY (project_name, namespace_name) REFERENCES namespace (project_name, name); + diff --git a/internal/store/postgres/resource/backup_repository_test.go b/internal/store/postgres/resource/backup_repository_test.go index b356dd8deb..24ecdb7c11 100644 --- a/internal/store/postgres/resource/backup_repository_test.go +++ b/internal/store/postgres/resource/backup_repository_test.go @@ -16,7 +16,7 @@ import ( func TestPostgresBackupRepository(t *testing.T) { ctx := context.Background() - 
tnnt, _ := tenant.NewTenant("proj", "namespace") + tnnt, _ := tenant.NewTenant("t-optimus-1", "n-optimus-1") resNames := []string{"bigquery-project.playground.test-table", "bigquery-project.playground.table1"} created := time.Date(2022, 11, 22, 5, 0, 0, 0, time.UTC) conf := map[string]string{"config1": "value1", "ttl": "720h"} diff --git a/internal/store/postgres/resource/repository_test.go b/internal/store/postgres/resource/repository_test.go index f38cb2543c..f34ce98230 100644 --- a/internal/store/postgres/resource/repository_test.go +++ b/internal/store/postgres/resource/repository_test.go @@ -15,12 +15,13 @@ import ( serviceResource "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" repoResource "github.com/odpf/optimus/internal/store/postgres/resource" + tenantPostgres "github.com/odpf/optimus/internal/store/postgres/tenant" "github.com/odpf/optimus/tests/setup" ) func TestPostgresResourceRepository(t *testing.T) { ctx := context.Background() - tnnt, err := tenant.NewTenant("project_test", "namespace_test") + tnnt, err := tenant.NewTenant("t-optimus-1", "n-optimus-1") assert.NoError(t, err) spec := map[string]any{ "description": "spec for test", @@ -328,5 +329,30 @@ func fromModelToResource(r *repoResource.Resource) (*serviceResource.Resource, e func dbSetup() *gorm.DB { dbConn := setup.TestDB() setup.TruncateTables(dbConn) + + pool := setup.TestPool() + ctx := context.Background() + proj, _ := tenant.NewProject("t-optimus-1", + map[string]string{ + "bucket": "gs://some_folder-2", + tenant.ProjectSchedulerHost: "host", + tenant.ProjectStoragePathKey: "gs://location", + }) + projRepo := tenantPostgres.NewProjectRepository(pool) + err := projRepo.Save(ctx, proj) + if err != nil { + panic(err) + } + + namespaceRepo := tenantPostgres.NewNamespaceRepository(pool) + ns, _ := tenant.NewNamespace("n-optimus-1", proj.Name(), + map[string]string{ + "bucket": "gs://ns_bucket", + }) + err = namespaceRepo.Save(ctx, ns) + if err != nil { + 
panic(err) + } + return dbConn } From c06ea5f411ba0c9efb672d4fd70e5aa23f610215 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Thu, 15 Dec 2022 18:21:33 +0530 Subject: [PATCH 06/25] refactor: use pgx for resource and backup --- .../store/postgres/job/job_repository_test.go | 1 + internal/store/postgres/postgres.go | 1 - .../postgres/resource/backup_repository.go | 90 +++++---- .../store/postgres/resource/repository.go | 184 +++++++++--------- .../postgres/resource/repository_test.go | 151 ++++---------- internal/store/postgres/resource/resource.go | 19 +- server/optimus.go | 4 +- 7 files changed, 193 insertions(+), 257 deletions(-) diff --git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index 7df8e06c9c..2eb82a7fea 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -46,6 +46,7 @@ func TestPostgresJobRepository(t *testing.T) { map[string]string{ "bucket": "gs://ns_bucket", }) + assert.NoError(t, err) otherNamespace2, err := tenant.NewNamespace("other-ns", otherProj.Name(), map[string]string{ diff --git a/internal/store/postgres/postgres.go b/internal/store/postgres/postgres.go index 20eec53b73..28723f7cb7 100644 --- a/internal/store/postgres/postgres.go +++ b/internal/store/postgres/postgres.go @@ -56,7 +56,6 @@ func Connect(dbConf config.DBConfig, writer io.Writer) (*gorm.DB, error) { if err != nil { return nil, err } - // sqlDB.SetMaxIdleConns(dbConf.MaxIdleConnection) sqlDB.SetMaxOpenConns(dbConf.MaxOpenConnection) return db, nil } diff --git a/internal/store/postgres/resource/backup_repository.go b/internal/store/postgres/resource/backup_repository.go index f7dfd698cd..a9a6df7ee0 100644 --- a/internal/store/postgres/resource/backup_repository.go +++ b/internal/store/postgres/resource/backup_repository.go @@ -2,41 +2,40 @@ package resource import ( "context" - "encoding/json" "time" "github.com/google/uuid" + 
"github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/lib/pq" - "gorm.io/datatypes" - "gorm.io/gorm" "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" ) +const ( + backupToStoreColumns = `store, project_name, namespace_name, description, resource_names, config, created_at, updated_at` + backupColumns = `id, ` + backupToStoreColumns +) + type Backup struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` + ID uuid.UUID Store string ProjectName string NamespaceName string Description string - ResourceNames pq.StringArray `gorm:"type:text[]"` + ResourceNames pq.StringArray - Config datatypes.JSON + Config map[string]string - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` + CreatedAt time.Time + UpdatedAt time.Time } -func NewBackup(b *resource.Backup) (Backup, error) { - conf, err := json.Marshal(b.Config()) - if err != nil { - return Backup{}, err - } - +func NewBackup(b *resource.Backup) Backup { return Backup{ ResourceNames: b.ResourceNames(), Store: b.Store().String(), @@ -44,8 +43,8 @@ func NewBackup(b *resource.Backup) (Backup, error) { NamespaceName: b.Tenant().NamespaceName().String(), Description: b.Description(), CreatedAt: b.CreatedAt(), - Config: conf, - }, nil + Config: b.Config(), + } } func (b Backup) ToResourceBackup() (*resource.Backup, error) { @@ -58,12 +57,7 @@ func (b Backup) ToResourceBackup() (*resource.Backup, error) { return nil, err } - var config map[string]string - if err = json.Unmarshal(b.Config, &config); err != nil { - return nil, errors.Wrap(resource.EntityBackup, "error unmarshalling config", err) - } - - backup, err := resource.NewBackup(s, tnnt, b.ResourceNames, b.Description, b.CreatedAt.UTC(), config) + backup, err := resource.NewBackup(s, tnnt, b.ResourceNames, b.Description, b.CreatedAt.UTC(), b.Config) if err != nil { return nil, err } @@ -77,16 
+71,17 @@ func (b Backup) ToResourceBackup() (*resource.Backup, error) { } type BackupRepository struct { - db *gorm.DB + pool *pgxpool.Pool } func (repo BackupRepository) Create(ctx context.Context, resourceBackup *resource.Backup) error { - backup, err := NewBackup(resourceBackup) - if err != nil { - return err - } + backup := NewBackup(resourceBackup) + + insertBackup := `INSERT INTO backup (` + backupToStoreColumns + `) VALUES ($1, $2, $3, $4, $5, $6, $7, now()) returning id` + err := repo.pool.QueryRow(ctx, insertBackup, backup.Store, backup.ProjectName, backup.NamespaceName, + backup.Description, backup.ResourceNames, backup.Config, backup.CreatedAt).Scan(&backup.ID) - if err = repo.db.WithContext(ctx).Create(&backup).Error; err != nil { + if err != nil { return errors.Wrap(resource.EntityBackup, "unable to save backup in db", err) } @@ -95,9 +90,13 @@ func (repo BackupRepository) Create(ctx context.Context, resourceBackup *resourc func (repo BackupRepository) GetByID(ctx context.Context, id resource.BackupID) (*resource.Backup, error) { var b Backup - if err := repo.db.WithContext(ctx). - Where("id = ?", id.UUID()).First(&b).Error; err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + getByID := `SELECT ` + backupColumns + ` FROM backup WHERE id = $1` + err := repo.pool.QueryRow(ctx, getByID, id.String()). 
+ Scan(&b.ID, &b.Store, &b.ProjectName, &b.NamespaceName, + &b.Description, &b.ResourceNames, &b.Config, &b.CreatedAt, &b.UpdatedAt) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { return nil, errors.NotFound(resource.EntityBackup, "record not found for id "+id.String()) } return nil, errors.Wrap(resource.EntityBackup, "error while getting backup for id "+id.String(), err) @@ -107,27 +106,32 @@ func (repo BackupRepository) GetByID(ctx context.Context, id resource.BackupID) } func (repo BackupRepository) GetAll(ctx context.Context, tnnt tenant.Tenant, store resource.Store) ([]*resource.Backup, error) { - var backups []Backup - err := repo.db.WithContext(ctx).Where("project_name = ?", tnnt.ProjectName().String()). - Where("namespace_name = ?", tnnt.NamespaceName().String()). - Where("store = ?", store.String()). - Find(&backups).Error + getAllBackups := `SELECT ` + backupColumns + ` FROM backup WHERE project_name = $1 AND namespace_name = $2 AND store = $3` + rows, err := repo.pool.Query(ctx, getAllBackups, tnnt.ProjectName(), tnnt.NamespaceName(), store) if err != nil { return nil, errors.Wrap(resource.EntityBackup, "error while getting backup", err) } + defer rows.Close() + + var backups []*resource.Backup + for rows.Next() { + var b Backup + err = rows.Scan(&b.ID, &b.Store, &b.ProjectName, &b.NamespaceName, + &b.Description, &b.ResourceNames, &b.Config, &b.CreatedAt, &b.UpdatedAt) + if err != nil { + return nil, err + } - var resourceBackups []*resource.Backup - for _, backup := range backups { - resourceBackup, err := backup.ToResourceBackup() + resourceBackup, err := b.ToResourceBackup() if err != nil { return nil, err } - resourceBackups = append(resourceBackups, resourceBackup) + backups = append(backups, resourceBackup) } - return resourceBackups, nil + return backups, nil } -func NewBackupRepository(db *gorm.DB) *BackupRepository { - return &BackupRepository{db: db} +func NewBackupRepository(pool *pgxpool.Pool) *BackupRepository { + return 
&BackupRepository{pool: pool}
 }
diff --git a/internal/store/postgres/resource/repository.go b/internal/store/postgres/resource/repository.go
index 4b44e3abe7..edb6f8c098 100644
--- a/internal/store/postgres/resource/repository.go
+++ b/internal/store/postgres/resource/repository.go
@@ -2,138 +2,148 @@ package resource
 
 import (
 	"context"
-	"fmt"
 
-	"gorm.io/gorm"
+	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgxpool"
 
 	"github.com/odpf/optimus/core/resource"
 	"github.com/odpf/optimus/core/tenant"
 	"github.com/odpf/optimus/internal/errors"
 )
 
+const (
+	resourceColumns = `full_name, kind, store, status, urn, project_name, namespace_name, metadata, spec, created_at, updated_at`
+)
+
 type Repository struct {
-	db *gorm.DB
+	pool *pgxpool.Pool
 }
 
-func NewRepository(db *gorm.DB) *Repository {
+func NewRepository(pool *pgxpool.Pool) *Repository {
 	return &Repository{
-		db: db,
+		pool: pool,
 	}
 }
 
-func (r Repository) Create(ctx context.Context, res *resource.Resource) error {
-	incomingResource := fromResourceToModel(res)
-	return r.create(r.db.WithContext(ctx), incomingResource)
+func (r Repository) Create(ctx context.Context, resourceModel *resource.Resource) error {
+	res := FromResourceToModel(resourceModel)
+
+	insertResource := `INSERT INTO resource (` + resourceColumns + `) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, now(), now())`
+	_, err := r.pool.Exec(ctx, insertResource, res.FullName, res.Kind, res.Store, res.Status, res.URN,
+		res.ProjectName, res.NamespaceName, res.Metadata, res.Spec)
+	return errors.WrapIfErr(resource.EntityResource, "error creating resource to database", err)
 }
 
-func (r Repository) Update(ctx context.Context, res *resource.Resource) error {
-	incomingResource := fromResourceToModel(res)
-	return r.update(r.db.WithContext(ctx), incomingResource)
+func (r Repository) Update(ctx context.Context, resourceModel *resource.Resource) error {
+	res := FromResourceToModel(resourceModel)
+
+	updateResource := `UPDATE resource SET kind=$1, status=$2,
urn=$3, metadata=$4, spec=$5, updated_at=now()
+		WHERE full_name=$6 AND store=$7 AND project_name = $8 AND namespace_name = $9`
+	tag, err := r.pool.Exec(ctx, updateResource, res.Kind, res.Status, res.URN,
+		res.Metadata, res.Spec, res.FullName, res.Store, res.ProjectName, res.NamespaceName)
+
+	if err != nil {
+		return errors.Wrap(resource.EntityResource, "error updating resource to database", err)
+	}
+
+	if tag.RowsAffected() == 0 {
+		return errors.NotFound(resource.EntityResource, "no resource to update for "+res.FullName)
+	}
+	return nil
 }
 
 func (r Repository) ReadByFullName(ctx context.Context, tnnt tenant.Tenant, store resource.Store, fullName string) (*resource.Resource, error) {
-	res, err := r.readByFullName(r.db.WithContext(ctx), tnnt.ProjectName().String(), tnnt.NamespaceName().String(), store.String(), fullName)
+	var res Resource
+	getResource := `SELECT ` + resourceColumns + ` FROM resource WHERE full_name = $1 AND store = $2 AND
+		project_name = $3 AND namespace_name = $4`
+	err := r.pool.QueryRow(ctx, getResource, fullName, store, tnnt.ProjectName(), tnnt.NamespaceName()).
+		Scan(&res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN,
+			&res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt)
+
 	if err != nil {
-		return nil, err
+		if errors.Is(err, pgx.ErrNoRows) {
+			// use the fullName argument: res.FullName is still empty when Scan fails
+			return nil, errors.NotFound(resource.EntityResource, "no resource found for "+fullName)
+		}
+
+		return nil, errors.Wrap(resource.EntityResource, "error reading the resource "+fullName, err)
 	}
-	return fromModelToResource(res)
+
+	return FromModelToResource(&res)
 }
 
 func (r Repository) ReadAll(ctx context.Context, tnnt tenant.Tenant, store resource.Store) ([]*resource.Resource, error) {
-	var resources []*Resource
-	if err := r.db.WithContext(ctx).
-		Where("project_name = ? and namespace_name = ?
and store = ?", - tnnt.ProjectName().String(), tnnt.NamespaceName().String(), store.String(), - ).Find(&resources).Error; err != nil { - return nil, errors.Wrap(resource.EntityResource, "error reading from database", err) + getAllResources := `SELECT ` + resourceColumns + ` FROM resource WHERE project_name = $1 and namespace_name = $2 and store = $3` + rows, err := r.pool.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store) + if err != nil { + return nil, errors.Wrap(resource.EntityResource, "error in ReadAll", err) } - output := make([]*resource.Resource, len(resources)) - for i, res := range resources { - m, err := fromModelToResource(res) + defer rows.Close() + + var resources []*resource.Resource + for rows.Next() { + var res Resource + err = rows.Scan(&res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, + &res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt) + if err != nil { + return nil, errors.Wrap(resource.EntityResource, "error in GetAll", err) + } + + resourceModel, err := FromModelToResource(&res) if err != nil { return nil, err } - output[i] = m + resources = append(resources, resourceModel) } - return output, nil + + return resources, nil } func (r Repository) GetResources(ctx context.Context, tnnt tenant.Tenant, store resource.Store, names []string) ([]*resource.Resource, error) { - var resources []*Resource - result := r.db.WithContext(ctx). - Where("project_name = ?", tnnt.ProjectName().String()). - Where("namespace_name = ?", tnnt.NamespaceName().String()). - Where("store = ?", store.String()).Where("full_name IN ?", names). 
-		Find(&resources)
-	if result.Error != nil {
-		return nil, errors.Wrap(resource.EntityResource, "error reading from database", result.Error)
+	getAllResources := `SELECT ` + resourceColumns + ` FROM resource WHERE project_name = $1 and namespace_name = $2 and
+store = $3 AND full_name = any ($4)`
+	rows, err := r.pool.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store, names)
+	if err != nil {
+		return nil, errors.Wrap(resource.EntityResource, "error in GetResources", err)
 	}
+	defer rows.Close()
 
-	var tenantResources = make([]*resource.Resource, len(resources))
-	for i, res := range resources {
-		model, err := fromModelToResource(res)
+	var resources []*resource.Resource
+	for rows.Next() {
+		var res Resource
+		err = rows.Scan(&res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN,
+			&res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt)
+		if err != nil {
+			return nil, errors.Wrap(resource.EntityResource, "error in GetResources", err)
+		}
+
+		resourceModel, err := FromModelToResource(&res)
 		if err != nil {
 			return nil, err
 		}
-		tenantResources[i] = model
+		resources = append(resources, resourceModel)
 	}
-	return tenantResources, nil
+
+	return resources, nil
 }
 
 func (r Repository) UpdateStatus(ctx context.Context, resources ...*resource.Resource) error {
-	resourceModels := make([]*Resource, len(resources))
-	for i, res := range resources {
-		resourceModels[i] = fromResourceToModel(res)
+	batch := pgx.Batch{}
+	for _, res := range resources {
+		updateStatus := `UPDATE resource SET status = $1 WHERE project_name = $2 AND namespace_name = $3 AND store = $4 AND full_name = $5`
+		batch.Queue(updateStatus, res.Status(), res.Tenant().ProjectName(), res.Tenant().NamespaceName(), res.Store(), res.FullName())
 	}
-	multiErr := errors.NewMultiError("error updating resources status")
-	for _, m := range resourceModels {
-		result := r.db.WithContext(ctx).Model(&Resource{}).
-			Where("project_name = ?", m.ProjectName).
- Where("namespace_name = ?", m.NamespaceName). - Where("store = ?", m.Store). - Where("full_name = ?", m.FullName). - Update("status", m.Status) - if result.Error != nil { - multiErr.Append(errors.Wrap(resource.EntityResource, "error updating status to database", result.Error)) - } - if result.RowsAffected == 0 { - multiErr.Append(errors.NotFound(resource.EntityResource, "resource is not found "+m.FullName)) - } - } - return errors.MultiToError(multiErr) -} + results := r.pool.SendBatch(ctx, &batch) + defer results.Close() -func (r Repository) update(db *gorm.DB, res *Resource) error { - existingResource, err := r.readByFullName(db, res.ProjectName, res.NamespaceName, res.Store, res.FullName) - if err != nil { - return err - } - err = db.Where(existingResource).Updates(res).Error - if err != nil { - err = errors.Wrap(resource.EntityResource, "error updating resource to database", err) - } - return err -} - -func (Repository) readByFullName(db *gorm.DB, projectName, namespaceName, store, fullName string) (*Resource, error) { - var res *Resource - query := "project_name = ? and store = ? and full_name = ?" - if namespaceName != "" { - query += " and namespace_name = ?" 
- } - if err := db.Where(query, projectName, store, fullName, namespaceName).First(&res).Error; err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(resource.EntityResource, fmt.Sprintf("resource [%s] is not found", fullName)) + multiErr := errors.NewMultiError("error updating resources status") + for i := range resources { + tag, err := results.Exec() + multiErr.Append(err) + if tag.RowsAffected() == 0 { + multiErr.Append(errors.InternalError(resource.EntityResource, "error updating status for "+resources[i].FullName(), nil)) } - return nil, errors.Wrap(resource.EntityResource, "error reading from database", err) } - return res, nil -} -func (Repository) create(db *gorm.DB, m *Resource) error { - if err := db.Create(m).Error; err != nil { - return errors.Wrap(resource.EntityResource, "error creating resource to database", err) - } - return nil + return errors.MultiToError(multiErr) } diff --git a/internal/store/postgres/resource/repository_test.go b/internal/store/postgres/resource/repository_test.go index f34ce98230..33c4a33b6a 100644 --- a/internal/store/postgres/resource/repository_test.go +++ b/internal/store/postgres/resource/repository_test.go @@ -5,12 +5,10 @@ package resource_test import ( "context" - "encoding/json" "testing" - "time" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "gorm.io/gorm" serviceResource "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" @@ -38,8 +36,8 @@ func TestPostgresResourceRepository(t *testing.T) { t.Run("Create", func(t *testing.T) { t.Run("returns error if resource with the provided full name is already defined within project and namespace", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) resourceToCreate, err := serviceResource.NewResource("project.dataset", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -52,8 
+50,8 @@ func TestPostgresResourceRepository(t *testing.T) { }) t.Run("stores resource to database and returns nil if no error is encountered", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) resourceToCreate, err := serviceResource.NewResource("project.dataset", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -63,17 +61,16 @@ func TestPostgresResourceRepository(t *testing.T) { actualError := repository.Create(ctx, resourceToCreate) assert.NoError(t, actualError) - storedResources, err := readAllFromDB(db) + storedResource, err := repository.ReadByFullName(ctx, tnnt, store, "project.dataset") assert.NoError(t, err) - assert.Len(t, storedResources, 1) - assert.EqualValues(t, resourceToCreate, storedResources[0]) + assert.EqualValues(t, resourceToCreate, storedResource) }) }) t.Run("Update", func(t *testing.T) { t.Run("returns error if resource does not exist", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) resourceToUpdate, err := serviceResource.NewResource("project.dataset", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -84,21 +81,21 @@ func TestPostgresResourceRepository(t *testing.T) { }) t.Run("updates resource and returns nil if no error is encountered", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) resourceToCreate, err := serviceResource.NewResource("project.dataset", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) resourceToCreate.UpdateURN("bigquery://project:dataset") - err = insertAllToDB(db, []*serviceResource.Resource{resourceToCreate}) + err = repository.Create(ctx, resourceToCreate) assert.NoError(t, err) resourceToUpdate := serviceResource.FromExisting(resourceToCreate, 
serviceResource.ReplaceStatus(serviceResource.StatusSuccess)) actualError := repository.Update(ctx, resourceToUpdate) assert.NoError(t, actualError) - storedResources, err := readAllFromDB(db) + storedResources, err := repository.ReadAll(ctx, tnnt, store) assert.NoError(t, err) assert.Len(t, storedResources, 1) assert.EqualValues(t, resourceToUpdate, storedResources[0]) @@ -107,8 +104,8 @@ func TestPostgresResourceRepository(t *testing.T) { t.Run("ReadByFullName", func(t *testing.T) { t.Run("returns nil and error if resource does not exist", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) actualResource, actualError := repository.ReadByFullName(ctx, tnnt, store, "project.dataset") assert.Nil(t, actualResource) @@ -116,14 +113,14 @@ func TestPostgresResourceRepository(t *testing.T) { }) t.Run("returns resource and nil if no error is encountered", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) fullName := "project.dataset" resourceToCreate, err := serviceResource.NewResource(fullName, kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) - err = insertAllToDB(db, []*serviceResource.Resource{resourceToCreate}) + err = repository.Create(ctx, resourceToCreate) assert.NoError(t, err) actualResource, actualError := repository.ReadByFullName(ctx, tnnt, store, fullName) @@ -135,8 +132,8 @@ func TestPostgresResourceRepository(t *testing.T) { t.Run("ReadAll", func(t *testing.T) { t.Run("returns empty and nil if no resource is found", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) actualResources, actualError := repository.ReadAll(ctx, tnnt, store) assert.Empty(t, actualResources) @@ -144,8 +141,8 @@ func TestPostgresResourceRepository(t *testing.T) { 
}) t.Run("returns resource and nil if no error is encountered", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) resourceToCreate, err := serviceResource.NewResource("project.dataset", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -162,8 +159,8 @@ func TestPostgresResourceRepository(t *testing.T) { t.Run("GetResources", func(t *testing.T) { t.Run("gets the resources with given full_names", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) name1 := "project.dataset" resourceToCreate1, err := serviceResource.NewResource(name1, kindDataset, store, tnnt, meta, spec) @@ -193,12 +190,12 @@ func TestPostgresResourceRepository(t *testing.T) { t.Run("UpdateStatus", func(t *testing.T) { t.Run("updates status and return error for partial update success", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) existingResource, err := serviceResource.NewResource("project.dataset1", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) - err = insertAllToDB(db, []*serviceResource.Resource{existingResource}) + err = repository.Create(ctx, existingResource) assert.NoError(t, err) nonExistingResource, err := serviceResource.NewResource("project.dataset2", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -209,16 +206,17 @@ func TestPostgresResourceRepository(t *testing.T) { } actualError := repository.UpdateStatus(ctx, resourcesToUpdate...) 
assert.Error(t, actualError) + assert.ErrorContains(t, actualError, "error updating status for project.dataset2") - storedResources, err := readAllFromDB(db) + storedResources, err := repository.ReadAll(ctx, tnnt, store) assert.NoError(t, err) assert.Len(t, storedResources, 1) assert.EqualValues(t, serviceResource.StatusSuccess, storedResources[0].Status()) }) t.Run("updates only status and returns nil if no error is encountered", func(t *testing.T) { - db := dbSetup() - repository := repoResource.NewRepository(db) + pool := dbSetup() + repository := repoResource.NewRepository(pool) existingResource1, err := serviceResource.NewResource("project.dataset1", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) @@ -226,7 +224,9 @@ func TestPostgresResourceRepository(t *testing.T) { existingResource2, err := serviceResource.NewResource("project.dataset2", kindDataset, store, tnnt, meta, spec) assert.NoError(t, err) existingResource2.UpdateURN("bigquery://project:dataset2") - err = insertAllToDB(db, []*serviceResource.Resource{existingResource1, existingResource2}) + err = repository.Create(ctx, existingResource1) + assert.NoError(t, err) + err = repository.Create(ctx, existingResource2) assert.NoError(t, err) newSpec := map[string]any{ @@ -244,7 +244,7 @@ func TestPostgresResourceRepository(t *testing.T) { actualError := repository.UpdateStatus(ctx, resourcesToUpdate...) 
assert.NoError(t, actualError) - storedResources, err := readAllFromDB(db) + storedResources, err := repository.ReadAll(ctx, tnnt, store) assert.NoError(t, err) assert.Len(t, storedResources, 2) assert.EqualValues(t, existingResource1.Spec(), storedResources[0].Spec()) @@ -255,83 +255,10 @@ func TestPostgresResourceRepository(t *testing.T) { }) } -func readAllFromDB(db *gorm.DB) ([]*serviceResource.Resource, error) { - var rs []*repoResource.Resource - if err := db.Find(&rs).Error; err != nil { - return nil, err - } - output := make([]*serviceResource.Resource, len(rs)) - for i, r := range rs { - o, err := fromModelToResource(r) - if err != nil { - return nil, err - } - output[i] = o - } - return output, nil -} - -func insertAllToDB(db *gorm.DB, rs []*serviceResource.Resource) error { - for _, r := range rs { - resourceToCreate := fromResourceToModel(r) - if err := db.Create(resourceToCreate).Error; err != nil { - return err - } - } - return nil -} - -func fromResourceToModel(r *serviceResource.Resource) *repoResource.Resource { - metadata, _ := json.Marshal(r.Metadata()) - spec, _ := json.Marshal(r.Spec()) - return &repoResource.Resource{ - FullName: r.FullName(), - Kind: r.Kind(), - Store: r.Store().String(), - ProjectName: r.Tenant().ProjectName().String(), - NamespaceName: r.Tenant().NamespaceName().String(), - Metadata: metadata, - Spec: spec, - URN: r.URN(), - Status: r.Status().String(), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - } -} - -func fromModelToResource(r *repoResource.Resource) (*serviceResource.Resource, error) { - store, err := serviceResource.FromStringToStore(r.Store) - if err != nil { - return nil, err - } - tnnt, err := tenant.NewTenant(r.ProjectName, r.NamespaceName) - if err != nil { - return nil, err - } - var meta *serviceResource.Metadata - if err := json.Unmarshal(r.Metadata, &meta); err != nil { - return nil, err - } - var spec map[string]any - if err := json.Unmarshal(r.Spec, &spec); err != nil { - return nil, err - } - 
output, err := serviceResource.NewResource(r.FullName, r.Kind, store, tnnt, meta, spec) - if err != nil { - return nil, err - } - status := serviceResource.FromStringToStatus(r.Status) - res := serviceResource.FromExisting(output, serviceResource.ReplaceStatus(status)) - res.UpdateURN(r.URN) - return res, nil -} - -func dbSetup() *gorm.DB { - dbConn := setup.TestDB() - setup.TruncateTables(dbConn) - - pool := setup.TestPool() +func dbSetup() *pgxpool.Pool { ctx := context.Background() + pool := setup.TestPool() + setup.TruncateTablesWith(pool) proj, _ := tenant.NewProject("t-optimus-1", map[string]string{ "bucket": "gs://some_folder-2", @@ -354,5 +281,5 @@ func dbSetup() *gorm.DB { panic(err) } - return dbConn + return pool } diff --git a/internal/store/postgres/resource/resource.go b/internal/store/postgres/resource/resource.go index 63738959b7..9ea0699834 100644 --- a/internal/store/postgres/resource/resource.go +++ b/internal/store/postgres/resource/resource.go @@ -20,7 +20,7 @@ type Resource struct { NamespaceName string Metadata datatypes.JSON - Spec datatypes.JSON + Spec map[string]any URN string @@ -30,9 +30,9 @@ type Resource struct { UpdatedAt time.Time } -func fromResourceToModel(r *resource.Resource) *Resource { +func FromResourceToModel(r *resource.Resource) *Resource { metadata, _ := json.Marshal(r.Metadata()) - spec, _ := json.Marshal(r.Spec()) + return &Resource{ FullName: r.FullName(), Kind: r.Kind(), @@ -40,15 +40,13 @@ func fromResourceToModel(r *resource.Resource) *Resource { ProjectName: r.Tenant().ProjectName().String(), NamespaceName: r.Tenant().NamespaceName().String(), Metadata: metadata, - Spec: spec, + Spec: r.Spec(), URN: r.URN(), Status: r.Status().String(), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), } } -func fromModelToResource(r *Resource) (*resource.Resource, error) { +func FromModelToResource(r *Resource) (*resource.Resource, error) { store, err := resource.FromStringToStore(r.Store) if err != nil { return nil, 
errors.Wrap(resource.EntityResource, "error constructing kind", err) @@ -61,11 +59,8 @@ func fromModelToResource(r *Resource) (*resource.Resource, error) { if err := json.Unmarshal(r.Metadata, &metadata); err != nil { return nil, errors.Wrap(resource.EntityResource, "error unmarshalling metadata", err) } - var spec map[string]any - if err := json.Unmarshal(r.Spec, &spec); err != nil { - return nil, errors.Wrap(resource.EntityResource, "error unmarshalling spec", err) - } - output, err := resource.NewResource(r.FullName, r.Kind, store, tnnt, metadata, spec) + + output, err := resource.NewResource(r.FullName, r.Kind, store, tnnt, metadata, r.Spec) if err == nil { output = resource.FromExisting(output, resource.ReplaceStatus(resource.FromStringToStatus(r.Status))) output.UpdateURN(r.URN) diff --git a/server/optimus.go b/server/optimus.go index cb5a3ff2fa..8bb1fc4f38 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -254,8 +254,8 @@ func (s *OptimusServer) setupHandlers() error { tenantService := tService.NewTenantService(tProjectService, tNamespaceService, tSecretService) // Resource Bounded Context - resourceRepository := resource.NewRepository(s.dbConn) - backupRepository := resource.NewBackupRepository(s.dbConn) + resourceRepository := resource.NewRepository(s.dbPool) + backupRepository := resource.NewBackupRepository(s.dbPool) resourceManager := rService.NewResourceManager(resourceRepository, s.logger) resourceService := rService.NewResourceService(s.logger, resourceRepository, resourceManager, tenantService) backupService := rService.NewBackupService(backupRepository, resourceRepository, resourceManager) From 57a6920d3bae199647c410ed102496061877cf55 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Tue, 20 Dec 2022 12:40:00 +0530 Subject: [PATCH 07/25] refactor: use pgx for job repository --- internal/errors/multi.go | 12 +- internal/store/postgres/job/adapter.go | 41 ++ internal/store/postgres/job/job_repository.go | 412 ++++++++++-------- 
.../store/postgres/job/job_repository_test.go | 11 +- .../000045_update_foreign_keys.down.sql | 2 + .../000045_update_foreign_keys.up.sql | 2 + internal/store/postgres/tracer.go | 4 + server/optimus.go | 2 +- 8 files changed, 295 insertions(+), 191 deletions(-) diff --git a/internal/errors/multi.go b/internal/errors/multi.go index d08264f0a6..17f632bdf4 100644 --- a/internal/errors/multi.go +++ b/internal/errors/multi.go @@ -38,10 +38,14 @@ func (m *MultiError) Error() string { func MultiToError(e error) error { var me *MultiError if errors.As(e, &me) { - if len(me.Errors) == 0 { - return nil - } - return me + return me.ToErr() } return e } + +func (m *MultiError) ToErr() error { + if len(m.Errors) == 0 { + return nil + } + return m +} diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 16c3ab6490..9082d6a8a4 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -5,11 +5,14 @@ import ( "time" "github.com/google/uuid" + "github.com/jackc/pgx/v5" "github.com/lib/pq" "gorm.io/datatypes" "gorm.io/gorm" "github.com/odpf/optimus/core/job" + "github.com/odpf/optimus/core/resource" + "github.com/odpf/optimus/internal/errors" "github.com/odpf/optimus/internal/models" ) @@ -566,3 +569,41 @@ func fromStorageAssets(raw []byte) (map[string]string, error) { } return assetsMap, nil } + +func FromRow(row pgx.Row) (*Spec, error) { + var js Spec + + err := row.Scan(&js.ID, &js.Name, &js.Version, &js.Owner, &js.Description, + &js.Labels, &js.StartDate, &js.EndDate, &js.Interval, &js.DependsOnPast, + &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, + &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, + &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, + &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, 
errors.NotFound(job.EntityJob, "job not found") + } + + return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + } + + return &js, nil +} + +func UpstreamFromRow(row pgx.Row) (*JobWithUpstream, error) { + var js JobWithUpstream + + err := row.Scan(&js.JobName, &js.ProjectName, &js.UpstreamJobName, &js.UpstreamResourceURN, + &js.UpstreamProjectName, &js.UpstreamNamespaceName, &js.UpstreamTaskName, &js.UpstreamHost, + &js.UpstreamType, &js.UpstreamState, &js.UpstreamExternal) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(job.EntityJob, "job upstream not found") + } + + return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + } + + return &js, nil +} diff --git a/internal/store/postgres/job/job_repository.go b/internal/store/postgres/job/job_repository.go index 389392a35e..8b2757f67f 100644 --- a/internal/store/postgres/job/job_repository.go +++ b/internal/store/postgres/job/job_repository.go @@ -4,19 +4,22 @@ import ( "context" "fmt" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/job" + "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" ) type JobRepository struct { - db *gorm.DB + pool *pgxpool.Pool } -func NewJobRepository(db *gorm.DB) *JobRepository { - return &JobRepository{db: db} +func NewJobRepository(pool *pgxpool.Pool) *JobRepository { + return &JobRepository{pool: pool} } func (j JobRepository) Add(ctx context.Context, jobs []*job.Job) ([]*job.Job, error) { @@ -58,30 +61,29 @@ func (j JobRepository) triggerInsert(ctx context.Context, jobEntity *job.Job) er insertJobQuery := ` INSERT INTO job ( - name, version, owner, description, + name, version, owner, description, labels, start_date, end_date, interval, depends_on_past, catch_up, retry, alert, static_upstreams, http_upstreams, 
task_name, task_config, window_size, window_offset, window_truncate_to, assets, hooks, metadata, - destination, sources, + destination, sources, project_name, namespace_name, created_at, updated_at ) VALUES ( - ?, ?, ?, ?, - ?, ?, ?, ?, - ?, ?, ?, ?, - ?, ?, ?, ?, - ?, ?, ?, - ?, ?, ?, - ?, ?, - ?, ?, + $1, $2, $3, $4, + $5, $6, $7, $8, + $9, $10, $11, $12, + $13, $14, $15, $16, + $17, $18, $19, + $20, $21, $22, + $23, $24, + $25, $26, NOW(), NOW() -); -` +);` - result := j.db.WithContext(ctx).Exec(insertJobQuery, + tag, err := j.pool.Exec(ctx, insertJobQuery, storageJob.Name, storageJob.Version, storageJob.Owner, storageJob.Description, storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, @@ -91,11 +93,11 @@ VALUES ( storageJob.Destination, storageJob.Sources, storageJob.ProjectName, storageJob.NamespaceName) - if result.Error != nil { - return errors.Wrap(job.EntityJob, "unable to save job spec", result.Error) + if err != nil { + return errors.Wrap(job.EntityJob, "unable to save job spec", err) } - if result.RowsAffected == 0 { + if tag.RowsAffected() == 0 { return errors.InternalError(job.EntityJob, "unable to save job spec, rows affected 0", nil) } return nil @@ -148,21 +150,20 @@ func (j JobRepository) triggerUpdate(ctx context.Context, jobEntity *job.Job) er } updateJobQuery := ` -UPDATE job SET - version = ?, owner = ?, description = ?, - labels = ?, start_date = ?, end_date = ?, interval = ?, - depends_on_past = ?, catch_up = ?, retry = ?, alert = ?, - static_upstreams = ?, http_upstreams = ?, task_name = ?, task_config = ?, - window_size = ?, window_offset = ?, window_truncate_to = ?, - assets = ?, hooks = ?, metadata = ?, - destination = ?, sources = ?, +UPDATE job SET + version = $1, owner = $2, description = $3, + labels = $4, start_date = $5, end_date = $6, interval = $7, + depends_on_past = $8, catch_up = $9, retry = $10, alert = $11, + 
static_upstreams = $12, http_upstreams = $13, task_name = $14, task_config = $15, + window_size = $16, window_offset = $17, window_truncate_to = $18, + assets = $19, hooks = $20, metadata = $21, + destination = $22, sources = $23, updated_at = NOW(), deleted_at = null -WHERE - name = ? AND - project_name = ?; -` +WHERE + name = $24 AND + project_name = $25;` - result := j.db.WithContext(ctx).Exec(updateJobQuery, + tag, err := j.pool.Exec(ctx, updateJobQuery, storageJob.Version, storageJob.Owner, storageJob.Description, storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, @@ -172,35 +173,28 @@ WHERE storageJob.Destination, storageJob.Sources, storageJob.Name, storageJob.ProjectName) - if result.Error != nil { - return errors.Wrap(job.EntityJob, "unable to update job spec", result.Error) + if err != nil { + return errors.Wrap(job.EntityJob, "unable to update job spec", err) } - if result.RowsAffected == 0 { + if tag.RowsAffected() == 0 { return errors.InternalError(job.EntityJob, "unable to update job spec, rows affected 0", nil) } return nil } func (j JobRepository) get(ctx context.Context, projectName tenant.ProjectName, jobName job.Name, onlyActiveJob bool) (*Spec, error) { - var spec Spec - getJobByNameAtProject := `SELECT * FROM job -WHERE name = ? -AND project_name = ? -` +WHERE name = $1 +AND project_name = $2` + if onlyActiveJob { jobDeletedFilter := " AND deleted_at IS NULL" getJobByNameAtProject += jobDeletedFilter } - err := j.db.WithContext(ctx).Raw(getJobByNameAtProject, jobName.String(), projectName.String()). 
- First(&spec).Error - if err != nil && errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(job.EntityJob, fmt.Sprintf("job %s not found in project %s", jobName.String(), projectName.String())) - } - return &spec, err + return FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName.String(), projectName.String())) } func (j JobRepository) ResolveUpstreams(ctx context.Context, projectName tenant.ProjectName, jobNames []job.Name) (map[job.Name][]*job.Upstream, error) { @@ -209,8 +203,8 @@ WITH static_upstreams AS ( SELECT j.name, j.project_name, d.static_upstream FROM job j JOIN UNNEST(j.static_upstreams) d(static_upstream) ON true - WHERE project_name = ? - AND name IN (?) + WHERE project_name = $1 + AND name = any ($2) AND j.deleted_at IS NULL ), @@ -218,14 +212,14 @@ inferred_upstreams AS ( SELECT j.name, j.project_name, s.source FROM job j JOIN UNNEST(j.sources) s(source) ON true - WHERE project_name = ? - AND name IN (?) + WHERE project_name = $1 + AND name = any ($2) AND j.deleted_at IS NULL ) SELECT - su.name AS job_name, - su.project_name, + su.name AS job_name, + su.project_name, j.name AS upstream_job_name, j.project_name AS upstream_project_name, j.namespace_name AS upstream_namespace_name, @@ -234,13 +228,13 @@ SELECT 'static' AS upstream_type, false AS upstream_external FROM static_upstreams su -JOIN job j ON - (su.static_upstream = j.name and su.project_name = j.project_name) OR +JOIN job j ON + (su.static_upstream = j.name and su.project_name = j.project_name) OR (su.static_upstream = j.project_name || '/' ||j.name) WHERE j.deleted_at IS NULL UNION ALL - + SELECT id.name AS job_name, id.project_name, @@ -252,19 +246,28 @@ SELECT 'inferred' AS upstream_type, false AS upstream_external FROM inferred_upstreams id -JOIN job j ON id.source = j.destination -WHERE j.deleted_at IS NULL; -` +JOIN job j ON id.source = j.destination +WHERE j.deleted_at IS NULL;` - jobNamesStr := make([]string, len(jobNames)) - for i, jobName := range 
jobNames { - jobNamesStr[i] = jobName.String() + rows, err := j.pool.Query(ctx, query, projectName, jobNames) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) } + defer rows.Close() var storeJobsWithUpstreams []JobWithUpstream - if err := j.db.WithContext(ctx).Raw(query, projectName.String(), jobNamesStr, projectName.String(), jobNames). - Scan(&storeJobsWithUpstreams).Error; err != nil { - return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) + for rows.Next() { + var jwu JobWithUpstream + err := rows.Scan(&jwu.JobName, &jwu.ProjectName, &jwu.UpstreamJobName, &jwu.UpstreamProjectName, + &jwu.UpstreamNamespaceName, &jwu.UpstreamResourceURN, &jwu.UpstreamTaskName, &jwu.UpstreamType, &jwu.UpstreamExternal) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(job.EntityJob, "job upstream not found") + } + + return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + } + storeJobsWithUpstreams = append(storeJobsWithUpstreams, jwu) } return j.toJobNameWithUpstreams(storeJobsWithUpstreams) @@ -343,7 +346,7 @@ func (JobRepository) toUpstreams(storeUpstreams []JobWithUpstream) ([]*job.Upstr upstream := job.NewUpstreamResolved(upstreamName, storeUpstream.UpstreamHost, resourceURN, upstreamTenant, upstreamType, taskName, storeUpstream.UpstreamExternal) upstreams = append(upstreams, upstream) } - if err := errors.MultiToError(me); err != nil { + if err := me.ToErr(); err != nil { return nil, err } return upstreams, nil @@ -364,61 +367,71 @@ func (j JobRepository) GetByJobName(ctx context.Context, projectName tenant.Proj } func (j JobRepository) GetAllByProjectName(ctx context.Context, projectName tenant.ProjectName) ([]*job.Job, error) { - specs := []Spec{} me := errors.NewMultiError("get all job specs by project name errors") getAllByProjectName := `SELECT * FROM job -WHERE project_name = ? 
-AND deleted_at IS NULL; -` - if err := j.db.WithContext(ctx).Raw(getAllByProjectName, projectName).Find(&specs).Error; err != nil { - return nil, err +WHERE project_name = $1 +AND deleted_at IS NULL;` + + rows, err := j.pool.Query(ctx, getAllByProjectName, projectName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while jobs for project: "+projectName.String(), err) } + defer rows.Close() - jobs := []*job.Job{} - for _, spec := range specs { - job, err := specToJob(&spec) + var jobs []*job.Job + for rows.Next() { + spec, err := FromRow(rows) if err != nil { me.Append(err) continue } - jobs = append(jobs, job) - } - if len(me.Errors) > 0 { - return jobs, me + + jobSpec, err := specToJob(spec) + if err != nil { + me.Append(err) + continue + } + + jobs = append(jobs, jobSpec) } - return jobs, nil + return jobs, me.ToErr() } func (j JobRepository) GetAllByResourceDestination(ctx context.Context, resourceDestination job.ResourceURN) ([]*job.Job, error) { - specs := []Spec{} me := errors.NewMultiError("get all job specs by resource destination") - getAllByProjectName := `SELECT * + getAllByDestination := `SELECT * FROM job -WHERE destination = ? 
-AND deleted_at IS NULL; -` - if err := j.db.WithContext(ctx).Raw(getAllByProjectName, resourceDestination).Find(&specs).Error; err != nil { - return nil, err +WHERE destination = $1 +AND deleted_at IS NULL;` + + rows, err := j.pool.Query(ctx, getAllByDestination, resourceDestination) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while jobs for destination: "+resourceDestination.String(), err) } + defer rows.Close() - jobs := []*job.Job{} - for _, spec := range specs { - job, err := specToJob(&spec) + var jobs []*job.Job + for rows.Next() { + spec, err := FromRow(rows) if err != nil { me.Append(err) continue } - jobs = append(jobs, job) - } - if len(me.Errors) > 0 { - return jobs, me + + jobSpec, err := specToJob(spec) + if err != nil { + me.Append(err) + continue + } + + jobs = append(jobs, jobSpec) } - return jobs, nil + return jobs, me.ToErr() } func specToJob(spec *Spec) (*job.Job, error) { @@ -468,93 +481,99 @@ func (j JobRepository) ReplaceUpstreams(ctx context.Context, jobsWithUpstreams [ storageJobUpstreams = append(storageJobUpstreams, upstream...) 
} - return j.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - var jobFullName []string - for _, upstream := range storageJobUpstreams { - jobFullName = append(jobFullName, upstream.getJobFullName()) - } + tx, err := j.pool.Begin(ctx) + if err != nil { + return errors.InternalError(job.EntityJob, "unable to update upstreams", err) + } - if err := j.deleteUpstreams(tx, jobFullName); err != nil { - return err - } - return j.insertUpstreams(tx, storageJobUpstreams) - }) + var jobFullName []string + for _, upstream := range storageJobUpstreams { + jobFullName = append(jobFullName, upstream.getJobFullName()) + } + + if err = j.deleteUpstreams(ctx, tx, jobFullName); err != nil { + tx.Rollback(ctx) + return err + } + if err = j.insertUpstreams(ctx, tx, storageJobUpstreams); err != nil { + tx.Rollback(ctx) + return err + } + + tx.Commit(ctx) + return nil } -func (JobRepository) insertUpstreams(tx *gorm.DB, storageJobUpstreams []*JobWithUpstream) error { +func (JobRepository) insertUpstreams(ctx context.Context, tx pgx.Tx, storageJobUpstreams []*JobWithUpstream) error { insertResolvedUpstreamQuery := ` INSERT INTO job_upstream ( - job_id, job_name, project_name, - upstream_job_id, upstream_job_name, upstream_resource_urn, + job_id, job_name, project_name, + upstream_job_id, upstream_job_name, upstream_resource_urn, upstream_project_name, upstream_namespace_name, upstream_host, upstream_task_name, upstream_external, upstream_type, upstream_state, created_at ) VALUES ( - (select id FROM job WHERE name = ?), ?, ?, - (select id FROM job WHERE name = ?), ?, ?, - ?, ?, ?, - ?, ?, - ?, ?, + (select id FROM job WHERE name = $1), $1, $2, + (select id FROM job WHERE name = $3), $3, $4, + $5, $6, $7, + $8, $9, + $10, $11, NOW() -); -` +);` insertUnresolvedUpstreamQuery := ` INSERT INTO job_upstream ( - job_id, job_name, project_name, + job_id, job_name, project_name, upstream_job_name, upstream_resource_urn, upstream_project_name, upstream_type, upstream_state, created_at ) 
VALUES ( - (select id FROM job WHERE name = ?), ?, ?, - ?, ?, ?, - ?, ?, + (select id FROM job WHERE name = $1), $1, $2, + $3, $4, $5, + $6, $7, NOW() ); ` + var tag pgconn.CommandTag + var err error for _, upstream := range storageJobUpstreams { - var result *gorm.DB if upstream.UpstreamState == job.UpstreamStateResolved.String() { - result = tx.Exec(insertResolvedUpstreamQuery, - upstream.JobName, upstream.JobName, upstream.ProjectName, - upstream.UpstreamJobName, upstream.UpstreamJobName, upstream.UpstreamResourceURN, + tag, err = tx.Exec(ctx, insertResolvedUpstreamQuery, + upstream.JobName, upstream.ProjectName, + upstream.UpstreamJobName, upstream.UpstreamResourceURN, upstream.UpstreamProjectName, upstream.UpstreamNamespaceName, upstream.UpstreamHost, upstream.UpstreamTaskName, upstream.UpstreamExternal, upstream.UpstreamType, upstream.UpstreamState) } else { - result = tx.Exec(insertUnresolvedUpstreamQuery, - upstream.JobName, upstream.JobName, upstream.ProjectName, + tag, err = tx.Exec(ctx, insertUnresolvedUpstreamQuery, + upstream.JobName, upstream.ProjectName, upstream.UpstreamJobName, upstream.UpstreamResourceURN, upstream.UpstreamProjectName, upstream.UpstreamType, upstream.UpstreamState) } - if result.Error != nil { - return errors.NewError(errors.ErrInternalError, job.EntityJob, fmt.Sprintf("unable to save job upstream: %s", result.Error)) + if err != nil { + return errors.InternalError(job.EntityJob, "unable to save job upstream", err) } - if result.RowsAffected == 0 { + if tag.RowsAffected() == 0 { return errors.NewError(errors.ErrInternalError, job.EntityJob, "unable to save job upstream, rows affected 0") } } return nil } -func (JobRepository) deleteUpstreams(tx *gorm.DB, jobUpstreams []string) error { - var result *gorm.DB - +func (JobRepository) deleteUpstreams(ctx context.Context, tx pgx.Tx, jobUpstreams []string) error { deleteForProjectScope := `DELETE FROM job_upstream -WHERE project_name || '/' || job_name in (?); -` - - result = 
tx.Exec(deleteForProjectScope, jobUpstreams) +WHERE project_name || '/' || job_name = any ($1);` - if result.Error != nil { - return errors.Wrap(job.EntityJob, "error during delete of job upstream", result.Error) + _, err := tx.Exec(ctx, deleteForProjectScope, jobUpstreams) + if err != nil { + return errors.Wrap(job.EntityJob, "error during delete of job upstream", err) } return nil @@ -602,15 +621,15 @@ func (j JobRepository) Delete(ctx context.Context, projectName tenant.ProjectNam func (j JobRepository) hardDelete(ctx context.Context, projectName tenant.ProjectName, jobName job.Name) error { query := ` -DELETE +DELETE FROM job -WHERE project_name = ? AND name = ? -` - result := j.db.WithContext(ctx).Exec(query, projectName.String(), jobName.String()) - if result.Error != nil { - return errors.Wrap(job.EntityJob, "error during job deletion", result.Error) +WHERE project_name = $1 AND name = $2` + + tag, err := j.pool.Exec(ctx, query, projectName, jobName) + if err != nil { + return errors.Wrap(job.EntityJob, "error during job deletion", err) } - if result.RowsAffected == 0 { + if tag.RowsAffected() == 0 { return errors.NewError(errors.ErrInternalError, job.EntityJob, fmt.Sprintf("job %s failed to be deleted", jobName.String())) } return nil @@ -620,42 +639,48 @@ func (j JobRepository) softDelete(ctx context.Context, projectName tenant.Projec query := ` UPDATE job SET deleted_at = current_timestamp -WHERE project_name = ? AND name = ? 
-` - result := j.db.WithContext(ctx).Exec(query, projectName.String(), jobName.String()) - if result.Error != nil { - return errors.Wrap(job.EntityJob, "error during job deletion", result.Error) +WHERE project_name = $1 AND name = $2` + + tag, err := j.pool.Exec(ctx, query, projectName, jobName) + if err != nil { + return errors.Wrap(job.EntityJob, "error during job deletion", err) } - if result.RowsAffected == 0 { + if tag.RowsAffected() == 0 { return errors.NewError(errors.ErrInternalError, job.EntityJob, fmt.Sprintf("job %s failed to be deleted", jobName.String())) } return nil } func (j JobRepository) GetAllByTenant(ctx context.Context, jobTenant tenant.Tenant) ([]*job.Job, error) { - var specs []Spec me := errors.NewMultiError("get all job specs by project name errors") getAllByProjectName := `SELECT * FROM job -WHERE project_name = ? -AND namespace_name = ? -AND deleted_at IS NULL; -` - if err := j.db.WithContext(ctx).Raw(getAllByProjectName, jobTenant.ProjectName().String(), jobTenant.NamespaceName().String()).Find(&specs).Error; err != nil { - return nil, err +WHERE project_name = $1 +AND namespace_name = $2 +AND deleted_at IS NULL;` + + rows, err := j.pool.Query(ctx, getAllByProjectName, jobTenant.ProjectName(), jobTenant.NamespaceName()) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while jobs for project: "+jobTenant.ProjectName().String(), err) } + defer rows.Close() var jobs []*job.Job - for _, spec := range specs { - jobSpec, err := fromStorageSpec(&spec) + for rows.Next() { + spec, err := FromRow(rows) if err != nil { me.Append(err) continue } - // TODO: pass destination and sources values - job := job.NewJob(jobTenant, jobSpec, "", nil) - jobs = append(jobs, job) + + jobSpec, err := specToJob(spec) + if err != nil { + me.Append(err) + continue + } + + jobs = append(jobs, jobSpec) } return jobs, errors.MultiToError(me) @@ -664,15 +689,24 @@ AND deleted_at IS NULL; func (j JobRepository) GetUpstreams(ctx context.Context, 
projectName tenant.ProjectName, jobName job.Name) ([]*job.Upstream, error) { query := ` SELECT - * + job_name, project_name, upstream_job_name, upstream_resource_urn, upstream_project_name, + upstream_namespace_name, upstream_task_name, upstream_host, upstream_type, upstream_state, upstream_external FROM job_upstream -WHERE project_name=? AND job_name=?; -` +WHERE project_name=$1 AND job_name=$2;` + + rows, err := j.pool.Query(ctx, query, projectName, jobName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting jobs with upstreams", err) + } + defer rows.Close() var storeJobsWithUpstreams []JobWithUpstream - if err := j.db.WithContext(ctx).Raw(query, projectName.String(), jobName.String()). - Scan(&storeJobsWithUpstreams).Error; err != nil { - return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) + for rows.Next() { + upstream, err := UpstreamFromRow(rows) + if err != nil { + return nil, err + } + storeJobsWithUpstreams = append(storeJobsWithUpstreams, *upstream) } return j.toUpstreams(storeJobsWithUpstreams) @@ -683,14 +717,23 @@ func (j JobRepository) GetDownstreamByDestination(ctx context.Context, projectNa SELECT name as job_name, project_name, namespace_name, task_name FROM job -WHERE project_name = ? AND ? = ANY(sources) -AND deleted_at IS NULL; -` +WHERE project_name = $1 AND $2 = ANY(sources) +AND deleted_at IS NULL;` + + rows, err := j.pool.Query(ctx, query, projectName, destination) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting job downstream", err) + } + defer rows.Close() var storeDownstream []Downstream - if err := j.db.WithContext(ctx).Raw(query, projectName.String(), destination.String()). 
- Scan(&storeDownstream).Error; err != nil { - return nil, errors.Wrap(job.EntityJob, "error while getting downstream by destination", err) + for rows.Next() { + var downstream Downstream + err := rows.Scan(&downstream.JobName, &downstream.ProjectName, &downstream.NamespaceName, &downstream.TaskName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting downstream by destination", err) + } + storeDownstream = append(storeDownstream, downstream) } return fromStoreDownstream(storeDownstream) @@ -709,14 +752,23 @@ SELECT j.name as job_name, j.project_name, j.namespace_name, j.task_name FROM job_upstream ju JOIN job j ON (ju.job_name = j.name AND ju.project_name = j.project_name) -WHERE upstream_project_name=? AND upstream_job_name=? -AND j.deleted_at IS NULL; -` +WHERE upstream_project_name=$1 AND upstream_job_name=$2 +AND j.deleted_at IS NULL;` + + rows, err := j.pool.Query(ctx, query, projectName, jobName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting job downstream by job name", err) + } + defer rows.Close() var storeDownstream []Downstream - if err := j.db.WithContext(ctx).Raw(query, projectName.String(), jobName.String()). 
- Scan(&storeDownstream).Error; err != nil { - return nil, errors.Wrap(job.EntityJob, "error while getting downstream by job name", err) + for rows.Next() { + var downstream Downstream + err := rows.Scan(&downstream.JobName, &downstream.ProjectName, &downstream.NamespaceName, &downstream.TaskName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting downstream by destination", err) + } + storeDownstream = append(storeDownstream, downstream) } return fromStoreDownstream(storeDownstream) diff --git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index 2eb82a7fea..c615e67ce2 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -6,8 +6,8 @@ import ( "context" "testing" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "gorm.io/gorm" "github.com/odpf/optimus/core/job" "github.com/odpf/optimus/core/tenant" @@ -56,11 +56,9 @@ func TestPostgresJobRepository(t *testing.T) { sampleTenant, err := tenant.NewTenant(proj.Name().String(), namespace.Name().String()) assert.NoError(t, err) - dbSetup := func() *gorm.DB { - dbConn := setup.TestDB() - setup.TruncateTables(dbConn) - + dbSetup := func() *pgxpool.Pool { pool := setup.TestPool() + setup.TruncateTablesWith(pool) projRepo := tenantPostgres.NewProjectRepository(pool) assert.NoError(t, projRepo.Save(ctx, proj)) assert.NoError(t, projRepo.Save(ctx, otherProj)) @@ -70,7 +68,7 @@ func TestPostgresJobRepository(t *testing.T) { assert.NoError(t, namespaceRepo.Save(ctx, otherNamespace)) assert.NoError(t, namespaceRepo.Save(ctx, otherNamespace2)) - return dbConn + return pool } jobVersion, err := job.VersionFrom(1) @@ -841,6 +839,7 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("GetUpstreams", func(t *testing.T) { t.Run("returns upstream given project and job name", func(t *testing.T) { + // TODO: test is failing for nullable fields in upstream 
db := dbSetup() jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() diff --git a/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql b/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql index 65cedac9f6..1cbbc36998 100644 --- a/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql +++ b/internal/store/postgres/migrations/000045_update_foreign_keys.down.sql @@ -1,3 +1,5 @@ +ALTER TABLE resource DROP IF EXISTS id; + ALTER TABLE resource DROP CONSTRAINT IF EXISTS pk_resource; diff --git a/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql b/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql index a37de65ce0..7f8f102c08 100644 --- a/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql +++ b/internal/store/postgres/migrations/000045_update_foreign_keys.up.sql @@ -1,3 +1,5 @@ +ALTER TABLE resource ADD IF NOT EXISTS id UUID DEFAULT uuid_generate_v4(); + ALTER TABLE resource ADD CONSTRAINT pk_resource PRIMARY KEY (project_name, namespace_name, store, full_name); diff --git a/internal/store/postgres/tracer.go b/internal/store/postgres/tracer.go index 2a1184ee11..21fb4a9f08 100644 --- a/internal/store/postgres/tracer.go +++ b/internal/store/postgres/tracer.go @@ -59,6 +59,10 @@ func (*tracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQue span := trace.SpanFromContext(ctx) recordError(span, data.Err) + span.SetAttributes( + attribute.String("result", data.CommandTag.String()), + ) + span.End() } diff --git a/server/optimus.go b/server/optimus.go index 8bb1fc4f38..1bffb539ce 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -303,7 +303,7 @@ func (s *OptimusServer) setupHandlers() error { newJobRunService := schedulerService.NewJobRunService(s.logger, jobProviderRepo, jobRunRepo, operatorRunRepository, newScheduler, newPriorityResolver, jobInputCompiler) 
// Job Bounded Context Setup - jJobRepo := jRepo.NewJobRepository(s.dbConn) + jJobRepo := jRepo.NewJobRepository(s.dbPool) jPluginService := jService.NewJobPluginService(tSecretService, s.pluginRepo, newEngine, s.logger) jExternalUpstreamResolver, _ := jResolver.NewExternalUpstreamResolver(s.conf.ResourceManagers) jInternalUpstreamResolver := jResolver.NewInternalUpstreamResolver(jJobRepo) From 779744f0f7bc18d87eeaf4e7f002ec10fd69e4a7 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Wed, 21 Dec 2022 14:32:27 +0530 Subject: [PATCH 08/25] refactor: change repo for scheduler bc --- .../scheduler/job_operator_repository.go | 31 +++-- .../postgres/scheduler/job_repository.go | 130 ++++++++++++------ .../postgres/scheduler/job_run_repository.go | 61 ++++---- 3 files changed, 138 insertions(+), 84 deletions(-) diff --git a/internal/store/postgres/scheduler/job_operator_repository.go b/internal/store/postgres/scheduler/job_operator_repository.go index 69f2304f80..09353a271d 100644 --- a/internal/store/postgres/scheduler/job_operator_repository.go +++ b/internal/store/postgres/scheduler/job_operator_repository.go @@ -2,10 +2,12 @@ package scheduler import ( "context" + "database/sql" "time" "github.com/google/uuid" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/scheduler" "github.com/odpf/optimus/internal/errors" @@ -18,7 +20,8 @@ const ( ) type OperatorRunRepository struct { - db *gorm.DB + // TODO: Add test + pool *pgxpool.Pool } type operatorRun struct { @@ -35,7 +38,7 @@ type operatorRun struct { CreatedAt time.Time `gorm:"not null" json:"created_at"` UpdatedAt time.Time `gorm:"not null" json:"updated_at"` // TODO: add a remarks colum to capture failure reason - DeletedAt gorm.DeletedAt + DeletedAt sql.NullTime } func operatorTypeToTableName(operatorType scheduler.OperatorType) (string, error) { @@ -73,13 +76,15 @@ func (o *OperatorRunRepository) GetOperatorRun(ctx context.Context, name string, if err 
!= nil { return nil, err } - getJobRunByID := `SELECT id, name, job_run_id, status, start_time, end_time FROM ` + operatorTableName + ` j where job_run_id = ? and name = ? order by created_at desc limit 1` - k := o.db.WithContext(ctx).Raw(getJobRunByID, jobRunID, name).First(&opRun) - err = k.Error + getJobRunByID := `SELECT id, name, job_run_id, status, start_time, end_time FROM ` + operatorTableName + ` j where job_run_id = $1 and name = $2 order by created_at desc limit 1` + err = o.pool.QueryRow(ctx, getJobRunByID, jobRunID, name). + Scan(&opRun.ID, &opRun.Name, &opRun.JobRunID, &opRun.Status, &opRun.StartTime, &opRun.EndTime) + if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return nil, errors.NotFound(scheduler.EntityJobRun, "no record for "+operatorType.String()+"/"+name+" for job_run ID: "+jobRunID.String()) } + return nil, errors.Wrap(scheduler.EntityJobRun, "error while getting operator run", err) } return opRun.toOperatorRun() } @@ -88,8 +93,9 @@ func (o *OperatorRunRepository) CreateOperatorRun(ctx context.Context, name stri if err != nil { return err } - insertOperatorRun := `INSERT INTO ` + operatorTableName + ` ( job_run_id , name , status, start_time, end_time, created_at, updated_at) values ( ?, ?, ?, ?, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())` - return o.db.WithContext(ctx).Exec(insertOperatorRun, jobRunID, name, scheduler.StateRunning, startTime).Error + insertOperatorRun := `INSERT INTO ` + operatorTableName + ` ( job_run_id , name , status, start_time, end_time, created_at, updated_at) values ( $1, $2, $3, $4, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())` + _, err = o.pool.Exec(ctx, insertOperatorRun, jobRunID, name, scheduler.StateRunning, startTime) + return errors.WrapIfErr(scheduler.EntityJobRun, "error while inserting the run", err) } func (o *OperatorRunRepository) UpdateOperatorRun(ctx context.Context, operatorType scheduler.OperatorType, operatorRunID uuid.UUID, eventTime 
time.Time, state scheduler.State) error { @@ -98,11 +104,12 @@ func (o *OperatorRunRepository) UpdateOperatorRun(ctx context.Context, operatorT return err } updateJobRun := "update " + operatorTableName + " set status = ?, end_time = ?, updated_at = NOW() where id = ?" - return o.db.WithContext(ctx).Exec(updateJobRun, state.String(), eventTime, operatorRunID).Error + _, err = o.pool.Exec(ctx, updateJobRun, state.String(), eventTime, operatorRunID) + return errors.WrapIfErr(scheduler.EntityJobRun, "error while updating the run", err) } -func NewOperatorRunRepository(db *gorm.DB) *OperatorRunRepository { +func NewOperatorRunRepository(pool *pgxpool.Pool) *OperatorRunRepository { return &OperatorRunRepository{ - db: db, + pool: pool, } } diff --git a/internal/store/postgres/scheduler/job_repository.go b/internal/store/postgres/scheduler/job_repository.go index a8fcc8204a..c3ea90c662 100644 --- a/internal/store/postgres/scheduler/job_repository.go +++ b/internal/store/postgres/scheduler/job_repository.go @@ -2,16 +2,18 @@ package scheduler import ( "context" + "database/sql" "encoding/json" "fmt" - "strings" "time" "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/lib/pq" - "gorm.io/datatypes" - "gorm.io/gorm" + "github.com/odpf/optimus/core/job" + "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/scheduler" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" @@ -20,7 +22,7 @@ import ( ) type JobRepository struct { - db *gorm.DB + pool *pgxpool.Pool } type JobUpstreams struct { JobID uuid.UUID @@ -41,6 +43,23 @@ type JobUpstreams struct { UpdatedAt time.Time `gorm:"not null" json:"updated_at"` } +func UpstreamFromRow(row pgx.Row) (*JobUpstreams, error) { + var js JobUpstreams + + err := row.Scan(&js.JobName, &js.ProjectName, &js.UpstreamJobName, &js.UpstreamResourceUrn, + &js.UpstreamProjectName, &js.UpstreamNamespaceName, &js.UpstreamTaskName, &js.UpstreamHost, + 
&js.UpstreamType, &js.UpstreamState, &js.UpstreamExternal) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(job.EntityJob, "job upstream not found") + } + + return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + } + + return &js, nil +} + func (j *JobUpstreams) toJobUpstreams() (*scheduler.JobUpstream, error) { t, err := tenant.NewTenant(j.UpstreamProjectName, j.UpstreamNamespaceName) if err != nil { @@ -65,7 +84,7 @@ type Job struct { Version int Owner string Description string - Labels datatypes.JSON + Labels json.RawMessage StartDate time.Time EndDate *time.Time @@ -74,25 +93,25 @@ type Job struct { // Behavior DependsOnPast bool `json:"depends_on_past"` CatchUp bool `json:"catch_up"` - Retry datatypes.JSON - Alert datatypes.JSON + Retry json.RawMessage + Alert json.RawMessage // Upstreams StaticUpstreams pq.StringArray `gorm:"type:varchar(220)[]" json:"static_upstreams"` // ExternalUpstreams - HTTPUpstreams datatypes.JSON `json:"http_upstreams"` + HTTPUpstreams json.RawMessage `json:"http_upstreams"` TaskName string - TaskConfig datatypes.JSON + TaskConfig json.RawMessage WindowSize string WindowOffset string WindowTruncateTo string - Assets datatypes.JSON - Hooks datatypes.JSON - Metadata datatypes.JSON + Assets json.RawMessage + Hooks json.RawMessage + Metadata json.RawMessage Destination string Sources pq.StringArray `gorm:"type:varchar(300)[]"` @@ -102,7 +121,7 @@ type Job struct { CreatedAt time.Time `gorm:"not null" json:"created_at"` UpdatedAt time.Time `gorm:"not null" json:"updated_at"` - DeletedAt gorm.DeletedAt + DeletedAt sql.NullTime } func (j *Job) toJob() (*scheduler.Job, error) { @@ -200,29 +219,40 @@ func (j *Job) toJobWithDetails() (*scheduler.JobWithDetails, error) { return schedulerJobWithDetails, nil } -func (j *JobRepository) GetJob(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName) (*scheduler.Job, error) { - var spec Job +func 
FromRow(row pgx.Row) (*Job, error) { + var js Job + + err := row.Scan(&js.ID, &js.Name, &js.Version, &js.Owner, &js.Description, + &js.Labels, &js.StartDate, &js.EndDate, &js.Interval, &js.DependsOnPast, + &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, + &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, + &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, + &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) - getJobByNameAtProject := `SELECT * FROM job WHERE name = ? AND project_name = ?` - err := j.db.WithContext(ctx).Raw(getJobByNameAtProject, jobName.String(), projectName.String()). - First(&spec).Error if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(scheduler.EntityJobRun, "unable to find job:"+jobName.String()+" in project:"+projectName.String()) + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(job.EntityJob, "job not found") } + + return nil, errors.Wrap(scheduler.EntityJobRun, "error in reading row for resource", err) + } + + return &js, nil +} + +func (j *JobRepository) GetJob(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName) (*scheduler.Job, error) { + getJobByNameAtProject := `SELECT * FROM job WHERE name = $1 AND project_name = $2` + spec, err := FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) + if err != nil { return nil, err } return spec.toJob() } + func (j *JobRepository) GetJobDetails(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName) (*scheduler.JobWithDetails, error) { - var spec Job - getJobByNameAtProject := `SELECT * FROM job WHERE name = ? AND project_name = ?` - err := j.db.WithContext(ctx).Raw(getJobByNameAtProject, jobName.String(), projectName.String()). 
- First(&spec).Error + getJobByNameAtProject := `SELECT * FROM job WHERE name = $1 AND project_name = $2` + spec, err := FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(scheduler.EntityJobRun, "unable to find job:"+jobName.String()+" in project:"+projectName.String()) - } return nil, err } return spec.toJobWithDetails() @@ -248,29 +278,39 @@ func groupUpstreamsByJobName(jobUpstreams []JobUpstreams) (map[string][]*schedul } func (j *JobRepository) getJobsUpstreams(ctx context.Context, projectName tenant.ProjectName, jobNames []string) (map[string][]*scheduler.JobUpstream, error) { - var jobsUpstreams []JobUpstreams - jobNameListString := strings.Join(jobNames, "', '") - getJobUpstreamsByNameAtProject := fmt.Sprintf("SELECT * FROM job_upstream WHERE project_name = '%s' and job_name in ('%s')", projectName.String(), jobNameListString) - err := j.db.WithContext(ctx).Raw(getJobUpstreamsByNameAtProject, projectName.String()).Find(&jobsUpstreams).Error + getJobUpstreamsByNameAtProject := "SELECT * FROM job_upstream WHERE project_name = $1 and job_name = any ($2)" + rows, err := j.pool.Query(ctx, getJobUpstreamsByNameAtProject, projectName, jobNames) if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(scheduler.EntityJobRun, "unable to find jobsUpstreams in project:"+projectName.String()+" for:"+jobNameListString) + return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) + } + defer rows.Close() + + var upstreams []JobUpstreams + for rows.Next() { + var jwu JobUpstreams + err := rows.Scan(&jwu.JobName, &jwu.ProjectName, &jwu.UpstreamJobName, &jwu.UpstreamProjectName, + &jwu.UpstreamNamespaceName, &jwu.UpstreamResourceUrn, &jwu.UpstreamTaskName, &jwu.UpstreamType, &jwu.UpstreamExternal) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, 
errors.NotFound(scheduler.EntityJobRun, "job upstream not found") + } + + return nil, errors.Wrap(scheduler.EntityJobRun, "error in reading row for resource", err) } - return nil, err + upstreams = append(upstreams, jwu) } - return groupUpstreamsByJobName(jobsUpstreams) + + return groupUpstreamsByJobName(upstreams) } func (j *JobRepository) GetAll(ctx context.Context, projectName tenant.ProjectName) ([]*scheduler.JobWithDetails, error) { var specs []Job - getJobByNameAtProject := `SELECT * FROM job WHERE project_name = ?` - err := j.db.WithContext(ctx).Raw(getJobByNameAtProject, projectName.String()).Find(&specs).Error - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, errors.NotFound(scheduler.EntityJobRun, "unable to find jobs in project:"+projectName.String()) - } - return nil, err - } + // getJobByNameAtProject := `SELECT * FROM job WHERE project_name = $1` + //rows, err := j.pool.Query(ctx, getJobByNameAtProject, projectName) + //if err != nil { + // return nil, errors.Wrap(job.EntityJob, "error while getting all jobs", err) + //} + jobsMap := map[string]*scheduler.JobWithDetails{} var jobNameList []string multiError := errors.NewMultiError("errorInGetAll") @@ -294,8 +334,8 @@ func (j *JobRepository) GetAll(ctx context.Context, projectName tenant.ProjectNa return utils.MapToList[*scheduler.JobWithDetails](jobsMap), errors.MultiToError(multiError) } -func NewJobProviderRepository(db *gorm.DB) *JobRepository { +func NewJobProviderRepository(pool *pgxpool.Pool) *JobRepository { return &JobRepository{ - db: db, + pool: pool, } } diff --git a/internal/store/postgres/scheduler/job_run_repository.go b/internal/store/postgres/scheduler/job_run_repository.go index d7a922b5bd..40056a4cb4 100644 --- a/internal/store/postgres/scheduler/job_run_repository.go +++ b/internal/store/postgres/scheduler/job_run_repository.go @@ -6,7 +6,8 @@ import ( "time" "github.com/google/uuid" - "gorm.io/gorm" + "github.com/jackc/pgx/v5" + 
"github.com/jackc/pgx/v5/pgxpool" "github.com/odpf/optimus/core/scheduler" "github.com/odpf/optimus/core/tenant" @@ -14,7 +15,7 @@ import ( ) type JobRunRepository struct { - db *gorm.DB + pool *pgxpool.Pool } type jobRun struct { @@ -49,53 +50,59 @@ func (j jobRun) toJobRun() (*scheduler.JobRun, error) { } func (j *JobRunRepository) GetByID(ctx context.Context, id scheduler.JobRunID) (*scheduler.JobRun, error) { - var jobRun jobRun - getJobRunByID := `SELECT job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run j where id = ?` - err := j.db.WithContext(ctx).Raw(getJobRunByID, id).First(&jobRun).Error + var jr jobRun + getJobRunByID := `SELECT job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run where id = $1` + err := j.pool.QueryRow(ctx, getJobRunByID, id). + Scan(&jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, + &jr.Status, &jr.SLADefinition) if err != nil { - return &scheduler.JobRun{}, err + return nil, err } - return jobRun.toJobRun() + return jr.toJobRun() } func (j *JobRunRepository) GetByScheduledAt(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (*scheduler.JobRun, error) { - var jobRun jobRun - getJobRunByID := `SELECT id, job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run j where project_name = ? and namespace_name = ? and job_name = ? and scheduled_at = ? 
order by created_at desc limit 1`
-	err := j.db.WithContext(ctx).Raw(getJobRunByID, t.ProjectName(), t.NamespaceName(), jobName, scheduledAt).First(&jobRun).Error
+	var jr jobRun
+	getJobRunByID := `SELECT id, job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run j where project_name = $1 and namespace_name = $2 and job_name = $3 and scheduled_at = $4 order by created_at desc limit 1`
+	err := j.pool.QueryRow(ctx, getJobRunByID, t.ProjectName(), t.NamespaceName(), jobName, scheduledAt).
+		Scan(&jr.ID, &jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime,
+			&jr.Status, &jr.SLADefinition)
+
 	if err != nil {
-		if errors.Is(err, gorm.ErrRecordNotFound) {
+		if errors.Is(err, pgx.ErrNoRows) {
 			return nil, errors.NotFound(scheduler.EntityJobRun, "no record for job:"+jobName.String()+" scheduled at: "+scheduledAt.String())
 		}
-		return nil, err
+		return nil, errors.Wrap(scheduler.EntityJobRun, "error while getting run", err)
 	}
-	return jobRun.toJobRun()
+	return jr.toJobRun()
 }

 func (j *JobRunRepository) Update(ctx context.Context, jobRunID uuid.UUID, endTime time.Time, status scheduler.State) error {
	updateJobRun := "update job_run set status = ?, end_time = ? , updated_at = NOW() where id = ?"
-	return j.db.WithContext(ctx).Exec(updateJobRun, status.String(), endTime, jobRunID).Error
+	_, err := j.pool.Exec(ctx, updateJobRun, status, endTime, jobRunID) // FIXME(review): updateJobRun still uses gorm-style '?' placeholders; pgx requires $1, $2, $3
+	return errors.WrapIfErr(scheduler.EntityJobRun, "unable to update job run", err)
 }

 func (j *JobRunRepository) UpdateSLA(ctx context.Context, slaObjects []*scheduler.SLAObject) error {
-	jobIDListString := ""
-	totalIds := len(slaObjects)
-	for i, slaObject := range slaObjects {
-		jobIDListString += fmt.Sprintf("('%s','%s')", slaObject.JobName, slaObject.JobScheduledAt.Format("2006-01-02 15:04:05"))
-		if !(i == totalIds-1) {
-			jobIDListString += ", "
-		}
+	var jobIDList []string
+	for _, slaObject := range slaObjects {
+		jobIDs := fmt.Sprintf("('%s','%s')", slaObject.JobName, slaObject.JobScheduledAt.Format("2006-01-02 15:04:05"))
+		jobIDList = append(jobIDList, jobIDs)
 	}
-	query := "update job_run set sla_alert = True, updated_at = NOW() where (job_name, scheduled_at) in (" + jobIDListString + ")"
-	return j.db.WithContext(ctx).Exec(query).Error
+
+	query := "update job_run set sla_alert = True, updated_at = NOW() where (job_name, scheduled_at) = any ($1)" // FIXME(review): a (text,timestamp) row never equals a text element of $1, so no rows are ever updated; pass two parallel arrays and match via unnest, or build an IN (VALUES ...) list
+	_, err := j.pool.Exec(ctx, query, jobIDList)
+	return errors.WrapIfErr(scheduler.EntityJobRun, "unable to update SLA", err)
 }

 func (j *JobRunRepository) Create(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time, slaDefinitionInSec int64) error {
-	insertJobRun := `INSERT INTO job_run (job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition, 
created_at, updated_at) values ($1, $2, $3, $4, NOW(), TIMESTAMP '3000-01-01 00:00:00', $5, $6, NOW(), NOW())`
+	_, err := j.pool.Exec(ctx, insertJobRun, jobName, t.NamespaceName(), t.ProjectName(), scheduledAt, scheduler.StateRunning, slaDefinitionInSec)
+	return errors.WrapIfErr(scheduler.EntityJobRun, "unable to create job run", err)
 }

-func NewJobRunRepository(db *gorm.DB) *JobRunRepository {
+func NewJobRunRepository(pool *pgxpool.Pool) *JobRunRepository {
 	return &JobRunRepository{
-		db: db,
+		pool: pool,
 	}
 }

From 044248610c1db919137d71787108c0c137dddf87 Mon Sep 17 00:00:00 2001
From: Sandeep Bhardwaj
Date: Wed, 21 Dec 2022 14:33:28 +0530
Subject: [PATCH 09/25] refactor: move migration command to server

---
 client/cmd/commands.go                        |  2 -
 main.go                                       |  2 +
 server/cmd/migration/migrate_to.go            | 49 +++++++++++++++++++
 {client => server}/cmd/migration/migration.go |  1 +
 {client => server}/cmd/migration/rollback.go  | 16 ++----
 5 files changed, 57 insertions(+), 13 deletions(-)
 create mode 100644 server/cmd/migration/migrate_to.go
 rename {client => server}/cmd/migration/migration.go (88%)
 rename {client => server}/cmd/migration/rollback.go (70%)

diff --git a/client/cmd/commands.go b/client/cmd/commands.go
index 90ce52631f..a999531c70 100644
--- a/client/cmd/commands.go
+++ b/client/cmd/commands.go
@@ -10,7 +10,6 @@ import (
 	"github.com/odpf/optimus/client/cmd/extension"
 	"github.com/odpf/optimus/client/cmd/initialize"
 	"github.com/odpf/optimus/client/cmd/job"
-	"github.com/odpf/optimus/client/cmd/migration"
 	"github.com/odpf/optimus/client/cmd/namespace"
 	"github.com/odpf/optimus/client/cmd/playground"
 	"github.com/odpf/optimus/client/cmd/plugin"
@@ -64,7 +63,6 @@ func New() *cli.Command {
 		deploy.NewDeployCommand(),
 		initialize.NewInitializeCommand(),
 		job.NewJobCommand(),
-		migration.NewMigrationCommand(),
 		namespace.NewNamespaceCommand(),
 		project.NewProjectCommand(),
 		replay.NewReplayCommand(),
diff --git a/main.go b/main.go
index 8d6e20f27f..782a59bf4d 100644
--- a/main.go
+++ 
b/main.go @@ -10,6 +10,7 @@ import ( clientCmd "github.com/odpf/optimus/client/cmd" _ "github.com/odpf/optimus/client/extension/provider" server "github.com/odpf/optimus/server/cmd" + "github.com/odpf/optimus/server/cmd/migration" ) var errRequestFail = errors.New("🔥 unable to complete request successfully") @@ -23,6 +24,7 @@ func main() { // Add Server related commands command.AddCommand( server.NewServeCommand(), + migration.NewMigrationCommand(), ) if err := command.Execute(); err != nil { diff --git a/server/cmd/migration/migrate_to.go b/server/cmd/migration/migrate_to.go new file mode 100644 index 0000000000..6634489e41 --- /dev/null +++ b/server/cmd/migration/migrate_to.go @@ -0,0 +1,49 @@ +package migration + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/odpf/optimus/config" + "github.com/odpf/optimus/internal/store/postgres" +) + +type migrateTo struct { + configFilePath string + version int +} + +// NewMigrateToCommand initializes command for migration to a specific version +func NewMigrateToCommand() *cobra.Command { + to := &migrateTo{} + cmd := &cobra.Command{ + Use: "to", + Short: "Command to migrate to specific migration version", + RunE: to.RunE, + } + cmd.Flags().StringVarP(&to.configFilePath, "config", "c", to.configFilePath, "File path for server configuration") + cmd.Flags().IntVarP(&to.version, "version", "v", -1, "Migration version to migrate the database to") + return cmd +} + +func (m *migrateTo) RunE(_ *cobra.Command, _ []string) error { + clientConfig, err := config.LoadServerConfig(m.configFilePath) + if err != nil { + return fmt.Errorf("error loading client config: %w", err) + } + + if m.version < 0 { + return fmt.Errorf("invalid migration version") + } + + dsn := clientConfig.Serve.DB.DSN + + fmt.Printf("Executing migration to version %d \n", m.version) + err = postgres.ToVersion(uint(m.version), dsn) + if err != nil { + return fmt.Errorf("error during migration: %w", err) + } + fmt.Println("Migration finished successfully") + 
return nil +} diff --git a/client/cmd/migration/migration.go b/server/cmd/migration/migration.go similarity index 88% rename from client/cmd/migration/migration.go rename to server/cmd/migration/migration.go index 06ae212dee..97f7a24d77 100644 --- a/client/cmd/migration/migration.go +++ b/server/cmd/migration/migration.go @@ -9,5 +9,6 @@ func NewMigrationCommand() *cobra.Command { Short: "Command to do migration activity", } cmd.AddCommand(NewRollbackCommand()) + cmd.AddCommand(NewMigrateToCommand()) return cmd } diff --git a/client/cmd/migration/rollback.go b/server/cmd/migration/rollback.go similarity index 70% rename from client/cmd/migration/rollback.go rename to server/cmd/migration/rollback.go index 7513622947..2cbd12a939 100644 --- a/client/cmd/migration/rollback.go +++ b/server/cmd/migration/rollback.go @@ -1,18 +1,17 @@ package migration import ( - "context" "fmt" "github.com/spf13/cobra" - "github.com/odpf/optimus/client/cmd/internal/logger" "github.com/odpf/optimus/config" "github.com/odpf/optimus/internal/store/postgres" ) type rollbackCommand struct { configFilePath string + count int } // NewRollbackCommand initializes command for migration rollback @@ -24,6 +23,7 @@ func NewRollbackCommand() *cobra.Command { RunE: rollback.RunE, } cmd.Flags().StringVarP(&rollback.configFilePath, "config", "c", rollback.configFilePath, "File path for server configuration") + cmd.Flags().IntVarP(&rollback.count, "count", "n", 1, "Number of migrations to rollback") return cmd } @@ -33,19 +33,13 @@ func (r *rollbackCommand) RunE(_ *cobra.Command, _ []string) error { return fmt.Errorf("error loading client config: %w", err) } - l := logger.NewClientLogger() dsn := clientConfig.Serve.DB.DSN - l.Info("initiating migration") - migration, err := postgres.NewMigration(l, config.BuildVersion, dsn) + fmt.Printf("Executing rollback for %d migrations\n", r.count) + err = postgres.Rollback(dsn, r.count) if err != nil { - return fmt.Errorf("error initializing migration: %w", err) - 
} - - l.Info("executing rollback") - if err := migration.Rollback(context.Background()); err != nil { return fmt.Errorf("error rolling back migration: %w", err) } - l.Info("rollback finished successfully") + fmt.Println("Rollback finished successfully") return nil } From 80cda9c2abb83fc99b12da6489939ac2a07038b5 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Wed, 21 Dec 2022 14:36:56 +0530 Subject: [PATCH 10/25] refactor: use pgx remove gorm --- go.mod | 12 - go.sum | 27 --- internal/store/postgres/job/adapter.go | 40 ++-- internal/store/postgres/migration.go | 224 +++--------------- internal/store/postgres/postgres.go | 146 ------------ .../postgres/resource/repository_test.go | 1 - internal/store/postgres/resource/resource.go | 4 +- server/optimus.go | 28 +-- tests/setup/database.go | 80 +++---- 9 files changed, 92 insertions(+), 470 deletions(-) delete mode 100644 internal/store/postgres/postgres.go diff --git a/go.mod b/go.mod index 8ca540966c..ed6aef6a51 100644 --- a/go.mod +++ b/go.mod @@ -57,9 +57,6 @@ require ( google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/datatypes v1.0.0 - gorm.io/driver/postgres v1.0.8 - gorm.io/gorm v1.21.16 ) require ( @@ -103,19 +100,11 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.11.0 // indirect - github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.10.0 // indirect - github.com/jackc/pgx/v4 v4.15.0 // indirect github.com/jackc/puddle/v2 v2.1.2 // indirect github.com/jeremywohl/flatten v1.0.1 // indirect github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e // 
indirect - github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jinzhu/now v1.1.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.15.1 // indirect @@ -124,7 +113,6 @@ require ( github.com/mattn/go-colorable v0.1.6 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mcuadros/go-defaults v1.2.0 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect diff --git a/go.sum b/go.sum index 56e434097a..f240676b39 100644 --- a/go.sum +++ b/go.sum @@ -141,7 +141,6 @@ github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -358,7 +357,6 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -501,7 +499,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= @@ -637,7 +634,6 @@ github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GO github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod 
h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -679,7 +675,6 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= @@ -695,9 +690,7 @@ github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -915,7 +908,6 @@ github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6t github.com/j-keck/arping 
v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= @@ -927,14 +919,11 @@ github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7 github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ= github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= @@ -948,7 +937,6 @@ github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= @@ -962,7 +950,6 @@ github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/V github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38= github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= @@ -973,7 +960,6 @@ github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9s github.com/jackc/pgx/v4 v4.9.0/go.mod h1:MNGWmViCgqbZck9ujOOBN63gK9XVGILXWCvKLGKmnms= github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA= 
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= @@ -991,11 +977,8 @@ github.com/jeremywohl/flatten v1.0.1/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e h1:Yb4fEGk+GtBSNuvy5rs0ZJt/jtopc/z9azQaj3xbies= github.com/jhump/protoreflect v1.9.1-0.20210817181203-db1a327a393e/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI= -github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1132,8 +1115,6 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw= -github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -1371,7 +1352,6 @@ github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -2306,16 +2286,11 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/datatypes v1.0.0 h1:5rDW3AnqXaacuQn6nB/ZNAIfTCIvmL5oKGa/TtCoBFA= gorm.io/datatypes v1.0.0/go.mod h1:aKpJ+RNhLXWeF5OAdxfzBwT1UPw1wseSchF0AY3/lSw= 
-gorm.io/driver/mysql v1.0.3 h1:+JKBYPfn1tygR1/of/Fh2T8iwuVwzt+PEJmKaXzMQXg= gorm.io/driver/mysql v1.0.3/go.mod h1:twGxftLBlFgNVNakL7F+P/x9oYqoymG3YYT8cAfI9oI= gorm.io/driver/postgres v1.0.5/go.mod h1:qrD92UurYzNctBMVCJ8C3VQEjffEuphycXtxOudXNCA= -gorm.io/driver/postgres v1.0.8 h1:PAgM+PaHOSAeroTjHkCHCBIHHoBIf9RgPWGo8dF2DA8= gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= -gorm.io/driver/sqlite v1.1.3 h1:BYfdVuZB5He/u9dt4qDpZqiqDJ6KhPqs5QUqsr/Eeuc= gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c= -gorm.io/driver/sqlserver v1.0.5 h1:n5knSvyaEwufxl0aROEW90pn+aLoV9h+vahYJk1x5l4= gorm.io/driver/sqlserver v1.0.5/go.mod h1:WI/bfZ+s9TigYXe3hb3XjNaUP0TqmTdXl11pECyLATs= gorm.io/gorm v1.20.1/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.2/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= @@ -2323,8 +2298,6 @@ gorm.io/gorm v1.20.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.5/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.21.16 h1:YBIQLtP5PLfZQz59qfrq7xbrK7KWQ+JsXXCH/THlMqs= -gorm.io/gorm v1.21.16/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 9082d6a8a4..cc881e8394 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -1,17 +1,15 @@ package job import ( + "database/sql" "encoding/json" "time" "github.com/google/uuid" "github.com/jackc/pgx/v5" "github.com/lib/pq" - 
"gorm.io/datatypes" - "gorm.io/gorm" "github.com/odpf/optimus/core/job" - "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/internal/errors" "github.com/odpf/optimus/internal/models" ) @@ -19,12 +17,12 @@ import ( const jobDatetimeLayout = "2006-01-02" type Spec struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` - Name string `gorm:"not null" json:"name"` + ID uuid.UUID + Name string Version int Owner string Description string - Labels datatypes.JSON + Labels json.RawMessage StartDate time.Time EndDate *time.Time @@ -33,35 +31,35 @@ type Spec struct { // Behavior DependsOnPast bool `json:"depends_on_past"` CatchUp bool `json:"catch_up"` - Retry datatypes.JSON - Alert datatypes.JSON + Retry json.RawMessage + Alert json.RawMessage // Upstreams - StaticUpstreams pq.StringArray `gorm:"type:varchar(220)[]" json:"static_upstreams"` + StaticUpstreams pq.StringArray `json:"static_upstreams"` // ExternalUpstreams - HTTPUpstreams datatypes.JSON `json:"http_upstreams"` + HTTPUpstreams json.RawMessage `json:"http_upstreams"` TaskName string - TaskConfig datatypes.JSON + TaskConfig json.RawMessage WindowSize string WindowOffset string WindowTruncateTo string - Assets datatypes.JSON - Hooks datatypes.JSON - Metadata datatypes.JSON + Assets json.RawMessage + Hooks json.RawMessage + Metadata json.RawMessage Destination string - Sources pq.StringArray `gorm:"type:varchar(300)[]"` + Sources pq.StringArray ProjectName string `json:"project_name"` NamespaceName string `json:"namespace_name"` - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` - DeletedAt gorm.DeletedAt + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt sql.NullTime } type Retry struct { @@ -83,7 +81,7 @@ type Asset struct { type Hook struct { Name string - Config datatypes.JSON + Config json.RawMessage } type Metadata struct { @@ -585,7 +583,7 @@ func FromRow(row pgx.Row) (*Spec, error) { return nil, 
errors.NotFound(job.EntityJob, "job not found") } - return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + return nil, errors.Wrap(job.EntityJob, "error in reading row for job", err) } return &js, nil @@ -602,7 +600,7 @@ func UpstreamFromRow(row pgx.Row) (*JobWithUpstream, error) { return nil, errors.NotFound(job.EntityJob, "job upstream not found") } - return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) + return nil, errors.Wrap(job.EntityJob, "error in reading row for upstream", err) } return &js, nil diff --git a/internal/store/postgres/migration.go b/internal/store/postgres/migration.go index 015df9e9b6..59fb9ba3b1 100644 --- a/internal/store/postgres/migration.go +++ b/internal/store/postgres/migration.go @@ -1,230 +1,74 @@ package postgres import ( - "context" "embed" "errors" "fmt" - "strings" - "time" "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" // required for postgres migrate driver "github.com/golang-migrate/migrate/v4/source/iofs" - "github.com/odpf/salt/log" - "gorm.io/driver/postgres" - "gorm.io/gorm" ) //go:embed migrations var migrationFs embed.FS -type migrationStep struct { - CurrentOptimusVersion string - CurrentMigrationVersion uint - PreviousOptimusVersion string - CreatedAt time.Time -} - -type Migration interface { - Up(context.Context) error - Rollback(context.Context) error -} - -type migration struct { - incomingOptimusVersion string - dbConnURL string - - logger log.Logger -} - -// NewMigration initializes migration mechanism specific for postgres -func NewMigration(logger log.Logger, incomingOptimusVersion, dbConnURL string) (Migration, error) { - if logger == nil { - return nil, errors.New("logger is nil") - } - if incomingOptimusVersion == "" { - return nil, errors.New("incoming optimus version is empty") - } - if dbConnURL == "" { - return nil, errors.New("database connection url is empty") - } - return 
&migration{ - incomingOptimusVersion: incomingOptimusVersion, - dbConnURL: dbConnURL, - logger: logger, - }, nil -} - -func (m *migration) Up(ctx context.Context) error { - dbClient, dbClientCleanup, err := m.newDBClient() - if err != nil { - return fmt.Errorf("error initializing db client: %w", err) - } - defer dbClientCleanup() - - if err := dbClient.WithContext(ctx).AutoMigrate(&migrationStep{}); err != nil { - return fmt.Errorf("error setting up migration_steps: %w", err) - } - - latestStep, err := m.getLatestMigrationStep(ctx, dbClient) - if err != nil { - return fmt.Errorf("error getting the latest migration step: %w", err) - } - if m.incomingOptimusVersion < latestStep.CurrentOptimusVersion { - return fmt.Errorf("optimus version [%s] should be higher or equal than existing [%s]", m.incomingOptimusVersion, latestStep.CurrentOptimusVersion) - } - if m.incomingOptimusVersion == latestStep.CurrentOptimusVersion { - m.logger.Warn(fmt.Sprintf("migration up is skipped because optimus version [%s] is the same as current one", m.incomingOptimusVersion)) - return nil - } - - migrationClient, migrationClientCleanup, err := m.newMigrationClient() - if err != nil { - return fmt.Errorf("error initializing migration client: %w", err) - } - defer migrationClientCleanup() +const ( + resourcePath = "migrations" +) - if err := migrationClient.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) { - return fmt.Errorf("error executing migration up: %w", err) - } - newVersion, _, err := migrationClient.Version() +func NewMigrator(dbConnURL string) (*migrate.Migrate, error) { + sourceDriver, err := iofs.New(migrationFs, resourcePath) if err != nil { - return fmt.Errorf("error getting current migration version: %w", err) + return nil, fmt.Errorf("error initializing source driver: %w", err) } - newMigrationVersion := &migrationStep{ - CurrentOptimusVersion: m.incomingOptimusVersion, - CurrentMigrationVersion: newVersion, - PreviousOptimusVersion: 
latestStep.CurrentOptimusVersion, - CreatedAt: time.Now(), - } - return m.addMigrationStep(ctx, dbClient, newMigrationVersion) + return migrate.NewWithSourceInstance("iofs", sourceDriver, dbConnURL) } -func (m *migration) Rollback(ctx context.Context) error { - dbClient, dbClientCleanup, err := m.newDBClient() +// Migrate to run up migrations +func Migrate(connURL string) error { + m, err := NewMigrator(connURL) if err != nil { - return fmt.Errorf("error initializing db client: %w", err) + return fmt.Errorf("db migrator: %w", err) } - defer dbClientCleanup() + defer m.Close() - if err := dbClient.WithContext(ctx).AutoMigrate(&migrationStep{}); err != nil { - return fmt.Errorf("error setting up migration_steps: %w", err) + if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) { + return fmt.Errorf("db migrator: %w", err) } + return nil +} - latestStep, err := m.getLatestMigrationStep(ctx, dbClient) +// Rollback to run down migrations +func Rollback(connURL string, count int) error { + m, err := NewMigrator(connURL) if err != nil { - return err - } - if m.incomingOptimusVersion != latestStep.CurrentOptimusVersion { - return fmt.Errorf("expecting optimus with version [%s] but got [%s]", latestStep.CurrentOptimusVersion, m.incomingOptimusVersion) + return fmt.Errorf("db migrator: %w", err) } + defer m.Close() - previousMigrationVersion, err := m.getMigrationVersion(ctx, dbClient, latestStep.PreviousOptimusVersion) - if err != nil { - return err - } - if previousMigrationVersion == 0 { - m.logger.Warn("migration rollback is skipped because previous migration version is not registered") - return nil + if count < 1 { + return fmt.Errorf("invalid value[%d] for rollback", count) } - migrationClient, migrationClientCleanup, err := m.newMigrationClient() + err = m.Steps(count * -1) if err != nil { - return fmt.Errorf("error initializing migration client: %w", err) + return fmt.Errorf("db migrator: %w", err) } - defer migrationClientCleanup() - - if err := 
migrationClient.Migrate(previousMigrationVersion); err != nil && !errors.Is(err, migrate.ErrNoChange) { - return fmt.Errorf("error migrating to version [%d]: %w", previousMigrationVersion, err) - } - return m.removeMigrationStep(ctx, dbClient, latestStep) + return nil } -func (m *migration) newMigrationClient() (*migrate.Migrate, func(), error) { - path := "migrations" - sourceDriver, err := iofs.New(migrationFs, path) - if err != nil { - return nil, nil, fmt.Errorf("error initializing source driver: %w", err) - } - name := "iofs" - migrationClient, err := migrate.NewWithSourceInstance(name, sourceDriver, m.dbConnURL) +func ToVersion(version uint, connURL string) error { + m, err := NewMigrator(connURL) if err != nil { - return nil, nil, fmt.Errorf("error initializing migration client: %w", err) + return fmt.Errorf("db migrator: %w", err) } - cleanup := func() { - sourceErr, databaseErr := migrationClient.Close() - if sourceErr != nil { - m.logger.Error("source driver error encountered when closing migration connection: %w", sourceErr) - } - if databaseErr != nil { - m.logger.Error("database error encountered when closing migration connection: %w", databaseErr) - } - } - return migrationClient, cleanup, nil -} + defer m.Close() -func (m *migration) newDBClient() (*gorm.DB, func(), error) { - dbClient, err := gorm.Open(postgres.Open(m.dbConnURL)) + err = m.Migrate(version) if err != nil { - return nil, nil, fmt.Errorf("error initializing db client: %w", err) - } - cleanup := func() { - db, err := dbClient.DB() - if err != nil { - m.logger.Error("error getting db: %w", err) - return - } - if err := db.Close(); err != nil { - m.logger.Error("error encountered when closing db connection: %w", err) - } - } - return dbClient, cleanup, nil -} - -func (*migration) removeMigrationStep(ctx context.Context, db *gorm.DB, oldStep *migrationStep) error { - return db.WithContext(ctx). - Where("current_optimus_version = ? and current_migration_version = ? 
and previous_optimus_version = ?", - oldStep.CurrentOptimusVersion, oldStep.CurrentMigrationVersion, oldStep.PreviousOptimusVersion). - Delete(&migrationStep{}).Error -} - -func (*migration) getMigrationVersion(ctx context.Context, db *gorm.DB, optimusVersion string) (uint, error) { - var rst migrationStep - if err := db.WithContext(ctx). - Select("current_optimus_version, current_migration_version, previous_optimus_version, created_at"). - Table("migration_steps"). - Where("current_optimus_version = ?", optimusVersion). - Order("created_at desc limit 1"). - Find(&rst).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return 0, fmt.Errorf("error getting migration version for optimus version [%s]: %w", optimusVersion, err) - } - return rst.CurrentMigrationVersion, nil -} - -func (m *migration) addMigrationStep(ctx context.Context, db *gorm.DB, newStep *migrationStep) error { - var existingSteps []migrationStep - if err := db.WithContext(ctx). - Where(newStep). - First(&existingSteps).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("error getting existing steps: %w", err) - } - if len(existingSteps) > 0 { - m.logger.Warn("migration step is not added because it already exists") - return nil - } - return db.WithContext(ctx).Create(newStep).Error -} - -func (*migration) getLatestMigrationStep(ctx context.Context, db *gorm.DB) (*migrationStep, error) { - var rst migrationStep - if err := db.WithContext(ctx). - Select("m.current_optimus_version, m.current_migration_version, m.previous_optimus_version, m.created_at"). - Table("migration_steps m"). - Joins("right join schema_migrations s on m.current_migration_version = s.version"). - Order("m.created_at desc limit 1"). 
- Find(&rst).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) && !strings.Contains(err.Error(), "42P01") { - return nil, fmt.Errorf("error getting existing step: %w", err) + return fmt.Errorf("db migrator: %w", err) } - return &rst, nil + return nil } diff --git a/internal/store/postgres/postgres.go b/internal/store/postgres/postgres.go deleted file mode 100644 index 28723f7cb7..0000000000 --- a/internal/store/postgres/postgres.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package postgres implementation relies on gorm for queries which is very very -// inefficient at the moment, we are trading convenience with performance -// for example in lot of select stmts, we pull all related relations as well -// even when we don't really need to, most of the times these relation -// queries even in update gets executed for no reason even if user didn't -// intend to update them. -package postgres - -import ( - "fmt" - "io" - stdlog "log" - "time" - - _ "github.com/golang-migrate/migrate/v4/database/postgres" // required for postgres migrate driver - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "gorm.io/driver/postgres" - "gorm.io/gorm" - "gorm.io/gorm/logger" - "gorm.io/gorm/schema" - - "github.com/odpf/optimus/config" -) - -const tracingSpanKey = "otel:span" - -var tracerOtel = otel.Tracer("optimus/store/postgres") - -// Connect connect to the DB with custom configuration. 
-func Connect(dbConf config.DBConfig, writer io.Writer) (*gorm.DB, error) { - db, err := gorm.Open(postgres.Open(dbConf.DSN), &gorm.Config{ - Logger: logger.New( - stdlog.New(writer, "\r\n", stdlog.LstdFlags), // io writer - logger.Config{ - SlowThreshold: time.Second, - LogLevel: logger.Warn, - IgnoreRecordNotFoundError: true, - Colorful: true, - }, - ), - NamingStrategy: schema.NamingStrategy{ - SingularTable: true, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to initialize postgres db connection: %w", err) - } - - if err := InitTrace(db); err != nil { - return nil, fmt.Errorf("failed to initialize tracing for postgresql: %w", err) - } - - sqlDB, err := db.DB() - if err != nil { - return nil, err - } - sqlDB.SetMaxOpenConns(dbConf.MaxOpenConnection) - return db, nil -} - -func InitTrace(db *gorm.DB) error { - // create - if err := db.Callback().Create().Before("gorm:create").Register("otel:before_create", beforeCallback("db:create")); err != nil { - return err - } - if err := db.Callback().Create().After("gorm:create").Register("otel:after_create", afterCallback); err != nil { - return err - } - - // query - if err := db.Callback().Query().Before("gorm:query").Register("otel:before_query", beforeCallback("db:query")); err != nil { - return err - } - if err := db.Callback().Query().After("gorm:query").Register("otel:after_query", afterCallback); err != nil { - return err - } - - // update - if err := db.Callback().Update().Before("gorm:update").Register("otel:before_update", beforeCallback("db:update")); err != nil { - return err - } - if err := db.Callback().Update().After("gorm:update").Register("otel:after_update", afterCallback); err != nil { - return err - } - - // delete - if err := db.Callback().Delete().Before("gorm:delete").Register("otel:before_delete", beforeCallback("db:delete")); err != nil { - return err - } - if err := db.Callback().Delete().After("gorm:delete").Register("otel:after_delete", afterCallback); err != nil { - return err 
- } - - // row - if err := db.Callback().Row().Before("gorm:row").Register("otel:before_row", beforeCallback("db:row")); err != nil { - return err - } - if err := db.Callback().Row().After("gorm:row").Register("otel:after_row", afterCallback); err != nil { - return err - } - - // raw - if err := db.Callback().Raw().Before("gorm:raw").Register("otel:before_raw", beforeCallback("db:raw")); err != nil { - return err - } - - return db.Callback().Raw().After("gorm:raw").Register("otel:after_raw", afterCallback) -} - -func beforeCallback(operation string) func(db *gorm.DB) { - return func(db *gorm.DB) { - if db == nil || db.Statement == nil || db.Statement.Context == nil { - return - } - // if not tracing - if !trace.SpanFromContext(db.Statement.Context).IsRecording() { - return - } - _, span := tracerOtel.Start(db.Statement.Context, operation) - db.InstanceSet(tracingSpanKey, span) - } -} - -func afterCallback(db *gorm.DB) { - if db == nil || db.Statement == nil || db.Statement.Context == nil { - return - } - // extract sp from db context - v, ok := db.InstanceGet(tracingSpanKey) - if !ok || v == nil { - return - } - sp, ok := v.(trace.Span) - if !ok || sp == nil { - return - } - defer sp.End() - - sp.SetAttributes( - attribute.String("table", db.Statement.Table), - attribute.Int64("rows_affected", db.Statement.RowsAffected), - attribute.String("sql", db.Statement.SQL.String()), - ) -} diff --git a/internal/store/postgres/resource/repository_test.go b/internal/store/postgres/resource/repository_test.go index 33c4a33b6a..8244b74f1b 100644 --- a/internal/store/postgres/resource/repository_test.go +++ b/internal/store/postgres/resource/repository_test.go @@ -1,5 +1,4 @@ //go:build !unit_test -// +build !unit_test package resource_test diff --git a/internal/store/postgres/resource/resource.go b/internal/store/postgres/resource/resource.go index 9ea0699834..13f440efe8 100644 --- a/internal/store/postgres/resource/resource.go +++ b/internal/store/postgres/resource/resource.go 
@@ -4,8 +4,6 @@ import ( "encoding/json" "time" - "gorm.io/datatypes" - "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" @@ -19,7 +17,7 @@ type Resource struct { ProjectName string NamespaceName string - Metadata datatypes.JSON + Metadata json.RawMessage Spec map[string]any URN string diff --git a/server/optimus.go b/server/optimus.go index 1bffb539ce..c09b791e3a 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" slackapi "github.com/slack-go/slack" "google.golang.org/grpc" - "gorm.io/gorm" "github.com/odpf/optimus/config" jHandler "github.com/odpf/optimus/core/job/handler/v1beta1" @@ -56,7 +55,6 @@ type OptimusServer struct { logger log.Logger dbPool *pgxpool.Pool - dbConn *gorm.DB key *[keyLength]byte serverAddr string @@ -157,19 +155,10 @@ func applicationKeyFromString(appKey string) (*[keyLength]byte, error) { } func (s *OptimusServer) setupDB() error { - migration, err := postgres.NewMigration(s.logger, config.BuildVersion, s.conf.Serve.DB.DSN) + err := postgres.Migrate(s.conf.Serve.DB.DSN) if err != nil { return fmt.Errorf("error initializing migration: %w", err) } - ctx := context.Background() - if err := migration.Up(ctx); err != nil { - return fmt.Errorf("error executing migration up: %w", err) - } - - s.dbConn, err = postgres.Connect(s.conf.Serve.DB, s.logger.Writer()) - if err != nil { - return fmt.Errorf("postgres.Connect: %w", err) - } s.dbPool, err = postgres.Open(s.conf.Serve.DB) if err != nil { @@ -230,13 +219,8 @@ func (s *OptimusServer) Shutdown() { fn() // Todo: log all the errors from cleanup before exit } - if s.dbConn != nil { - sqlConn, err := s.dbConn.DB() - if err != nil { - s.logger.Error("Error while getting sqlConn", err) - } else if err := sqlConn.Close(); err != nil { - s.logger.Error("Error in sqlConn.Close", err) - } + if s.dbPool != nil { + s.dbPool.Close() } s.logger.Info("Server 
shutdown complete") @@ -266,9 +250,9 @@ func (s *OptimusServer) setupHandlers() error { resourceManager.RegisterDatastore(rModel.Bigquery, bigqueryStore) // Scheduler bounded context - jobRunRepo := schedulerRepo.NewJobRunRepository(s.dbConn) - operatorRunRepository := schedulerRepo.NewOperatorRunRepository(s.dbConn) - jobProviderRepo := schedulerRepo.NewJobProviderRepository(s.dbConn) + jobRunRepo := schedulerRepo.NewJobRunRepository(s.dbPool) + operatorRunRepository := schedulerRepo.NewOperatorRunRepository(s.dbPool) + jobProviderRepo := schedulerRepo.NewJobProviderRepository(s.dbPool) notificationContext, cancelNotifiers := context.WithCancel(context.Background()) s.cleanupFn = append(s.cleanupFn, cancelNotifiers) diff --git a/tests/setup/database.go b/tests/setup/database.go index 0dacaed2cb..73df8cb51d 100644 --- a/tests/setup/database.go +++ b/tests/setup/database.go @@ -6,27 +6,20 @@ import ( "os" "strings" "sync" + "time" + "github.com/golang-migrate/migrate/v4" "github.com/jackc/pgx/v5/pgxpool" - "github.com/odpf/salt/log" - "gorm.io/gorm" "github.com/odpf/optimus/config" "github.com/odpf/optimus/internal/store/postgres" ) var ( - optimusDB *gorm.DB dbPool *pgxpool.Pool initDBOnce sync.Once ) -func TestDB() *gorm.DB { - initDBOnce.Do(migrateDB) - - return optimusDB -} - func TestPool() *pgxpool.Pool { initDBOnce.Do(migrateDB) @@ -52,35 +45,48 @@ func migrateDB() { MinOpenConnection: 1, MaxOpenConnection: 2, } - dbConn, err := postgres.Connect(dbConf, os.Stdout) + + pool, err := postgres.Open(dbConf) if err != nil { panic(err) } - if err := dropTables(dbConn); err != nil { - panic(err) - } + dbPool = pool - logger := log.NewLogrus(log.LogrusWithWriter(os.Stdout)) - optimusVersion := "integration_test" - m, err := postgres.NewMigration(logger, optimusVersion, dbURL) + m, err := postgres.NewMigrator(dbURL) if err != nil { panic(err) } - ctx := context.Background() - if err := m.Up(ctx); err != nil { + + cleanDB(m, pool) + + if err = postgres.Migrate(dbURL); 
err != nil { panic(err) } +} - pool, err := postgres.Open(dbConf) - if err != nil { - panic(err) +func cleanDB(m *migrate.Migrate, pool *pgxpool.Pool) { + shouldDrop := false + _, ok := os.LookupEnv("TEST_OPTIMUS_DROP_DB") + if !ok { + shouldDrop = true + } + + if shouldDrop { + if err := m.Drop(); err != nil { + panic(err) + } + return } - dbPool = pool - optimusDB = dbConn + if err := dropTables(pool); err != nil { + panic(err) + } } -func dropTables(db *gorm.DB) error { +func dropTables(db *pgxpool.Pool) error { + ctx, cancelFunc := context.WithTimeout(context.Background(), time.Minute*5) + defer cancelFunc() + tablesToDelete := []string{ "instance", "hook_run", @@ -109,8 +115,8 @@ func dropTables(db *gorm.DB) error { } var errMsgs []string for _, table := range tablesToDelete { - if err := db.Exec(fmt.Sprintf("drop table if exists %s", table)).Error; err != nil { - toleratedErrMsg := fmt.Sprintf("table \"%s\" does not exist", table) + if _, err := db.Exec(ctx, "drop table if exists "+table); err != nil { + toleratedErrMsg := fmt.Sprintf("table %q does not exist", table) if !strings.Contains(err.Error(), toleratedErrMsg) { errMsgs = append(errMsgs, err.Error()) } @@ -122,28 +128,6 @@ func dropTables(db *gorm.DB) error { return nil } -func TruncateTables(db *gorm.DB) { - db.Exec("TRUNCATE TABLE backup_old, resource_old CASCADE") - db.Exec("TRUNCATE TABLE backup CASCADE") - db.Exec("TRUNCATE TABLE replay CASCADE") - db.Exec("TRUNCATE TABLE resource CASCADE") - - db.Exec("TRUNCATE TABLE job_run CASCADE") - db.Exec("TRUNCATE TABLE sensor_run CASCADE") - db.Exec("TRUNCATE TABLE task_run CASCADE") - db.Exec("TRUNCATE TABLE hook_run CASCADE") - - db.Exec("TRUNCATE TABLE job CASCADE") - - db.Exec("TRUNCATE TABLE secret CASCADE") - db.Exec("TRUNCATE TABLE namespace CASCADE") - db.Exec("TRUNCATE TABLE project CASCADE") - - db.Exec("TRUNCATE TABLE job_deployment CASCADE") - - db.Exec("TRUNCATE TABLE job_upstream CASCADE") -} - func TruncateTablesWith(pool *pgxpool.Pool) { 
ctx := context.Background() pool.Exec(ctx, "TRUNCATE TABLE backup_old, resource_old CASCADE") From d05d7825f6322e590a223eae502b3743f84c4b05 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Fri, 23 Dec 2022 13:06:01 +0530 Subject: [PATCH 11/25] refactor: update tenant bounded context --- .../postgres/tenant/namespace_repository.go | 13 +++++------ .../postgres/tenant/project_repository.go | 13 +++++------ .../postgres/tenant/secret_repository.go | 22 +++++++++---------- 3 files changed, 23 insertions(+), 25 deletions(-) diff --git a/internal/store/postgres/tenant/namespace_repository.go b/internal/store/postgres/tenant/namespace_repository.go index 6283035c83..a66ee04254 100644 --- a/internal/store/postgres/tenant/namespace_repository.go +++ b/internal/store/postgres/tenant/namespace_repository.go @@ -13,7 +13,7 @@ import ( ) type NamespaceRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } const ( @@ -29,7 +29,6 @@ type Namespace struct { CreatedAt time.Time UpdatedAt time.Time - DeletedAt time.Time } func (n *Namespace) toTenantNamespace() (*tenant.Namespace, error) { @@ -48,7 +47,7 @@ func (n *NamespaceRepository) Save(ctx context.Context, namespace *tenant.Namesp insertNamespace := `INSERT INTO namespace (name, config, project_name, created_at, updated_at) VALUES ($1, $2, $3, now(), now())` - _, err = n.pool.Exec(ctx, insertNamespace, namespace.Name(), namespace.GetConfigs(), namespace.ProjectName()) + _, err = n.db.Exec(ctx, insertNamespace, namespace.Name(), namespace.GetConfigs(), namespace.ProjectName()) return errors.WrapIfErr(tenant.EntityNamespace, "unable to save namespace", err) } return errors.Wrap(tenant.EntityNamespace, "unable to save namespace", err) @@ -58,7 +57,7 @@ VALUES ($1, $2, $3, now(), now())` return errors.NewError(errors.ErrFailedPrecond, tenant.EntityNamespace, "empty config") } updateNamespaceQuery := `UPDATE namespace n SET config=$1, updated_at=now() WHERE n.name = $2 AND n.project_name=$3` - _, err = n.pool.Exec(ctx, 
updateNamespaceQuery, namespace.GetConfigs(), namespace.Name(), namespace.ProjectName()) + _, err = n.db.Exec(ctx, updateNamespaceQuery, namespace.GetConfigs(), namespace.Name(), namespace.ProjectName()) return errors.WrapIfErr(tenant.EntityProject, "unable to update namespace", err) } @@ -77,7 +76,7 @@ func (n *NamespaceRepository) get(ctx context.Context, projName tenant.ProjectNa var namespace Namespace getNamespaceByNameQuery := `SELECT ` + namespaceColumns + ` FROM namespace WHERE project_name = $1 AND name = $2 AND deleted_at IS NULL` - err := n.pool.QueryRow(ctx, getNamespaceByNameQuery, projName, name). + err := n.db.QueryRow(ctx, getNamespaceByNameQuery, projName, name). Scan(&namespace.ID, &namespace.Name, &namespace.Config, &namespace.ProjectName, &namespace.CreatedAt, &namespace.UpdatedAt) if err != nil { return Namespace{}, err @@ -90,7 +89,7 @@ func (n *NamespaceRepository) GetAll(ctx context.Context, projectName tenant.Pro getAllNamespaceInProject := `SELECT ` + namespaceColumns + ` FROM namespace n WHERE project_name = $1 AND deleted_at IS NULL` - rows, err := n.pool.Query(ctx, getAllNamespaceInProject, projectName) + rows, err := n.db.Query(ctx, getAllNamespaceInProject, projectName) if err != nil { return nil, errors.Wrap(tenant.EntityNamespace, "error in GetAll", err) } @@ -115,6 +114,6 @@ WHERE project_name = $1 AND deleted_at IS NULL` func NewNamespaceRepository(pool *pgxpool.Pool) *NamespaceRepository { return &NamespaceRepository{ - pool: pool, + db: pool, } } diff --git a/internal/store/postgres/tenant/project_repository.go b/internal/store/postgres/tenant/project_repository.go index b0a3b37955..29aad58673 100644 --- a/internal/store/postgres/tenant/project_repository.go +++ b/internal/store/postgres/tenant/project_repository.go @@ -13,7 +13,7 @@ import ( ) type ProjectRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } const ( @@ -27,7 +27,6 @@ type Project struct { CreatedAt time.Time UpdatedAt time.Time - DeletedAt time.Time } 
func (p *Project) toTenantProject() (*tenant.Project, error) { @@ -39,14 +38,14 @@ func (repo ProjectRepository) Save(ctx context.Context, tenantProject *tenant.Pr if err != nil { if errors.Is(err, pgx.ErrNoRows) { insertProjectQuery := `INSERT INTO project (name, config, created_at, updated_at) VALUES ($1, $2, now(), now())` - _, err = repo.pool.Exec(ctx, insertProjectQuery, tenantProject.Name(), tenantProject.GetConfigs()) + _, err = repo.db.Exec(ctx, insertProjectQuery, tenantProject.Name(), tenantProject.GetConfigs()) return errors.WrapIfErr(tenant.EntityProject, "unable to save project", err) } return errors.Wrap(tenant.EntityProject, "unable to save project", err) } updateProjectQuery := `UPDATE project SET config=$1, updated_at=now() WHERE name=$2` - _, err = repo.pool.Exec(ctx, updateProjectQuery, tenantProject.GetConfigs(), tenantProject.Name()) + _, err = repo.db.Exec(ctx, updateProjectQuery, tenantProject.GetConfigs(), tenantProject.Name()) return errors.WrapIfErr(tenant.EntityProject, "unable to update project", err) } @@ -65,7 +64,7 @@ func (repo ProjectRepository) get(ctx context.Context, name tenant.ProjectName) var project Project getProjectByNameQuery := `SELECT ` + projectColumns + ` FROM project WHERE name = $1 AND deleted_at IS NULL` - err := repo.pool.QueryRow(ctx, getProjectByNameQuery, name). + err := repo.db.QueryRow(ctx, getProjectByNameQuery, name). 
Scan(&project.ID, &project.Name, &project.Config, &project.CreatedAt, &project.UpdatedAt) if err != nil { return Project{}, err @@ -77,7 +76,7 @@ func (repo ProjectRepository) GetAll(ctx context.Context) ([]*tenant.Project, er var projects []*tenant.Project getAllProjects := `SELECT ` + projectColumns + ` FROM project WHERE deleted_at IS NULL` - rows, err := repo.pool.Query(ctx, getAllProjects) + rows, err := repo.db.Query(ctx, getAllProjects) if err != nil { return nil, errors.Wrap(tenant.EntityProject, "error in GetAll", err) } @@ -102,6 +101,6 @@ func (repo ProjectRepository) GetAll(ctx context.Context) ([]*tenant.Project, er func NewProjectRepository(pool *pgxpool.Pool) *ProjectRepository { return &ProjectRepository{ - pool: pool, + db: pool, } } diff --git a/internal/store/postgres/tenant/secret_repository.go b/internal/store/postgres/tenant/secret_repository.go index 05499e9aee..7d6752f096 100644 --- a/internal/store/postgres/tenant/secret_repository.go +++ b/internal/store/postgres/tenant/secret_repository.go @@ -18,7 +18,7 @@ import ( ) type SecretRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } const ( @@ -127,7 +127,7 @@ func (s SecretRepository) Save(ctx context.Context, tenantSecret *tenant.Secret) insertSecret := `INSERT INTO secret (name, value, type, project_name, namespace_name, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, NOW(), NOW())` - _, err = s.pool.Exec(ctx, insertSecret, secret.Name, secret.Value, secret.Type, secret.ProjectName, secret.NamespaceName) + _, err = s.db.Exec(ctx, insertSecret, secret.Name, secret.Value, secret.Type, secret.ProjectName, secret.NamespaceName) if err != nil { return errors.Wrap(tenant.EntitySecret, "unable to save secret", err) @@ -150,7 +150,7 @@ func (s SecretRepository) Update(ctx context.Context, tenantSecret *tenant.Secre updateSecret := `UPDATE secret SET value=$1, type=$2, updated_at=NOW() WHERE project_name = $3 AND name=$4` - _, err = s.pool.Exec(ctx, updateSecret, secret.Value, 
secret.Type, secret.ProjectName, secret.Name) + _, err = s.db.Exec(ctx, updateSecret, secret.Value, secret.Type, secret.ProjectName, secret.Name) if err != nil { return errors.Wrap(tenant.EntitySecret, "unable to update secret", err) } @@ -166,7 +166,7 @@ FROM secret s WHERE name = $1 AND project_name = $2 AND (namespace_name IS NULL OR namespace_name = $3)` - err := s.pool.QueryRow(ctx, getSecretByNameQuery, name, projName, nsName). + err := s.db.QueryRow(ctx, getSecretByNameQuery, name, projName, nsName). Scan(&secret.ID, &secret.Name, &secret.Value, &secret.Type, &secret.ProjectName, &secret.NamespaceName, &secret.CreatedAt, &secret.UpdatedAt) if err != nil { @@ -183,7 +183,7 @@ AND (namespace_name IS NULL OR namespace_name = $3)` func (s SecretRepository) get(ctx context.Context, projName tenant.ProjectName, name tenant.SecretName) error { var dummyName string getSecretByNameAtProject := `SELECT s.name FROM secret s WHERE name = $1 AND project_name = $2` - err := s.pool.QueryRow(ctx, getSecretByNameAtProject, name, projName).Scan(&dummyName) + err := s.db.QueryRow(ctx, getSecretByNameAtProject, name, projName).Scan(&dummyName) return err } @@ -194,9 +194,9 @@ func (s SecretRepository) GetAll(ctx context.Context, projName tenant.ProjectNam if nsName != "" { getAllSecretsAvailableForNamespace := `SELECT ` + secretColumns + ` FROM secret WHERE project_name = $1 AND (namespace_name IS NULL or namespace_name = $2)` - rows, queryErr = s.pool.Query(ctx, getAllSecretsAvailableForNamespace, projName, nsName) + rows, queryErr = s.db.Query(ctx, getAllSecretsAvailableForNamespace, projName, nsName) } else { - rows, queryErr = s.pool.Query(ctx, getAllSecretsInProject, projName) + rows, queryErr = s.db.Query(ctx, getAllSecretsInProject, projName) } if queryErr != nil { @@ -230,11 +230,11 @@ func (s SecretRepository) Delete(ctx context.Context, projName tenant.ProjectNam if nsName != "" { deleteForNamespaceScope := `DELETE FROM secret WHERE name = $1 AND project_name = $2 AND 
namespace_name = $3` - result, err = s.pool.Exec(ctx, deleteForNamespaceScope, name, projName, nsName) + result, err = s.db.Exec(ctx, deleteForNamespaceScope, name, projName, nsName) } else { deleteForProjectScope := `DELETE FROM secret WHERE project_name = $1 AND name = $2 AND namespace_name IS NULL` - result, err = s.pool.Exec(ctx, deleteForProjectScope, projName, name) + result, err = s.db.Exec(ctx, deleteForProjectScope, projName, name) } if err != nil { @@ -248,7 +248,7 @@ WHERE project_name = $1 AND name = $2 AND namespace_name IS NULL` } func (s SecretRepository) GetSecretsInfo(ctx context.Context, projName tenant.ProjectName) ([]*dto.SecretInfo, error) { - rows, err := s.pool.Query(ctx, getAllSecretsInProject, projName) + rows, err := s.db.Query(ctx, getAllSecretsInProject, projName) if err != nil { return nil, errors.Wrap(tenant.EntitySecret, "unable to get all secrets info", err) @@ -275,5 +275,5 @@ func (s SecretRepository) GetSecretsInfo(ctx context.Context, projName tenant.Pr } func NewSecretRepository(pool *pgxpool.Pool) *SecretRepository { - return &SecretRepository{pool: pool} + return &SecretRepository{db: pool} } From 199ff05abe90524ac6911791f8cc665309185b3c Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Fri, 23 Dec 2022 13:08:17 +0530 Subject: [PATCH 12/25] refactor: update scheduler bounded context --- .../scheduler/job_operator_repository.go | 29 ++-- .../postgres/scheduler/job_repository.go | 132 ++++++++---------- .../postgres/scheduler/job_run_repository.go | 43 +++--- 3 files changed, 95 insertions(+), 109 deletions(-) diff --git a/internal/store/postgres/scheduler/job_operator_repository.go b/internal/store/postgres/scheduler/job_operator_repository.go index 09353a271d..90ccf05d8c 100644 --- a/internal/store/postgres/scheduler/job_operator_repository.go +++ b/internal/store/postgres/scheduler/job_operator_repository.go @@ -17,26 +17,29 @@ const ( sensorRunTableName = "sensor_run" taskRunTableName = "task_run" hookRunTableName = 
"hook_run" + + jobOperatorColumnsToStore = `name, job_run_id, status, start_time, end_time` + jobOperatorColumns = `id, ` + jobOperatorColumnsToStore ) type OperatorRunRepository struct { // TODO: Add test - pool *pgxpool.Pool + db *pgxpool.Pool } type operatorRun struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` + ID uuid.UUID JobRunID uuid.UUID Name string OperatorType string Status string - StartTime time.Time `gorm:"not null"` - EndTime time.Time `gorm:"default:TIMESTAMP '3000-01-01 00:00:00'"` + StartTime time.Time + EndTime time.Time - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` + CreatedAt time.Time + UpdatedAt time.Time // TODO: add a remarks colum to capture failure reason DeletedAt sql.NullTime } @@ -76,8 +79,8 @@ func (o *OperatorRunRepository) GetOperatorRun(ctx context.Context, name string, if err != nil { return nil, err } - getJobRunByID := `SELECT id, name, job_run_id, status, start_time, end_time FROM ` + operatorTableName + ` j where job_run_id = $1 and name = $2 order by created_at desc limit 1` - err = o.pool.QueryRow(ctx, getJobRunByID, jobRunID, name). + getJobRunByID := "SELECT " + jobOperatorColumns + " FROM " + operatorTableName + " j where job_run_id = $1 and name = $2 order by created_at desc limit 1" + err = o.db.QueryRow(ctx, getJobRunByID, jobRunID, name). 
Scan(&opRun.ID, &opRun.Name, &opRun.JobRunID, &opRun.Status, &opRun.StartTime, &opRun.EndTime) if err != nil { @@ -93,8 +96,8 @@ func (o *OperatorRunRepository) CreateOperatorRun(ctx context.Context, name stri if err != nil { return err } - insertOperatorRun := `INSERT INTO ` + operatorTableName + ` ( job_run_id , name , status, start_time, end_time, created_at, updated_at) values ( $1, $2, $3, $4, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())` - _, err = o.pool.Exec(ctx, insertOperatorRun, jobRunID, name, scheduler.StateRunning, startTime) + insertOperatorRun := "INSERT INTO " + operatorTableName + " ( " + jobOperatorColumnsToStore + " created_at, updated_at) values ( $1, $2, $3, $4, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())" + _, err = o.db.Exec(ctx, insertOperatorRun, name, jobRunID, scheduler.StateRunning, startTime) return errors.WrapIfErr(scheduler.EntityJobRun, "error while inserting the run", err) } @@ -103,13 +106,13 @@ func (o *OperatorRunRepository) UpdateOperatorRun(ctx context.Context, operatorT if err != nil { return err } - updateJobRun := "update " + operatorTableName + " set status = ?, end_time = ?, updated_at = NOW() where id = ?" 
- _, err = o.pool.Exec(ctx, updateJobRun, state.String(), eventTime, operatorRunID) + updateJobRun := "UPDATE " + operatorTableName + " SET status = $1, end_time = $2, updated_at = NOW() where id = $3" + _, err = o.db.Exec(ctx, updateJobRun, state, eventTime, operatorRunID) return errors.WrapIfErr(scheduler.EntityJobRun, "error while updating the run", err) } func NewOperatorRunRepository(pool *pgxpool.Pool) *OperatorRunRepository { return &OperatorRunRepository{ - pool: pool, + db: pool, } } diff --git a/internal/store/postgres/scheduler/job_repository.go b/internal/store/postgres/scheduler/job_repository.go index c3ea90c662..0fb93c811c 100644 --- a/internal/store/postgres/scheduler/job_repository.go +++ b/internal/store/postgres/scheduler/job_repository.go @@ -13,7 +13,6 @@ import ( "github.com/lib/pq" "github.com/odpf/optimus/core/job" - "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/scheduler" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" @@ -21,9 +20,22 @@ import ( "github.com/odpf/optimus/internal/utils" ) +const ( + jobColumns = `id, name, version, owner, description, + labels, start_date, end_date, interval, depends_on_past, + catch_up, retry, alert, static_upstreams, http_upstreams, + task_name, task_config, window_size, window_offset, window_truncate_to, + assets, hooks, metadata, destination, sources, + project_name, namespace_name, created_at, updated_at` + upstreamColumns = ` + job_name, project_name, upstream_job_name, upstream_project_name, + upstream_namespace_name, upstream_resource_urn, upstream_task_name, upstream_type, upstream_external` +) + type JobRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } + type JobUpstreams struct { JobID uuid.UUID JobName string @@ -39,25 +51,8 @@ type JobUpstreams struct { UpstreamState string UpstreamExternal bool - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` -} - -func 
UpstreamFromRow(row pgx.Row) (*JobUpstreams, error) { - var js JobUpstreams - - err := row.Scan(&js.JobName, &js.ProjectName, &js.UpstreamJobName, &js.UpstreamResourceUrn, - &js.UpstreamProjectName, &js.UpstreamNamespaceName, &js.UpstreamTaskName, &js.UpstreamHost, - &js.UpstreamType, &js.UpstreamState, &js.UpstreamExternal) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, errors.NotFound(job.EntityJob, "job upstream not found") - } - - return nil, errors.Wrap(resource.EntityResource, "error in reading row for resource", err) - } - - return &js, nil + CreatedAt time.Time + UpdatedAt time.Time } func (j *JobUpstreams) toJobUpstreams() (*scheduler.JobUpstream, error) { @@ -79,12 +74,12 @@ func (j *JobUpstreams) toJobUpstreams() (*scheduler.JobUpstream, error) { } type Job struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` - Name string `gorm:"not null" json:"name"` + ID uuid.UUID + Name string Version int Owner string Description string - Labels json.RawMessage + Labels map[string]string StartDate time.Time EndDate *time.Time @@ -97,30 +92,30 @@ type Job struct { Alert json.RawMessage // Upstreams - StaticUpstreams pq.StringArray `gorm:"type:varchar(220)[]" json:"static_upstreams"` + StaticUpstreams pq.StringArray `json:"static_upstreams"` // ExternalUpstreams HTTPUpstreams json.RawMessage `json:"http_upstreams"` TaskName string - TaskConfig json.RawMessage + TaskConfig map[string]string WindowSize string WindowOffset string WindowTruncateTo string - Assets json.RawMessage + Assets map[string]string Hooks json.RawMessage Metadata json.RawMessage Destination string - Sources pq.StringArray `gorm:"type:varchar(300)[]"` + Sources pq.StringArray - ProjectName string `json:"project_name"` - NamespaceName string `json:"namespace_name"` + ProjectName string + NamespaceName string - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` + CreatedAt time.Time + 
UpdatedAt time.Time DeletedAt sql.NullTime } @@ -138,17 +133,11 @@ func (j *Job) toJob() (*scheduler.Job, error) { Tenant: t, Destination: j.Destination, Window: window, - } - - if j.TaskConfig != nil { - taskConfig := map[string]string{} - if err := json.Unmarshal(j.TaskConfig, &taskConfig); err != nil { - return nil, err - } - schedulerJob.Task = &scheduler.Task{ + Assets: j.Assets, + Task: &scheduler.Task{ Name: j.TaskName, - Config: taskConfig, - } + Config: j.TaskConfig, + }, } if j.Hooks != nil { @@ -159,14 +148,6 @@ func (j *Job) toJob() (*scheduler.Job, error) { schedulerJob.Hooks = hookConfig } - if j.Assets != nil { - assets := map[string]string{} - if err := json.Unmarshal(j.Assets, &assets); err != nil { - return nil, err - } - schedulerJob.Assets = assets - } - return &schedulerJob, nil } @@ -183,6 +164,7 @@ func (j *Job) toJobWithDetails() (*scheduler.JobWithDetails, error) { Version: j.Version, Owner: j.Owner, Description: j.Description, + Labels: j.Labels, }, Schedule: &scheduler.Schedule{ DependsOnPast: j.DependsOnPast, @@ -194,13 +176,6 @@ func (j *Job) toJobWithDetails() (*scheduler.JobWithDetails, error) { if !j.EndDate.IsZero() { schedulerJobWithDetails.Schedule.EndDate = j.EndDate } - if j.Labels != nil { - var labels map[string]string - if err := json.Unmarshal(j.Labels, &labels); err != nil { - return nil, err - } - schedulerJobWithDetails.JobMetadata.Labels = labels - } if j.Retry != nil { if err := json.Unmarshal(j.Retry, &schedulerJobWithDetails.Retry); err != nil { @@ -241,8 +216,8 @@ func FromRow(row pgx.Row) (*Job, error) { } func (j *JobRepository) GetJob(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName) (*scheduler.Job, error) { - getJobByNameAtProject := `SELECT * FROM job WHERE name = $1 AND project_name = $2` - spec, err := FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) + getJobByNameAtProject := `SELECT ` + jobColumns + ` FROM job WHERE name = $1 AND project_name = $2` + 
spec, err := FromRow(j.db.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) if err != nil { return nil, err } @@ -250,8 +225,8 @@ func (j *JobRepository) GetJob(ctx context.Context, projectName tenant.ProjectNa } func (j *JobRepository) GetJobDetails(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName) (*scheduler.JobWithDetails, error) { - getJobByNameAtProject := `SELECT * FROM job WHERE name = $1 AND project_name = $2` - spec, err := FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) + getJobByNameAtProject := `SELECT ` + jobColumns + ` FROM job WHERE name = $1 AND project_name = $2` + spec, err := FromRow(j.db.QueryRow(ctx, getJobByNameAtProject, jobName, projectName)) if err != nil { return nil, err } @@ -265,21 +240,18 @@ func groupUpstreamsByJobName(jobUpstreams []JobUpstreams) (map[string][]*schedul for _, upstream := range jobUpstreams { schedulerUpstream, err := upstream.toJobUpstreams() if err != nil { - multiError.Append( - errors.Wrap( - scheduler.EntityJobRun, - fmt.Sprintf("unable to parse upstream:%s for job:%s", upstream.UpstreamJobName, upstream.JobName), - err)) + msg := fmt.Sprintf("unable to parse upstream:%s for job:%s", upstream.UpstreamJobName, upstream.JobName) + multiError.Append(errors.Wrap(scheduler.EntityJobRun, msg, err)) continue } jobUpstreamGroup[upstream.JobName] = append(jobUpstreamGroup[upstream.JobName], schedulerUpstream) } - return jobUpstreamGroup, errors.MultiToError(multiError) + return jobUpstreamGroup, multiError.ToErr() } func (j *JobRepository) getJobsUpstreams(ctx context.Context, projectName tenant.ProjectName, jobNames []string) (map[string][]*scheduler.JobUpstream, error) { - getJobUpstreamsByNameAtProject := "SELECT * FROM job_upstream WHERE project_name = $1 and job_name = any ($2)" - rows, err := j.pool.Query(ctx, getJobUpstreamsByNameAtProject, projectName, jobNames) + getJobUpstreamsByNameAtProject := "SELECT " + upstreamColumns + " FROM job_upstream 
WHERE project_name = $1 and job_name = any ($2)" + rows, err := j.db.Query(ctx, getJobUpstreamsByNameAtProject, projectName, jobNames) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) } @@ -304,17 +276,23 @@ func (j *JobRepository) getJobsUpstreams(ctx context.Context, projectName tenant } func (j *JobRepository) GetAll(ctx context.Context, projectName tenant.ProjectName) ([]*scheduler.JobWithDetails, error) { - var specs []Job - // getJobByNameAtProject := `SELECT * FROM job WHERE project_name = $1` - //rows, err := j.pool.Query(ctx, getJobByNameAtProject, projectName) - //if err != nil { - // return nil, errors.Wrap(job.EntityJob, "error while getting all jobs", err) - //} + getJobByNameAtProject := `SELECT ` + jobColumns + ` FROM job WHERE project_name = $1` + rows, err := j.db.Query(ctx, getJobByNameAtProject, projectName) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "error while getting all jobs", err) + } + defer rows.Close() jobsMap := map[string]*scheduler.JobWithDetails{} var jobNameList []string multiError := errors.NewMultiError("errorInGetAll") - for _, spec := range specs { + for rows.Next() { + spec, err := FromRow(rows) + if err != nil { + multiError.Append(errors.Wrap(scheduler.EntityJobRun, "error parsing job:"+spec.Name, err)) + continue + } + job, err := spec.toJobWithDetails() if err != nil { multiError.Append(errors.Wrap(scheduler.EntityJobRun, "error parsing job:"+spec.Name, err)) @@ -336,6 +314,6 @@ func (j *JobRepository) GetAll(ctx context.Context, projectName tenant.ProjectNa func NewJobProviderRepository(pool *pgxpool.Pool) *JobRepository { return &JobRepository{ - pool: pool, + db: pool, } } diff --git a/internal/store/postgres/scheduler/job_run_repository.go b/internal/store/postgres/scheduler/job_run_repository.go index 40056a4cb4..fb5ba98af8 100644 --- a/internal/store/postgres/scheduler/job_run_repository.go +++ 
b/internal/store/postgres/scheduler/job_run_repository.go @@ -14,26 +14,31 @@ import ( "github.com/odpf/optimus/internal/errors" ) +const ( + columnsToStore = `job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition` + jobRunColumns = `id, ` + columnsToStore +) + type JobRunRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } type jobRun struct { - ID uuid.UUID `gorm:"primary_key;type:uuid;default:uuid_generate_v4()"` + ID uuid.UUID JobName string NamespaceName string ProjectName string - ScheduledAt time.Time `gorm:"not null"` - StartTime time.Time `gorm:"not null"` - EndTime time.Time `gorm:"default:TIMESTAMP '3000-01-01 00:00:00'"` + ScheduledAt time.Time + StartTime time.Time + EndTime time.Time Status string SLADefinition int64 - CreatedAt time.Time `gorm:"not null" json:"created_at"` - UpdatedAt time.Time `gorm:"not null" json:"updated_at"` + CreatedAt time.Time + UpdatedAt time.Time } func (j jobRun) toJobRun() (*scheduler.JobRun, error) { @@ -51,9 +56,9 @@ func (j jobRun) toJobRun() (*scheduler.JobRun, error) { func (j *JobRunRepository) GetByID(ctx context.Context, id scheduler.JobRunID) (*scheduler.JobRun, error) { var jr jobRun - getJobRunByID := `SELECT job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run where id = $1` - err := j.pool.QueryRow(ctx, getJobRunByID, id). - Scan(&jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, + getJobRunByID := `SELECT ` + jobRunColumns + ` FROM job_run where id = $1` + err := j.db.QueryRow(ctx, getJobRunByID, id). 
+ Scan(&jr.ID, &jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, &jr.Status, &jr.SLADefinition) if err != nil { return nil, err @@ -63,9 +68,9 @@ func (j *JobRunRepository) GetByID(ctx context.Context, id scheduler.JobRunID) ( func (j *JobRunRepository) GetByScheduledAt(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (*scheduler.JobRun, error) { var jr jobRun - getJobRunByID := `SELECT id, job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition FROM job_run j where project_name = $1 and namespace_name = $2 and job_name = $3 and scheduled_at = $4 order by created_at desc limit 1` - err := j.pool.QueryRow(ctx, getJobRunByID, t.ProjectName(), t.NamespaceName(), jobName, scheduledAt). - Scan(&jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, + getJobRunByID := `SELECT ` + jobRunColumns + ` FROM job_run j where project_name = $1 and namespace_name = $2 and job_name = $3 and scheduled_at = $4 order by created_at desc limit 1` + err := j.db.QueryRow(ctx, getJobRunByID, t.ProjectName(), t.NamespaceName(), jobName, scheduledAt). + Scan(&jr.ID, &jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, &jr.Status, &jr.SLADefinition) if err != nil { @@ -78,8 +83,8 @@ func (j *JobRunRepository) GetByScheduledAt(ctx context.Context, t tenant.Tenant } func (j *JobRunRepository) Update(ctx context.Context, jobRunID uuid.UUID, endTime time.Time, status scheduler.State) error { - updateJobRun := "update job_run set status = ?, end_time = ? , updated_at = NOW() where id = ?" 
- _, err := j.pool.Exec(ctx, updateJobRun, status, endTime, jobRunID) + updateJobRun := "update job_run set status = $1, end_time = $2, updated_at = NOW() where id = $3" + _, err := j.db.Exec(ctx, updateJobRun, status, endTime, jobRunID) return errors.WrapIfErr(scheduler.EntityJobRun, "unable to update job run", err) } @@ -91,18 +96,18 @@ func (j *JobRunRepository) UpdateSLA(ctx context.Context, slaObjects []*schedule } query := "update job_run set sla_alert = True, updated_at = NOW() where (job_name, scheduled_at) = any ($1)" - _, err := j.pool.Exec(ctx, query, jobIDList) + _, err := j.db.Exec(ctx, query, jobIDList) return errors.WrapIfErr(scheduler.EntityJobRun, "unable to update SLA", err) } func (j *JobRunRepository) Create(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time, slaDefinitionInSec int64) error { - insertJobRun := `INSERT INTO job_run (job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition, created_at, updated_at) values ($1, $2, $3, $4, NOW(), TIMESTAMP '3000-01-01 00:00:00', ?, ?, NOW(), NOW())` - _, err := j.pool.Exec(ctx, insertJobRun, jobName, t.NamespaceName(), t.ProjectName(), scheduledAt, scheduler.StateRunning, slaDefinitionInSec) + insertJobRun := `INSERT INTO job_run (` + columnsToStore + ` created_at, updated_at) values ($1, $2, $3, $4, NOW(), TIMESTAMP '3000-01-01 00:00:00', $5, $6, NOW(), NOW())` + _, err := j.db.Exec(ctx, insertJobRun, jobName, t.NamespaceName(), t.ProjectName(), scheduledAt, scheduler.StateRunning, slaDefinitionInSec) return errors.WrapIfErr(scheduler.EntityJobRun, "unable to create job run", err) } func NewJobRunRepository(pool *pgxpool.Pool) *JobRunRepository { return &JobRunRepository{ - pool: pool, + db: pool, } } From 473a2ff7e2b89898054d197e36236804ad3e3123 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Fri, 23 Dec 2022 13:09:07 +0530 Subject: [PATCH 13/25] refactor: update resource bounded context --- 
.../postgres/resource/backup_repository.go | 10 ++++---- .../store/postgres/resource/repository.go | 25 ++++++++++--------- internal/store/postgres/resource/resource.go | 4 +++ 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/internal/store/postgres/resource/backup_repository.go b/internal/store/postgres/resource/backup_repository.go index a9a6df7ee0..f95f776c00 100644 --- a/internal/store/postgres/resource/backup_repository.go +++ b/internal/store/postgres/resource/backup_repository.go @@ -71,14 +71,14 @@ func (b Backup) ToResourceBackup() (*resource.Backup, error) { } type BackupRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } func (repo BackupRepository) Create(ctx context.Context, resourceBackup *resource.Backup) error { backup := NewBackup(resourceBackup) insertBackup := `INSERT INTO backup (` + backupToStoreColumns + `) VALUES ($1, $2, $3, $4, $5, $6, $7, now()) returning id` - err := repo.pool.QueryRow(ctx, insertBackup, backup.Store, backup.ProjectName, backup.NamespaceName, + err := repo.db.QueryRow(ctx, insertBackup, backup.Store, backup.ProjectName, backup.NamespaceName, backup.Description, backup.ResourceNames, backup.Config, backup.CreatedAt).Scan(&backup.ID) if err != nil { @@ -91,7 +91,7 @@ func (repo BackupRepository) Create(ctx context.Context, resourceBackup *resourc func (repo BackupRepository) GetByID(ctx context.Context, id resource.BackupID) (*resource.Backup, error) { var b Backup getByID := `SELECT ` + backupColumns + ` FROM backup WHERE id = $1` - err := repo.pool.QueryRow(ctx, getByID, id.String()). + err := repo.db.QueryRow(ctx, getByID, id.String()). 
Scan(&b.ID, &b.Store, &b.ProjectName, &b.NamespaceName, &b.Description, &b.ResourceNames, &b.Config, &b.CreatedAt, &b.UpdatedAt) @@ -107,7 +107,7 @@ func (repo BackupRepository) GetByID(ctx context.Context, id resource.BackupID) func (repo BackupRepository) GetAll(ctx context.Context, tnnt tenant.Tenant, store resource.Store) ([]*resource.Backup, error) { getAllBackups := `SELECT ` + backupColumns + ` FROM backup WHERE project_name = $1 AND namespace_name = $2 AND store = $3` - rows, err := repo.pool.Query(ctx, getAllBackups, tnnt.ProjectName(), tnnt.NamespaceName(), store) + rows, err := repo.db.Query(ctx, getAllBackups, tnnt.ProjectName(), tnnt.NamespaceName(), store) if err != nil { return nil, errors.Wrap(resource.EntityBackup, "error while getting backup", err) } @@ -133,5 +133,5 @@ func (repo BackupRepository) GetAll(ctx context.Context, tnnt tenant.Tenant, sto } func NewBackupRepository(pool *pgxpool.Pool) *BackupRepository { - return &BackupRepository{pool: pool} + return &BackupRepository{db: pool} } diff --git a/internal/store/postgres/resource/repository.go b/internal/store/postgres/resource/repository.go index edb6f8c098..ce7146f644 100644 --- a/internal/store/postgres/resource/repository.go +++ b/internal/store/postgres/resource/repository.go @@ -12,24 +12,25 @@ import ( ) const ( - resourceColumns = `full_name, kind, store, status, urn, project_name, namespace_name, metadata, spec, created_at, updated_at` + columnsToStore = `full_name, kind, store, status, urn, project_name, namespace_name, metadata, spec, created_at, updated_at` + resourceColumns = `id, ` + columnsToStore ) type Repository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } func NewRepository(pool *pgxpool.Pool) *Repository { return &Repository{ - pool: pool, + db: pool, } } func (r Repository) Create(ctx context.Context, resourceModel *resource.Resource) error { res := FromResourceToModel(resourceModel) - insertResource := `INSERT INTO resource (` + resourceColumns + `) VALUES ($1, 
$2, $3, $4, $5, $6, $7, $8, $9, now(), now())` - _, err := r.pool.Exec(ctx, insertResource, res.FullName, res.Kind, res.Store, res.Status, res.URN, + insertResource := `INSERT INTO resource (` + columnsToStore + `) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, now(), now())` + _, err := r.db.Exec(ctx, insertResource, res.FullName, res.Kind, res.Store, res.Status, res.URN, res.ProjectName, res.NamespaceName, res.Metadata, res.Spec) return errors.WrapIfErr(tenant.EntityNamespace, "error creating resource to database", err) } @@ -39,7 +40,7 @@ func (r Repository) Update(ctx context.Context, resourceModel *resource.Resource updateResource := `UPDATE resource SET kind=$1, status=$2, urn=$3, metadata=$4, spec=$5, updated_at=now() WHERE full_name=$6 AND store=$7 AND project_name = $8 And namespace_name = $9` - tag, err := r.pool.Exec(ctx, updateResource, res.Kind, res.Status, res.URN, + tag, err := r.db.Exec(ctx, updateResource, res.Kind, res.Status, res.URN, res.Metadata, res.Spec, res.FullName, res.Store, res.ProjectName, res.NamespaceName) if err != nil { @@ -56,8 +57,8 @@ func (r Repository) ReadByFullName(ctx context.Context, tnnt tenant.Tenant, stor var res Resource getResource := `SELECT ` + resourceColumns + ` FROM resource WHERE full_name = $1 AND store = $2 AND project_name = $3 AND namespace_name = $4` - err := r.pool.QueryRow(ctx, getResource, fullName, store, tnnt.ProjectName(), tnnt.NamespaceName()). - Scan(&res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, + err := r.db.QueryRow(ctx, getResource, fullName, store, tnnt.ProjectName(), tnnt.NamespaceName()). 
+ Scan(&res.ID, &res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, &res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt) if err != nil { @@ -73,7 +74,7 @@ func (r Repository) ReadByFullName(ctx context.Context, tnnt tenant.Tenant, stor func (r Repository) ReadAll(ctx context.Context, tnnt tenant.Tenant, store resource.Store) ([]*resource.Resource, error) { getAllResources := `SELECT ` + resourceColumns + ` FROM resource WHERE project_name = $1 and namespace_name = $2 and store = $3` - rows, err := r.pool.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store) + rows, err := r.db.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store) if err != nil { return nil, errors.Wrap(resource.EntityResource, "error in ReadAll", err) } @@ -82,7 +83,7 @@ func (r Repository) ReadAll(ctx context.Context, tnnt tenant.Tenant, store resou var resources []*resource.Resource for rows.Next() { var res Resource - err = rows.Scan(&res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, + err = rows.Scan(&res.ID, &res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, &res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt) if err != nil { return nil, errors.Wrap(resource.EntityResource, "error in GetAll", err) @@ -101,7 +102,7 @@ func (r Repository) ReadAll(ctx context.Context, tnnt tenant.Tenant, store resou func (r Repository) GetResources(ctx context.Context, tnnt tenant.Tenant, store resource.Store, names []string) ([]*resource.Resource, error) { getAllResources := `SELECT ` + resourceColumns + ` FROM resource WHERE project_name = $1 and namespace_name = $2 and store = $3 AND full_name = any ($4)` - rows, err := r.pool.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store, names) + rows, err := r.db.Query(ctx, getAllResources, tnnt.ProjectName(), tnnt.NamespaceName(), store, names) if err != nil { return nil, 
errors.Wrap(resource.EntityResource, "error in ReadAll", err) } @@ -133,7 +134,7 @@ func (r Repository) UpdateStatus(ctx context.Context, resources ...*resource.Res batch.Queue(updateStatus, res.Status(), res.Tenant().ProjectName(), res.Tenant().NamespaceName(), res.Store(), res.FullName()) } - results := r.pool.SendBatch(ctx, &batch) + results := r.db.SendBatch(ctx, &batch) defer results.Close() multiErr := errors.NewMultiError("error updating resources status") diff --git a/internal/store/postgres/resource/resource.go b/internal/store/postgres/resource/resource.go index 13f440efe8..fbc0c296be 100644 --- a/internal/store/postgres/resource/resource.go +++ b/internal/store/postgres/resource/resource.go @@ -4,12 +4,16 @@ import ( "encoding/json" "time" + "github.com/google/uuid" + "github.com/odpf/optimus/core/resource" "github.com/odpf/optimus/core/tenant" "github.com/odpf/optimus/internal/errors" ) type Resource struct { + ID uuid.UUID + FullName string Kind string Store string From b5a4bd612560c8e3f1614168049fca2bff79597a Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Fri, 23 Dec 2022 13:09:59 +0530 Subject: [PATCH 14/25] refactor: update job bounded context --- internal/store/postgres/job/adapter.go | 89 +++-------------- internal/store/postgres/job/job_repository.go | 97 +++++++------------ 2 files changed, 46 insertions(+), 140 deletions(-) diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index cc881e8394..fdda177293 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -22,7 +22,7 @@ type Spec struct { Version int Owner string Description string - Labels json.RawMessage + Labels map[string]string StartDate time.Time EndDate *time.Time @@ -41,13 +41,13 @@ type Spec struct { HTTPUpstreams json.RawMessage `json:"http_upstreams"` TaskName string - TaskConfig json.RawMessage + TaskConfig map[string]string WindowSize string WindowOffset string WindowTruncateTo string - 
Assets json.RawMessage + Assets map[string]string Hooks json.RawMessage Metadata json.RawMessage @@ -81,7 +81,7 @@ type Asset struct { type Hook struct { Name string - Config json.RawMessage + Config map[string]string } type Metadata struct { @@ -108,14 +108,6 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { jobSpec := jobEntity.Spec() - var labelsBytes []byte - if jobSpec.Labels() != nil { - labelsBytes, err = json.Marshal(jobSpec.Labels()) - if err != nil { - return nil, err - } - } - startDate, err := time.Parse(jobDatetimeLayout, jobSpec.Schedule().StartDate().String()) if err != nil { return nil, err @@ -139,20 +131,6 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { return nil, err } - taskConfigBytes, err := toConfig(jobSpec.Task().Config()) - if err != nil { - return nil, err - } - - var assetsBytes []byte - if jobSpec.Asset() != nil { - a, err := toStorageAsset(jobSpec.Asset().Assets()) - if err != nil { - return nil, err - } - assetsBytes = a - } - hooksBytes, err := toStorageHooks(jobSpec.Hooks()) if err != nil { return nil, err @@ -187,8 +165,8 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { Version: jobSpec.Version().Int(), Owner: jobSpec.Owner().String(), Description: jobSpec.Description(), - Labels: labelsBytes, - Assets: assetsBytes, + Labels: jobSpec.Labels(), + Assets: jobSpec.Asset().Assets(), Metadata: metadataBytes, StartDate: startDate, @@ -196,7 +174,7 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { Interval: jobSpec.Schedule().Interval(), TaskName: jobSpec.Task().Name().String(), - TaskConfig: taskConfigBytes, + TaskConfig: jobSpec.Task().Config().Configs(), Hooks: hooksBytes, @@ -240,24 +218,12 @@ func toStorageHooks(hookSpecs []*job.Hook) ([]byte, error) { } func toStorageHook(spec *job.Hook) (Hook, error) { - configJSON, err := json.Marshal(spec.Config().Configs()) - if err != nil { - return Hook{}, err - } return Hook{ Name: spec.Name().String(), - Config: configJSON, + Config: 
spec.Config().Configs(), }, nil } -func toStorageAsset(assetSpecs map[string]string) ([]byte, error) { - assetsJSON, err := json.Marshal(assetSpecs) - if err != nil { - return nil, err - } - return assetsJSON, nil -} - func toStorageAlerts(alertSpecs []*job.AlertSpec) ([]byte, error) { if alertSpecs == nil { return nil, nil @@ -319,13 +285,6 @@ func toStorageMetadata(metadataSpec *job.Metadata) ([]byte, error) { return json.Marshal(metadata) } -func toConfig(configSpec *job.Config) ([]byte, error) { - if configSpec == nil { - return nil, nil - } - return json.Marshal(configSpec.Configs()) -} - func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { version, err := job.VersionFrom(jobSpec.Version) if err != nil { @@ -386,11 +345,7 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { var taskConfig *job.Config if jobSpec.TaskConfig != nil { - var configMap map[string]string - if err := json.Unmarshal(jobSpec.TaskConfig, &configMap); err != nil { - return nil, err - } - taskConfig, err = job.NewConfig(configMap) + taskConfig, err = job.NewConfig(jobSpec.TaskConfig) if err != nil { return nil, err } @@ -403,11 +358,7 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { jobSpecBuilder := job.NewSpecBuilder(version, jobName, owner, schedule, window, task).WithDescription(jobSpec.Description) if jobSpec.Labels != nil { - var labels map[string]string - if err := json.Unmarshal(jobSpec.Labels, &labels); err != nil { - return nil, err - } - jobSpecBuilder = jobSpecBuilder.WithLabels(labels) + jobSpecBuilder = jobSpecBuilder.WithLabels(jobSpec.Labels) } if jobSpec.Hooks != nil { @@ -480,11 +431,7 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { } if jobSpec.Assets != nil { - assetsMap, err := fromStorageAssets(jobSpec.Assets) - if err != nil { - return nil, err - } - asset, err := job.NewAsset(assetsMap) + asset, err := job.NewAsset(jobSpec.Assets) if err != nil { return nil, err } @@ -517,11 +464,7 @@ func fromStorageHooks(raw []byte) ([]*job.Hook, 
error) { } func fromStorageHook(hook Hook) (*job.Hook, error) { - var configMap map[string]string - if err := json.Unmarshal(hook.Config, &configMap); err != nil { - return nil, err - } - config, err := job.NewConfig(configMap) + config, err := job.NewConfig(hook.Config) if err != nil { return nil, err } @@ -560,14 +503,6 @@ func fromStorageAlerts(raw []byte) ([]*job.AlertSpec, error) { return jobAlerts, nil } -func fromStorageAssets(raw []byte) (map[string]string, error) { - var assetsMap map[string]string - if err := json.Unmarshal(raw, &assetsMap); err != nil { - return nil, err - } - return assetsMap, nil -} - func FromRow(row pgx.Row) (*Spec, error) { var js Spec diff --git a/internal/store/postgres/job/job_repository.go b/internal/store/postgres/job/job_repository.go index 8b2757f67f..ccd4de1de2 100644 --- a/internal/store/postgres/job/job_repository.go +++ b/internal/store/postgres/job/job_repository.go @@ -14,12 +14,21 @@ import ( "github.com/odpf/optimus/internal/errors" ) +const ( + jobColumnsToStore = `name, version, owner, description, labels, start_date, end_date, interval, + depends_on_past, catch_up, retry, alert, static_upstreams, http_upstreams, task_name, task_config, + window_size, window_offset, window_truncate_to, assets, hooks, metadata, destination, sources, + project_name, namespace_name, created_at, updated_at` + + jobColumns = `id, ` + jobColumnsToStore +) + type JobRepository struct { - pool *pgxpool.Pool + db *pgxpool.Pool } func NewJobRepository(pool *pgxpool.Pool) *JobRepository { - return &JobRepository{pool: pool} + return &JobRepository{db: pool} } func (j JobRepository) Add(ctx context.Context, jobs []*job.Job) ([]*job.Job, error) { @@ -59,31 +68,11 @@ func (j JobRepository) triggerInsert(ctx context.Context, jobEntity *job.Job) er return err } - insertJobQuery := ` -INSERT INTO job ( - name, version, owner, description, - labels, start_date, end_date, interval, - depends_on_past, catch_up, retry, alert, - static_upstreams, 
http_upstreams, task_name, task_config, - window_size, window_offset, window_truncate_to, - assets, hooks, metadata, - destination, sources, - project_name, namespace_name, - created_at, updated_at -) -VALUES ( - $1, $2, $3, $4, - $5, $6, $7, $8, - $9, $10, $11, $12, - $13, $14, $15, $16, - $17, $18, $19, - $20, $21, $22, - $23, $24, - $25, $26, - NOW(), NOW() -);` + insertJobQuery := `INSERT INTO job (` + jobColumnsToStore + `) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, + $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW(), NOW());` - tag, err := j.pool.Exec(ctx, insertJobQuery, + tag, err := j.db.Exec(ctx, insertJobQuery, storageJob.Name, storageJob.Version, storageJob.Owner, storageJob.Description, storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, @@ -163,7 +152,7 @@ WHERE name = $24 AND project_name = $25;` - tag, err := j.pool.Exec(ctx, updateJobQuery, + tag, err := j.db.Exec(ctx, updateJobQuery, storageJob.Version, storageJob.Owner, storageJob.Description, storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, @@ -184,17 +173,14 @@ WHERE } func (j JobRepository) get(ctx context.Context, projectName tenant.ProjectName, jobName job.Name, onlyActiveJob bool) (*Spec, error) { - getJobByNameAtProject := `SELECT * -FROM job -WHERE name = $1 -AND project_name = $2` + getJobByNameAtProject := `SELECT ` + jobColumns + ` FROM job WHERE name = $1 AND project_name = $2` if onlyActiveJob { jobDeletedFilter := " AND deleted_at IS NULL" getJobByNameAtProject += jobDeletedFilter } - return FromRow(j.pool.QueryRow(ctx, getJobByNameAtProject, jobName.String(), projectName.String())) + return FromRow(j.db.QueryRow(ctx, getJobByNameAtProject, jobName.String(), projectName.String())) } func (j JobRepository) 
ResolveUpstreams(ctx context.Context, projectName tenant.ProjectName, jobNames []job.Name) (map[job.Name][]*job.Upstream, error) { @@ -249,7 +235,7 @@ FROM inferred_upstreams id JOIN job j ON id.source = j.destination WHERE j.deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, query, projectName, jobNames) + rows, err := j.db.Query(ctx, query, projectName, jobNames) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while getting job with upstreams", err) } @@ -369,12 +355,9 @@ func (j JobRepository) GetByJobName(ctx context.Context, projectName tenant.Proj func (j JobRepository) GetAllByProjectName(ctx context.Context, projectName tenant.ProjectName) ([]*job.Job, error) { me := errors.NewMultiError("get all job specs by project name errors") - getAllByProjectName := `SELECT * -FROM job -WHERE project_name = $1 -AND deleted_at IS NULL;` + getAllByProjectName := `SELECT ` + jobColumns + ` FROM job WHERE project_name = $1 AND deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, getAllByProjectName, projectName) + rows, err := j.db.Query(ctx, getAllByProjectName, projectName) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while jobs for project: "+projectName.String(), err) } @@ -403,12 +386,9 @@ AND deleted_at IS NULL;` func (j JobRepository) GetAllByResourceDestination(ctx context.Context, resourceDestination job.ResourceURN) ([]*job.Job, error) { me := errors.NewMultiError("get all job specs by resource destination") - getAllByDestination := `SELECT * -FROM job -WHERE destination = $1 -AND deleted_at IS NULL;` + getAllByDestination := `SELECT ` + jobColumns + ` FROM job WHERE destination = $1 AND deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, getAllByDestination, resourceDestination) + rows, err := j.db.Query(ctx, getAllByDestination, resourceDestination) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while jobs for destination: "+resourceDestination.String(), err) } @@ -481,7 +461,7 @@ func (j JobRepository) 
ReplaceUpstreams(ctx context.Context, jobsWithUpstreams [ storageJobUpstreams = append(storageJobUpstreams, upstream...) } - tx, err := j.pool.Begin(ctx) + tx, err := j.db.Begin(ctx) if err != nil { return errors.InternalError(job.EntityJob, "unable to update upstreams", err) } @@ -620,12 +600,9 @@ func (j JobRepository) Delete(ctx context.Context, projectName tenant.ProjectNam } func (j JobRepository) hardDelete(ctx context.Context, projectName tenant.ProjectName, jobName job.Name) error { - query := ` -DELETE -FROM job -WHERE project_name = $1 AND name = $2` + query := `DELETE FROM job WHERE project_name = $1 AND name = $2` - tag, err := j.pool.Exec(ctx, query, projectName, jobName) + tag, err := j.db.Exec(ctx, query, projectName, jobName) if err != nil { return errors.Wrap(job.EntityJob, "error during job deletion", err) } @@ -636,12 +613,9 @@ WHERE project_name = $1 AND name = $2` } func (j JobRepository) softDelete(ctx context.Context, projectName tenant.ProjectName, jobName job.Name) error { - query := ` -UPDATE job -SET deleted_at = current_timestamp -WHERE project_name = $1 AND name = $2` + query := `UPDATE job SET deleted_at = current_timestamp WHERE project_name = $1 AND name = $2` - tag, err := j.pool.Exec(ctx, query, projectName, jobName) + tag, err := j.db.Exec(ctx, query, projectName, jobName) if err != nil { return errors.Wrap(job.EntityJob, "error during job deletion", err) } @@ -654,13 +628,10 @@ WHERE project_name = $1 AND name = $2` func (j JobRepository) GetAllByTenant(ctx context.Context, jobTenant tenant.Tenant) ([]*job.Job, error) { me := errors.NewMultiError("get all job specs by project name errors") - getAllByProjectName := `SELECT * -FROM job -WHERE project_name = $1 -AND namespace_name = $2 -AND deleted_at IS NULL;` + getAllByProjectName := `SELECT ` + jobColumns + ` FROM job + WHERE project_name = $1 AND namespace_name = $2 AND deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, getAllByProjectName, jobTenant.ProjectName(), 
jobTenant.NamespaceName()) + rows, err := j.db.Query(ctx, getAllByProjectName, jobTenant.ProjectName(), jobTenant.NamespaceName()) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while jobs for project: "+jobTenant.ProjectName().String(), err) } @@ -694,7 +665,7 @@ SELECT FROM job_upstream WHERE project_name=$1 AND job_name=$2;` - rows, err := j.pool.Query(ctx, query, projectName, jobName) + rows, err := j.db.Query(ctx, query, projectName, jobName) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while getting jobs with upstreams", err) } @@ -720,7 +691,7 @@ FROM job WHERE project_name = $1 AND $2 = ANY(sources) AND deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, query, projectName, destination) + rows, err := j.db.Query(ctx, query, projectName, destination) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while getting job downstream", err) } @@ -755,7 +726,7 @@ JOIN job j ON (ju.job_name = j.name AND ju.project_name = j.project_name) WHERE upstream_project_name=$1 AND upstream_job_name=$2 AND j.deleted_at IS NULL;` - rows, err := j.pool.Query(ctx, query, projectName, jobName) + rows, err := j.db.Query(ctx, query, projectName, jobName) if err != nil { return nil, errors.Wrap(job.EntityJob, "error while getting job downstream by job name", err) } From d32a5fa50053f2c211f8f8540644fd3eeb1604cd Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Mon, 2 Jan 2023 16:54:37 +0530 Subject: [PATCH 15/25] fix: fix lint errors --- internal/store/postgres/job/adapter.go | 9 +++------ server/cmd/migration/migrate_to.go | 4 ++-- server/cmd/migration/rollback.go | 4 ++-- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index fdda177293..1dbffd4a11 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -204,10 +204,7 @@ func toStorageHooks(hookSpecs []*job.Hook) ([]byte, error) { } var hooks 
[]Hook for _, hookSpec := range hookSpecs { - hook, err := toStorageHook(hookSpec) - if err != nil { - return nil, err - } + hook := toStorageHook(hookSpec) hooks = append(hooks, hook) } hooksJSON, err := json.Marshal(hooks) @@ -217,11 +214,11 @@ func toStorageHooks(hookSpecs []*job.Hook) ([]byte, error) { return hooksJSON, nil } -func toStorageHook(spec *job.Hook) (Hook, error) { +func toStorageHook(spec *job.Hook) Hook { return Hook{ Name: spec.Name().String(), Config: spec.Config().Configs(), - }, nil + } } func toStorageAlerts(alertSpecs []*job.AlertSpec) ([]byte, error) { diff --git a/server/cmd/migration/migrate_to.go b/server/cmd/migration/migrate_to.go index 6634489e41..32cb7048d1 100644 --- a/server/cmd/migration/migrate_to.go +++ b/server/cmd/migration/migrate_to.go @@ -39,11 +39,11 @@ func (m *migrateTo) RunE(_ *cobra.Command, _ []string) error { dsn := clientConfig.Serve.DB.DSN - fmt.Printf("Executing migration to version %d \n", m.version) + fmt.Printf("Executing migration to version %d \n", m.version) // nolint:forbidigo err = postgres.ToVersion(uint(m.version), dsn) if err != nil { return fmt.Errorf("error during migration: %w", err) } - fmt.Println("Migration finished successfully") + fmt.Println("Migration finished successfully") // nolint:forbidigo return nil } diff --git a/server/cmd/migration/rollback.go b/server/cmd/migration/rollback.go index 2cbd12a939..a0cf374d35 100644 --- a/server/cmd/migration/rollback.go +++ b/server/cmd/migration/rollback.go @@ -35,11 +35,11 @@ func (r *rollbackCommand) RunE(_ *cobra.Command, _ []string) error { dsn := clientConfig.Serve.DB.DSN - fmt.Printf("Executing rollback for %d migrations\n", r.count) + fmt.Printf("Executing rollback for %d migrations\n", r.count) // nolint:forbidigo err = postgres.Rollback(dsn, r.count) if err != nil { return fmt.Errorf("error rolling back migration: %w", err) } - fmt.Println("Rollback finished successfully") + fmt.Println("Rollback finished successfully") // nolint:forbidigo 
return nil } From f2f0a8d131202ace073c3e26f7ca4c08c596c567 Mon Sep 17 00:00:00 2001 From: Sandeep Bhardwaj Date: Mon, 2 Jan 2023 17:54:30 +0530 Subject: [PATCH 16/25] fix: fix issues for field scanning --- internal/store/postgres/job/adapter.go | 9 +++++++-- internal/store/postgres/resource/repository.go | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 1dbffd4a11..669d3c9e3f 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -160,13 +160,18 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { sources[i] = source.String() } + assets := map[string]string{} + if jobSpec.Asset() != nil { + assets = jobSpec.Asset().Assets() + } + return &Spec{ Name: jobSpec.Name().String(), Version: jobSpec.Version().Int(), Owner: jobSpec.Owner().String(), Description: jobSpec.Description(), Labels: jobSpec.Labels(), - Assets: jobSpec.Asset().Assets(), + Assets: assets, Metadata: metadataBytes, StartDate: startDate, @@ -508,7 +513,7 @@ func FromRow(row pgx.Row) (*Spec, error) { &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, - &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) + &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt) if err != nil { if errors.Is(err, pgx.ErrNoRows) { diff --git a/internal/store/postgres/resource/repository.go b/internal/store/postgres/resource/repository.go index ce7146f644..7d9768ab4c 100644 --- a/internal/store/postgres/resource/repository.go +++ b/internal/store/postgres/resource/repository.go @@ -111,7 +111,7 @@ store = $3 AND full_name = any ($4)` var resources []*resource.Resource for rows.Next() { var res Resource - err = rows.Scan(&res.FullName, &res.Kind, &res.Store, 
&res.Status, &res.URN, + err = rows.Scan(&res.ID, &res.FullName, &res.Kind, &res.Store, &res.Status, &res.URN, &res.ProjectName, &res.NamespaceName, &res.Metadata, &res.Spec, &res.CreatedAt, &res.UpdatedAt) if err != nil { return nil, errors.Wrap(resource.EntityResource, "error in GetAll", err) From e699cd76a9b9f162af7000497d15b913f42eb9c6 Mon Sep 17 00:00:00 2001 From: Arinda Arif Date: Tue, 3 Jan 2023 11:40:57 +0700 Subject: [PATCH 17/25] fix: job deleted_at value is always nil issue --- internal/store/postgres/job/adapter.go | 2 +- internal/store/postgres/job/job_repository.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 669d3c9e3f..17198dbd0e 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -513,7 +513,7 @@ func FromRow(row pgx.Row) (*Spec, error) { &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, - &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt) + &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) if err != nil { if errors.Is(err, pgx.ErrNoRows) { diff --git a/internal/store/postgres/job/job_repository.go b/internal/store/postgres/job/job_repository.go index ccd4de1de2..19ee3bedd2 100644 --- a/internal/store/postgres/job/job_repository.go +++ b/internal/store/postgres/job/job_repository.go @@ -20,7 +20,7 @@ const ( window_size, window_offset, window_truncate_to, assets, hooks, metadata, destination, sources, project_name, namespace_name, created_at, updated_at` - jobColumns = `id, ` + jobColumnsToStore + jobColumns = `id, ` + jobColumnsToStore + `, deleted_at` ) type JobRepository struct { From defa281b332d7c562c19236bf8bbfdca8cdad7d1 Mon Sep 17 00:00:00 2001 From: Arinda Arif 
Date: Wed, 4 Jan 2023 10:56:03 +0700 Subject: [PATCH 18/25] fix: job repository read write issues and alter job table --- internal/store/postgres/job/adapter.go | 231 +++++++++++------- internal/store/postgres/job/job_repository.go | 153 +++++++----- .../migrations/000046_update_job_table.up.sql | 14 ++ 3 files changed, 247 insertions(+), 151 deletions(-) create mode 100644 internal/store/postgres/migrations/000046_update_job_table.up.sql diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 17198dbd0e..95a17436b3 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -24,31 +24,21 @@ type Spec struct { Description string Labels map[string]string - StartDate time.Time - EndDate *time.Time - Interval string + Schedule json.RawMessage + WindowSpec json.RawMessage - // Behavior - DependsOnPast bool `json:"depends_on_past"` - CatchUp bool `json:"catch_up"` - Retry json.RawMessage - Alert json.RawMessage + Alert json.RawMessage - // Upstreams - StaticUpstreams pq.StringArray `json:"static_upstreams"` - - // ExternalUpstreams - HTTPUpstreams json.RawMessage `json:"http_upstreams"` + StaticUpstreams pq.StringArray + HTTPUpstreams json.RawMessage TaskName string TaskConfig map[string]string - WindowSize string - WindowOffset string - WindowTruncateTo string + Hooks json.RawMessage + + Assets map[string]string - Assets map[string]string - Hooks json.RawMessage Metadata json.RawMessage Destination string @@ -62,6 +52,21 @@ type Spec struct { DeletedAt sql.NullTime } +type Schedule struct { + StartDate time.Time + EndDate *time.Time + Interval string + DependsOnPast bool `json:"depends_on_past"` + CatchUp bool `json:"catch_up"` + Retry *Retry +} + +type Window struct { + WindowSize string + WindowOffset string + WindowTruncateTo string +} + type Retry struct { Count int `json:"count"` Delay int32 `json:"delay"` @@ -108,24 +113,6 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { 
jobSpec := jobEntity.Spec() - startDate, err := time.Parse(jobDatetimeLayout, jobSpec.Schedule().StartDate().String()) - if err != nil { - return nil, err - } - - var endDate time.Time - if jobSpec.Schedule().EndDate() != "" { - endDate, err = time.Parse(jobDatetimeLayout, jobSpec.Schedule().EndDate().String()) - if err != nil { - return nil, err - } - } - - retryBytes, err := toStorageRetry(jobSpec.Schedule().Retry()) - if err != nil { - return nil, err - } - alertsBytes, err := toStorageAlerts(jobSpec.AlertSpecs()) if err != nil { return nil, err @@ -160,11 +147,21 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { sources[i] = source.String() } - assets := map[string]string{} + var assets map[string]string if jobSpec.Asset() != nil { assets = jobSpec.Asset().Assets() } + schedule, err := toStorageSchedule(jobSpec.Schedule()) + if err != nil { + return nil, err + } + + windowBytes, err := toStorageWindow(jobSpec.Window()) + if err != nil { + return nil, err + } + return &Spec{ Name: jobSpec.Name().String(), Version: jobSpec.Version().Int(), @@ -174,24 +171,16 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { Assets: assets, Metadata: metadataBytes, - StartDate: startDate, - EndDate: &endDate, - Interval: jobSpec.Schedule().Interval(), + Schedule: schedule, + WindowSpec: windowBytes, + + Alert: alertsBytes, TaskName: jobSpec.Task().Name().String(), TaskConfig: jobSpec.Task().Config().Configs(), Hooks: hooksBytes, - WindowSize: jobSpec.Window().GetSize(), - WindowOffset: jobSpec.Window().GetOffset(), - WindowTruncateTo: jobSpec.Window().GetTruncateTo(), - - DependsOnPast: jobSpec.Schedule().DependsOnPast(), - CatchUp: jobSpec.Schedule().CatchUp(), - Retry: retryBytes, - Alert: alertsBytes, - StaticUpstreams: staticUpstreams, HTTPUpstreams: httpUpstreamsInBytes, @@ -203,6 +192,19 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { }, nil } +func toStorageWindow(windowSpec models.Window) ([]byte, error) { + window := Window{ + WindowSize: 
windowSpec.GetSize(), + WindowOffset: windowSpec.GetOffset(), + WindowTruncateTo: windowSpec.GetTruncateTo(), + } + windowJSON, err := json.Marshal(window) + if err != nil { + return nil, err + } + return windowJSON, nil +} + func toStorageHooks(hookSpecs []*job.Hook) ([]byte, error) { if hookSpecs == nil { return nil, nil @@ -241,16 +243,42 @@ func toStorageAlerts(alertSpecs []*job.AlertSpec) ([]byte, error) { return json.Marshal(alerts) } -func toStorageRetry(retrySpec *job.Retry) ([]byte, error) { - if retrySpec == nil { +func toStorageSchedule(scheduleSpec *job.Schedule) ([]byte, error) { + if scheduleSpec == nil { return nil, nil } - retry := Retry{ - Count: retrySpec.Count(), - Delay: retrySpec.Delay(), - ExponentialBackoff: retrySpec.ExponentialBackoff(), + + startDate, err := time.Parse(jobDatetimeLayout, scheduleSpec.StartDate().String()) + if err != nil { + return nil, err + } + + var endDate time.Time + if scheduleSpec.EndDate() != "" { + endDate, err = time.Parse(jobDatetimeLayout, scheduleSpec.EndDate().String()) + if err != nil { + return nil, err + } + } + + var retry *Retry + if scheduleSpec.Retry() != nil { + retry = &Retry{ + Count: scheduleSpec.Retry().Count(), + Delay: scheduleSpec.Retry().Delay(), + ExponentialBackoff: scheduleSpec.Retry().ExponentialBackoff(), + } } - return json.Marshal(retry) + + schedule := Schedule{ + StartDate: startDate, + EndDate: &endDate, + Interval: scheduleSpec.Interval(), + DependsOnPast: scheduleSpec.DependsOnPast(), + CatchUp: scheduleSpec.CatchUp(), + Retry: retry, + } + return json.Marshal(schedule) } func toStorageMetadata(metadataSpec *job.Metadata) ([]byte, error) { @@ -303,46 +331,20 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { return nil, err } - startDate, err := job.ScheduleDateFrom(jobSpec.StartDate.Format(job.DateLayout)) - if err != nil { - return nil, err - } - - scheduleBuilder := job.NewScheduleBuilder(startDate). - WithCatchUp(jobSpec.CatchUp). 
- WithDependsOnPast(jobSpec.DependsOnPast). - WithInterval(jobSpec.Interval) - - if !jobSpec.EndDate.IsZero() { - endDate, err := job.ScheduleDateFrom(jobSpec.EndDate.Format(job.DateLayout)) + var schedule *job.Schedule + if jobSpec.Schedule != nil { + schedule, err = fromStorageSchedule(jobSpec.Schedule) if err != nil { return nil, err } - scheduleBuilder = scheduleBuilder.WithEndDate(endDate) } - if jobSpec.Retry != nil { - var storageRetry Retry - if err := json.Unmarshal(jobSpec.Retry, &storageRetry); err != nil { + var window models.Window + if jobSpec.WindowSpec != nil { + window, err = fromStorageWindow(jobSpec.WindowSpec, jobSpec.Version) + if err != nil { return nil, err } - retry := job.NewRetry(storageRetry.Count, storageRetry.Delay, storageRetry.ExponentialBackoff) - scheduleBuilder = scheduleBuilder.WithRetry(retry) - } - - schedule, err := scheduleBuilder.Build() - if err != nil { - return nil, err - } - - window, err := models.NewWindow( - jobSpec.Version, - jobSpec.WindowTruncateTo, - jobSpec.WindowOffset, - jobSpec.WindowSize, - ) - if err != nil { - return nil, err } var taskConfig *job.Config @@ -357,6 +359,7 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { return nil, err } task := job.NewTaskBuilder(taskName, taskConfig).Build() + jobSpecBuilder := job.NewSpecBuilder(version, jobName, owner, schedule, window, task).WithDescription(jobSpec.Description) if jobSpec.Labels != nil { @@ -443,6 +446,50 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { return jobSpecBuilder.Build(), nil } +func fromStorageWindow(raw []byte, jobVersion int) (models.Window, error) { + var storageWindow Window + if err := json.Unmarshal(raw, &storageWindow); err != nil { + return nil, err + } + + return models.NewWindow( + jobVersion, + storageWindow.WindowTruncateTo, + storageWindow.WindowOffset, + storageWindow.WindowSize, + ) +} + +func fromStorageSchedule(raw []byte) (*job.Schedule, error) { + var storageSchedule Schedule + if err := 
json.Unmarshal(raw, &storageSchedule); err != nil { + return nil, err + } + startDate, err := job.ScheduleDateFrom(storageSchedule.StartDate.Format(job.DateLayout)) + if err != nil { + return nil, err + } + scheduleBuilder := job.NewScheduleBuilder(startDate). + WithCatchUp(storageSchedule.CatchUp). + WithDependsOnPast(storageSchedule.DependsOnPast). + WithInterval(storageSchedule.Interval) + + if !storageSchedule.EndDate.IsZero() { + endDate, err := job.ScheduleDateFrom(storageSchedule.EndDate.Format(job.DateLayout)) + if err != nil { + return nil, err + } + scheduleBuilder = scheduleBuilder.WithEndDate(endDate) + } + + if storageSchedule.Retry != nil { + retry := job.NewRetry(storageSchedule.Retry.Count, storageSchedule.Retry.Delay, storageSchedule.Retry.ExponentialBackoff) + scheduleBuilder = scheduleBuilder.WithRetry(retry) + } + + return scheduleBuilder.Build() +} + func fromStorageHooks(raw []byte) ([]*job.Hook, error) { if raw == nil { return nil, nil @@ -509,10 +556,8 @@ func FromRow(row pgx.Row) (*Spec, error) { var js Spec err := row.Scan(&js.ID, &js.Name, &js.Version, &js.Owner, &js.Description, - &js.Labels, &js.StartDate, &js.EndDate, &js.Interval, &js.DependsOnPast, - &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, - &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, - &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, + &js.Labels, &js.Schedule, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, + &js.TaskName, &js.TaskConfig, &js.WindowSpec, &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) if err != nil { diff --git a/internal/store/postgres/job/job_repository.go b/internal/store/postgres/job/job_repository.go index 19ee3bedd2..05b7fe43a5 100644 --- a/internal/store/postgres/job/job_repository.go +++ b/internal/store/postgres/job/job_repository.go @@ -2,6 +2,7 @@ package job 
import ( "context" + "database/sql" "fmt" "github.com/jackc/pgx/v5" @@ -15,10 +16,8 @@ import ( ) const ( - jobColumnsToStore = `name, version, owner, description, labels, start_date, end_date, interval, - depends_on_past, catch_up, retry, alert, static_upstreams, http_upstreams, task_name, task_config, - window_size, window_offset, window_truncate_to, assets, hooks, metadata, destination, sources, - project_name, namespace_name, created_at, updated_at` + jobColumnsToStore = `name, version, owner, description, labels, schedule, alert, static_upstreams, http_upstreams, + task_name, task_config, window_spec, assets, hooks, metadata, destination, sources, project_name, namespace_name, created_at, updated_at` jobColumns = `id, ` + jobColumnsToStore + `, deleted_at` ) @@ -70,16 +69,13 @@ func (j JobRepository) triggerInsert(ctx context.Context, jobEntity *job.Job) er insertJobQuery := `INSERT INTO job (` + jobColumnsToStore + `) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, - $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW(), NOW());` + $17, $18, $19, NOW(), NOW());` tag, err := j.db.Exec(ctx, insertJobQuery, - storageJob.Name, storageJob.Version, storageJob.Owner, storageJob.Description, - storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, - storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, - storageJob.StaticUpstreams, storageJob.HTTPUpstreams, storageJob.TaskName, storageJob.TaskConfig, - storageJob.WindowSize, storageJob.WindowOffset, storageJob.WindowTruncateTo, - storageJob.Assets, storageJob.Hooks, storageJob.Metadata, - storageJob.Destination, storageJob.Sources, + storageJob.Name, storageJob.Version, storageJob.Owner, storageJob.Description, storageJob.Labels, + storageJob.Schedule, storageJob.Alert, storageJob.StaticUpstreams, storageJob.HTTPUpstreams, + storageJob.TaskName, storageJob.TaskConfig, storageJob.WindowSpec, storageJob.Assets, + storageJob.Hooks, 
storageJob.Metadata, storageJob.Destination, storageJob.Sources, storageJob.ProjectName, storageJob.NamespaceName) if err != nil { @@ -140,25 +136,19 @@ func (j JobRepository) triggerUpdate(ctx context.Context, jobEntity *job.Job) er updateJobQuery := ` UPDATE job SET - version = $1, owner = $2, description = $3, - labels = $4, start_date = $5, end_date = $6, interval = $7, - depends_on_past = $8, catch_up = $9, retry = $10, alert = $11, - static_upstreams = $12, http_upstreams = $13, task_name = $14, task_config = $15, - window_size = $16, window_offset = $17, window_truncate_to = $18, - assets = $19, hooks = $20, metadata = $21, - destination = $22, sources = $23, + version = $1, owner = $2, description = $3, labels = $4, schedule = $5, alert = $6, + static_upstreams = $7, http_upstreams = $8, task_name = $9, task_config = $10, + window_spec = $11, assets = $12, hooks = $13, metadata = $14, destination = $15, sources = $16, updated_at = NOW(), deleted_at = null WHERE - name = $24 AND - project_name = $25;` + name = $17 AND + project_name = $18;` tag, err := j.db.Exec(ctx, updateJobQuery, storageJob.Version, storageJob.Owner, storageJob.Description, - storageJob.Labels, storageJob.StartDate, storageJob.EndDate, storageJob.Interval, - storageJob.DependsOnPast, storageJob.CatchUp, storageJob.Retry, storageJob.Alert, + storageJob.Labels, storageJob.Schedule, storageJob.Alert, storageJob.StaticUpstreams, storageJob.HTTPUpstreams, storageJob.TaskName, storageJob.TaskConfig, - storageJob.WindowSize, storageJob.WindowOffset, storageJob.WindowTruncateTo, - storageJob.Assets, storageJob.Hooks, storageJob.Metadata, + storageJob.WindowSpec, storageJob.Assets, storageJob.Hooks, storageJob.Metadata, storageJob.Destination, storageJob.Sources, storageJob.Name, storageJob.ProjectName) @@ -300,36 +290,66 @@ func (JobRepository) toUpstreams(storeUpstreams []JobWithUpstream) ([]*job.Upstr var upstreams []*job.Upstream for _, storeUpstream := range storeUpstreams { - resourceURN := 
job.ResourceURN(storeUpstream.UpstreamResourceURN) - upstreamName, _ := job.NameFrom(storeUpstream.UpstreamJobName) - projectName, _ := tenant.ProjectNameFrom(storeUpstream.UpstreamProjectName) + var resourceURN job.ResourceURN + if storeUpstream.UpstreamResourceURN.Valid { + resourceURN = job.ResourceURN(storeUpstream.UpstreamResourceURN.String) + } + + var upstreamName job.Name + if storeUpstream.UpstreamJobName.Valid { + upstreamName, _ = job.NameFrom(storeUpstream.UpstreamJobName.String) + } + + var upstreamProjectName tenant.ProjectName + if storeUpstream.UpstreamProjectName.Valid { + upstreamProjectName, _ = tenant.ProjectNameFrom(storeUpstream.UpstreamProjectName.String) + } - if storeUpstream.UpstreamState == job.UpstreamStateUnresolved.String() && storeUpstream.UpstreamJobName != "" { - upstreams = append(upstreams, job.NewUpstreamUnresolvedStatic(upstreamName, projectName)) + if storeUpstream.UpstreamState == job.UpstreamStateUnresolved.String() && storeUpstream.UpstreamJobName.Valid { + upstreams = append(upstreams, job.NewUpstreamUnresolvedStatic(upstreamName, upstreamProjectName)) continue } - if storeUpstream.UpstreamState == job.UpstreamStateUnresolved.String() && storeUpstream.UpstreamResourceURN != "" { + if storeUpstream.UpstreamState == job.UpstreamStateUnresolved.String() && storeUpstream.UpstreamResourceURN.Valid { upstreams = append(upstreams, job.NewUpstreamUnresolvedInferred(resourceURN)) continue } - upstreamTenant, err := tenant.NewTenant(storeUpstream.UpstreamProjectName, storeUpstream.UpstreamNamespaceName) - if err != nil { - me.Append(err) - continue + var upstreamTenant tenant.Tenant + var err error + if storeUpstream.UpstreamProjectName.Valid && storeUpstream.UpstreamNamespaceName.Valid { + upstreamTenant, err = tenant.NewTenant(storeUpstream.UpstreamProjectName.String, storeUpstream.UpstreamNamespaceName.String) + if err != nil { + me.Append(err) + continue + } } - taskName, err := job.TaskNameFrom(storeUpstream.UpstreamTaskName) - 
if err != nil { - me.Append(err) - continue + + var taskName job.TaskName + if storeUpstream.UpstreamTaskName.Valid { + taskName, err = job.TaskNameFrom(storeUpstream.UpstreamTaskName.String) + if err != nil { + me.Append(err) + continue + } } upstreamType, err := job.UpstreamTypeFrom(storeUpstream.UpstreamType) if err != nil { continue } - upstream := job.NewUpstreamResolved(upstreamName, storeUpstream.UpstreamHost, resourceURN, upstreamTenant, upstreamType, taskName, storeUpstream.UpstreamExternal) + + var upstreamHost string + if storeUpstream.UpstreamHost.Valid { + upstreamHost = storeUpstream.UpstreamHost.String + } + + var upstreamExternal bool + if storeUpstream.UpstreamExternal.Valid { + upstreamExternal = storeUpstream.UpstreamExternal.Bool + } + + upstream := job.NewUpstreamResolved(upstreamName, upstreamHost, resourceURN, upstreamTenant, upstreamType, taskName, upstreamExternal) upstreams = append(upstreams, upstream) } if err := me.ToErr(); err != nil { @@ -437,17 +457,17 @@ func specToJob(spec *Spec) (*job.Job, error) { } type JobWithUpstream struct { - JobName string `json:"job_name"` - ProjectName string `json:"project_name"` - UpstreamJobName string `json:"upstream_job_name"` - UpstreamResourceURN string `json:"upstream_resource_urn"` - UpstreamProjectName string `json:"upstream_project_name"` - UpstreamNamespaceName string `json:"upstream_namespace_name"` - UpstreamTaskName string `json:"upstream_task_name"` - UpstreamHost string `json:"upstream_host"` - UpstreamType string `json:"upstream_type"` - UpstreamState string `json:"upstream_state"` - UpstreamExternal bool `json:"upstream_external"` + JobName string `json:"job_name"` + ProjectName string `json:"project_name"` + UpstreamJobName sql.NullString `json:"upstream_job_name"` + UpstreamResourceURN sql.NullString `json:"upstream_resource_urn"` + UpstreamProjectName sql.NullString `json:"upstream_project_name"` + UpstreamNamespaceName sql.NullString `json:"upstream_namespace_name"` + 
UpstreamTaskName sql.NullString `json:"upstream_task_name"` + UpstreamHost sql.NullString `json:"upstream_host"` + UpstreamType string `json:"upstream_type"` + UpstreamState string `json:"upstream_state"` + UpstreamExternal sql.NullBool `json:"upstream_external"` } func (j JobWithUpstream) getJobFullName() string { @@ -573,20 +593,37 @@ func toJobUpstream(jobWithUpstream *job.WithUpstream) []*JobWithUpstream { jobUpstreams = append(jobUpstreams, &JobWithUpstream{ JobName: jobWithUpstream.Name().String(), ProjectName: jobWithUpstream.Job().ProjectName().String(), - UpstreamJobName: upstream.Name().String(), - UpstreamResourceURN: upstream.Resource().String(), - UpstreamProjectName: upstreamProjectName, - UpstreamNamespaceName: upstreamNamespaceName, - UpstreamTaskName: upstream.TaskName().String(), - UpstreamHost: upstream.Host(), + UpstreamJobName: toNullString(upstream.Name().String()), + UpstreamResourceURN: toNullString(upstream.Resource().String()), + UpstreamProjectName: toNullString(upstreamProjectName), + UpstreamNamespaceName: toNullString(upstreamNamespaceName), + UpstreamTaskName: toNullString(upstream.TaskName().String()), + UpstreamHost: toNullString(upstream.Host()), UpstreamType: upstream.Type().String(), UpstreamState: upstream.State().String(), - UpstreamExternal: upstream.External(), + UpstreamExternal: toNullBool(upstream.External()), }) } return jobUpstreams } +func toNullString(val string) sql.NullString { + if val == "" { + return sql.NullString{} + } + return sql.NullString{ + String: val, + Valid: true, + } +} + +func toNullBool(val bool) sql.NullBool { + return sql.NullBool{ + Bool: val, + Valid: true, + } +} + type ProjectAndJobNames struct { ProjectName string `json:"project_name"` JobName string `json:"job_name"` diff --git a/internal/store/postgres/migrations/000046_update_job_table.up.sql b/internal/store/postgres/migrations/000046_update_job_table.up.sql new file mode 100644 index 0000000000..723e2809c0 --- /dev/null +++ 
b/internal/store/postgres/migrations/000046_update_job_table.up.sql @@ -0,0 +1,14 @@ +ALTER TABLE job + +ADD COLUMN schedule JSONB, +DROP COLUMN start_date, +DROP COLUMN end_date, +DROP COLUMN interval, +DROP COLUMN depends_on_past, +DROP COLUMN catch_up, +DROP COLUMN retry, + +ADD COLUMN window_spec JSONB, +DROP COLUMN window_size, +DROP COLUMN window_offset, +DROP COLUMN window_truncate_to; From e469fe32343f05f535f93bcd5da37cf8cdf30a8e Mon Sep 17 00:00:00 2001 From: Yash Bhardwaj Date: Wed, 4 Jan 2023 12:39:54 +0530 Subject: [PATCH 19/25] fix: add scheduler repo tests (#698) * fix: bug in __lib.py due to mishandling response from handle_pod_overlap (#695) * fix: add scheduler repo tests Co-authored-by: Anwar Hidayat <15167551+irainia@users.noreply.github.com> --- core/scheduler/job_run.go | 8 +- ext/scheduler/airflow/__lib.py | 2 +- .../scheduler/job_operator_repository.go | 2 +- .../scheduler/job_operator_repository_test.go | 108 +++++++++ .../postgres/scheduler/job_repository.go | 114 +++++---- .../postgres/scheduler/job_repository_test.go | 220 ++++++++++++++++++ .../postgres/scheduler/job_run_repository.go | 36 +-- .../scheduler/job_run_repository_test.go | 96 ++++++++ 8 files changed, 525 insertions(+), 61 deletions(-) create mode 100644 internal/store/postgres/scheduler/job_operator_repository_test.go create mode 100644 internal/store/postgres/scheduler/job_repository_test.go create mode 100644 internal/store/postgres/scheduler/job_run_repository_test.go diff --git a/core/scheduler/job_run.go b/core/scheduler/job_run.go index 7637561e77..25930d4c88 100644 --- a/core/scheduler/job_run.go +++ b/core/scheduler/job_run.go @@ -35,10 +35,12 @@ func (i JobRunID) IsEmpty() bool { type JobRun struct { ID uuid.UUID - JobName JobName - Tenant tenant.Tenant - + JobName JobName + Tenant tenant.Tenant + State State StartTime time.Time + SLAAlert bool + EndTime time.Time } type OperatorRun struct { diff --git a/ext/scheduler/airflow/__lib.py 
b/ext/scheduler/airflow/__lib.py index fa2239ab8d..712b324522 100644 --- a/ext/scheduler/airflow/__lib.py +++ b/ext/scheduler/airflow/__lib.py @@ -189,7 +189,7 @@ def execute(self, context): if len(pod_list.items) == 1: try_numbers_match = self._try_numbers_match(context, pod_list.items[0]) - final_state, result = self.handle_pod_overlap(labels, try_numbers_match, launcher, pod_list.items[0]) + final_state, _, result = self.handle_pod_overlap(labels, try_numbers_match, launcher, pod_list.items[0]) else: final_state, _, result = self.create_new_pod_for_operator(labels, launcher) diff --git a/internal/store/postgres/scheduler/job_operator_repository.go b/internal/store/postgres/scheduler/job_operator_repository.go index 90ccf05d8c..9845d94936 100644 --- a/internal/store/postgres/scheduler/job_operator_repository.go +++ b/internal/store/postgres/scheduler/job_operator_repository.go @@ -96,7 +96,7 @@ func (o *OperatorRunRepository) CreateOperatorRun(ctx context.Context, name stri if err != nil { return err } - insertOperatorRun := "INSERT INTO " + operatorTableName + " ( " + jobOperatorColumnsToStore + " created_at, updated_at) values ( $1, $2, $3, $4, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())" + insertOperatorRun := "INSERT INTO " + operatorTableName + " ( " + jobOperatorColumnsToStore + ", created_at, updated_at) values ( $1, $2, $3, $4, TIMESTAMP '3000-01-01 00:00:00', NOW(), NOW())" _, err = o.db.Exec(ctx, insertOperatorRun, name, jobRunID, scheduler.StateRunning, startTime) return errors.WrapIfErr(scheduler.EntityJobRun, "error while inserting the run", err) } diff --git a/internal/store/postgres/scheduler/job_operator_repository_test.go b/internal/store/postgres/scheduler/job_operator_repository_test.go new file mode 100644 index 0000000000..8609031794 --- /dev/null +++ b/internal/store/postgres/scheduler/job_operator_repository_test.go @@ -0,0 +1,108 @@ +//go:build !unit_test + +package scheduler_test + +import ( + "context" + "testing" + "time" + + 
"github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/errors" + postgres "github.com/odpf/optimus/internal/store/postgres/scheduler" +) + +func TestPostgresJobOperatorRepository(t *testing.T) { + ctx := context.Background() + tnnt, _ := tenant.NewTenant("test-proj", "test-ns") + currentTime := time.Now() + scheduledAt := currentTime.Add(-time.Hour) + operatorStartTime := currentTime + operatorEndTime := currentTime.Add(time.Hour) + slaDefinitionInSec := int64(3600) //seconds + + t.Run("CreateOperatorRun", func(t *testing.T) { + t.Run("creates a operator run", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + operatorRunRepo := postgres.NewOperatorRunRepository(db) + err = operatorRunRepo.CreateOperatorRun(ctx, "some-operator-name", scheduler.OperatorSensor, jobRun.ID, operatorStartTime) + assert.Nil(t, err) + + operatorRun, err := operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorSensor, jobRun.ID) + assert.Nil(t, err) + assert.True(t, operatorStartTime.Equal(operatorRun.StartTime)) + }) + }) + t.Run("GetOperatorRun", func(t *testing.T) { + t.Run("should return not found error", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + operatorRunRepo := postgres.NewOperatorRunRepository(db) + operatorRun, err := operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorHook, jobRun.ID) + 
assert.True(t, errors.IsErrorType(err, errors.ErrNotFound)) + assert.Nil(t, operatorRun) + }) + t.Run("should return InvalidArgument error when wrong operator name", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + operatorRunRepo := postgres.NewOperatorRunRepository(db) + operatorRun, err := operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", "some-other-operator", jobRun.ID) + assert.True(t, errors.IsErrorType(err, errors.ErrInvalidArgument)) + assert.Nil(t, operatorRun) + }) + }) + + t.Run("UpdateOperatorRun", func(t *testing.T) { + t.Run("updates a specific operator run by job id", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + operatorRunRepo := postgres.NewOperatorRunRepository(db) + err = operatorRunRepo.CreateOperatorRun(ctx, "some-operator-name", scheduler.OperatorTask, jobRun.ID, operatorStartTime) + assert.Nil(t, err) + + operatorRun, err := operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorTask, jobRun.ID) + assert.Nil(t, err) + assert.True(t, operatorStartTime.Equal(operatorRun.StartTime)) + + err = operatorRunRepo.UpdateOperatorRun(ctx, scheduler.OperatorTask, operatorRun.ID, operatorEndTime, scheduler.StateFailed) + assert.Nil(t, err) + + operatorRun, err = operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorTask, jobRun.ID) + assert.Nil(t, err) + assert.True(t, operatorEndTime.Equal(operatorRun.EndTime)) + assert.Equal(t, scheduler.StateFailed, 
operatorRun.Status) + }) + }) +} diff --git a/internal/store/postgres/scheduler/job_repository.go b/internal/store/postgres/scheduler/job_repository.go index 0fb93c811c..7d4bd70a35 100644 --- a/internal/store/postgres/scheduler/job_repository.go +++ b/internal/store/postgres/scheduler/job_repository.go @@ -21,12 +21,8 @@ import ( ) const ( - jobColumns = `id, name, version, owner, description, - labels, start_date, end_date, interval, depends_on_past, - catch_up, retry, alert, static_upstreams, http_upstreams, - task_name, task_config, window_size, window_offset, window_truncate_to, - assets, hooks, metadata, destination, sources, - project_name, namespace_name, created_at, updated_at` + jobColumns = `id, name, version, owner, description, labels, schedule, alert, static_upstreams, http_upstreams, + task_name, task_config, window_spec, assets, hooks, metadata, destination, sources, project_name, namespace_name, created_at, updated_at` upstreamColumns = ` job_name, project_name, upstream_job_name, upstream_project_name, upstream_namespace_name, upstream_resource_urn, upstream_task_name, upstream_type, upstream_external` @@ -36,6 +32,20 @@ type JobRepository struct { db *pgxpool.Pool } +type Schedule struct { + StartDate time.Time + EndDate *time.Time + Interval string + DependsOnPast bool `json:"depends_on_past"` + CatchUp bool `json:"catch_up"` + Retry *Retry +} +type Retry struct { + Count int `json:"count"` + Delay int32 `json:"delay"` + ExponentialBackoff bool `json:"exponential_backoff"` +} + type JobUpstreams struct { JobID uuid.UUID JobName string @@ -81,52 +91,63 @@ type Job struct { Description string Labels map[string]string - StartDate time.Time - EndDate *time.Time - Interval string - - // Behavior - DependsOnPast bool `json:"depends_on_past"` - CatchUp bool `json:"catch_up"` - Retry json.RawMessage - Alert json.RawMessage + Schedule json.RawMessage + WindowSpec json.RawMessage - // Upstreams - StaticUpstreams pq.StringArray `json:"static_upstreams"` + 
Alert json.RawMessage - // ExternalUpstreams - HTTPUpstreams json.RawMessage `json:"http_upstreams"` + StaticUpstreams pq.StringArray + HTTPUpstreams json.RawMessage TaskName string TaskConfig map[string]string - WindowSize string - WindowOffset string - WindowTruncateTo string + Hooks json.RawMessage + + Assets map[string]string - Assets map[string]string - Hooks json.RawMessage Metadata json.RawMessage Destination string Sources pq.StringArray - ProjectName string - NamespaceName string + ProjectName string `json:"project_name"` + NamespaceName string `json:"namespace_name"` CreatedAt time.Time UpdatedAt time.Time DeletedAt sql.NullTime } +type Window struct { + WindowSize string + WindowOffset string + WindowTruncateTo string +} + +func fromStorageWindow(raw []byte, jobVersion int) (models.Window, error) { + var storageWindow Window + if err := json.Unmarshal(raw, &storageWindow); err != nil { + return nil, err + } + return models.NewWindow( + jobVersion, + storageWindow.WindowTruncateTo, + storageWindow.WindowOffset, + storageWindow.WindowSize, + ) +} func (j *Job) toJob() (*scheduler.Job, error) { t, err := tenant.NewTenant(j.ProjectName, j.NamespaceName) if err != nil { return nil, err } - window, err := models.NewWindow(j.Version, j.WindowTruncateTo, j.WindowOffset, j.WindowSize) - if err != nil { - return nil, err + var window models.Window + if j.WindowSpec != nil { + window, err = fromStorageWindow(j.WindowSpec, j.Version) + if err != nil { + return nil, err + } } schedulerJob := scheduler.Job{ Name: scheduler.JobName(j.Name), @@ -156,6 +177,10 @@ func (j *Job) toJobWithDetails() (*scheduler.JobWithDetails, error) { if err != nil { return nil, err } + var storageSchedule Schedule + if err := json.Unmarshal(j.Schedule, &storageSchedule); err != nil { + return nil, err + } schedulerJobWithDetails := &scheduler.JobWithDetails{ Name: job.Name, @@ -167,19 +192,21 @@ func (j *Job) toJobWithDetails() (*scheduler.JobWithDetails, error) { Labels: j.Labels, }, 
Schedule: &scheduler.Schedule{ - DependsOnPast: j.DependsOnPast, - CatchUp: j.CatchUp, - StartDate: j.StartDate, - Interval: j.Interval, + DependsOnPast: storageSchedule.DependsOnPast, + CatchUp: storageSchedule.CatchUp, + StartDate: storageSchedule.StartDate, + Interval: storageSchedule.Interval, }, } - if !j.EndDate.IsZero() { - schedulerJobWithDetails.Schedule.EndDate = j.EndDate + if !storageSchedule.EndDate.IsZero() { + schedulerJobWithDetails.Schedule.EndDate = storageSchedule.EndDate } - if j.Retry != nil { - if err := json.Unmarshal(j.Retry, &schedulerJobWithDetails.Retry); err != nil { - return nil, err + if storageSchedule.Retry != nil { + schedulerJobWithDetails.Retry = scheduler.Retry{ + ExponentialBackoff: storageSchedule.Retry.ExponentialBackoff, + Count: storageSchedule.Retry.Count, + Delay: storageSchedule.Retry.Delay, } } @@ -198,11 +225,9 @@ func FromRow(row pgx.Row) (*Job, error) { var js Job err := row.Scan(&js.ID, &js.Name, &js.Version, &js.Owner, &js.Description, - &js.Labels, &js.StartDate, &js.EndDate, &js.Interval, &js.DependsOnPast, - &js.CatchUp, &js.Retry, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, - &js.TaskName, &js.TaskConfig, &js.WindowSize, &js.WindowOffset, &js.WindowTruncateTo, - &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, - &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt, &js.DeletedAt) + &js.Labels, &js.Schedule, &js.Alert, &js.StaticUpstreams, &js.HTTPUpstreams, + &js.TaskName, &js.TaskConfig, &js.WindowSpec, &js.Assets, &js.Hooks, &js.Metadata, &js.Destination, &js.Sources, + &js.ProjectName, &js.NamespaceName, &js.CreatedAt, &js.UpdatedAt) if err != nil { if errors.Is(err, pgx.ErrNoRows) { @@ -301,6 +326,9 @@ func (j *JobRepository) GetAll(ctx context.Context, projectName tenant.ProjectNa jobNameList = append(jobNameList, job.GetName()) jobsMap[job.GetName()] = job } + if len(jobNameList) == 0 { + return nil, errors.NotFound(scheduler.EntityJobRun, "unable to find jobs in 
project:"+projectName.String()) + } jobUpstreamGroupedByName, err := j.getJobsUpstreams(ctx, projectName, jobNameList) multiError.Append(err) diff --git a/internal/store/postgres/scheduler/job_repository_test.go b/internal/store/postgres/scheduler/job_repository_test.go new file mode 100644 index 0000000000..43224d345a --- /dev/null +++ b/internal/store/postgres/scheduler/job_repository_test.go @@ -0,0 +1,220 @@ +//go:build !unit_test + +package scheduler_test + +import ( + "context" + "testing" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/job" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/errors" + "github.com/odpf/optimus/internal/models" + jobRepo "github.com/odpf/optimus/internal/store/postgres/job" + postgres "github.com/odpf/optimus/internal/store/postgres/scheduler" + tenantPostgres "github.com/odpf/optimus/internal/store/postgres/tenant" + "github.com/odpf/optimus/tests/setup" +) + +const ( + jobAName = "sample-job-A" + jobBName = "sample-job-B" +) + +func TestPostgresJobRepository(t *testing.T) { + ctx := context.Background() + tnnt, _ := tenant.NewTenant("test-proj", "test-ns") + + t.Run("GetAll", func(t *testing.T) { + t.Run("get all jobs for a given project name", func(t *testing.T) { + db := dbSetup() + jobs := addJobs(ctx, t, db) + + jobProviderRepo := postgres.NewJobProviderRepository(db) + + allJobs, err := jobProviderRepo.GetAll(ctx, tnnt.ProjectName()) + assert.Nil(t, err) + assert.Equal(t, len(jobs), len(allJobs)) + for _, expectedJob := range jobs { + found := false + for _, job := range allJobs { + if compareEqualJobWithDetails(expectedJob, job) { + found = true + break + } + } + assert.True(t, found) + } + }) + + t.Run("return not found error when jobs not found", func(t *testing.T) { + db := dbSetup() + jobProviderRepo := postgres.NewJobProviderRepository(db) + allJobs, err := jobProviderRepo.GetAll(ctx, 
"some-other-project-1") + assert.True(t, errors.IsErrorType(err, errors.ErrNotFound)) + assert.Nil(t, allJobs) + }) + }) + + t.Run("GetJobDetails", func(t *testing.T) { + t.Run("gets one job with details", func(t *testing.T) { + db := dbSetup() + jobs := addJobs(ctx, t, db) + + jobProviderRepo := postgres.NewJobProviderRepository(db) + + jobWithDetails, err := jobProviderRepo.GetJobDetails(ctx, tnnt.ProjectName(), jobAName) + assert.Nil(t, err) + assert.True(t, compareEqualJobWithDetails(jobs[jobAName], jobWithDetails)) + }) + t.Run("returns not found error when job not found", func(t *testing.T) { + db := dbSetup() + jobProviderRepo := postgres.NewJobProviderRepository(db) + jobObject, err := jobProviderRepo.GetJobDetails(ctx, tnnt.ProjectName(), "some-other-job") + assert.True(t, errors.IsErrorType(err, errors.ErrNotFound)) + assert.Nil(t, jobObject) + }) + }) + t.Run("GetJob", func(t *testing.T) { + t.Run("returns one job", func(t *testing.T) { + db := dbSetup() + jobs := addJobs(ctx, t, db) + jobProviderRepo := postgres.NewJobProviderRepository(db) + + jobObject, err := jobProviderRepo.GetJob(ctx, tnnt.ProjectName(), jobAName) + assert.Nil(t, err) + assert.True(t, compareEqualJob(jobs[jobAName], jobObject)) + }) + t.Run("returns not found error when job not found", func(t *testing.T) { + db := dbSetup() + jobProviderRepo := postgres.NewJobProviderRepository(db) + jobObject, err := jobProviderRepo.GetJob(ctx, tnnt.ProjectName(), "some-other-job") + assert.True(t, errors.IsErrorType(err, errors.ErrNotFound)) + assert.Nil(t, jobObject) + }) + }) +} + +func dbSetup() *pgxpool.Pool { + pool := setup.TestPool() + setup.TruncateTablesWith(pool) + return pool +} + +func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]*job.Job { + t.Helper() + jobVersion, err := job.VersionFrom(1) + assert.NoError(t, err) + jobOwner, err := job.OwnerFrom("dev_test") + assert.NoError(t, err) + jobDescription := "sample job" + jobRetry := job.NewRetry(5, 0, 
false) + startDate, err := job.ScheduleDateFrom("2022-10-01") + assert.NoError(t, err) + jobSchedule, err := job.NewScheduleBuilder(startDate).WithRetry(jobRetry).Build() + assert.NoError(t, err) + jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + assert.NoError(t, err) + jobTaskConfig, err := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + assert.NoError(t, err) + taskName, err := job.TaskNameFrom("bq2bq") + assert.NoError(t, err) + jobTask := job.NewTaskBuilder(taskName, jobTaskConfig).Build() + + jobLabels := map[string]string{ + "environment": "integration", + } + + proj, err := tenant.NewProject("test-proj", + map[string]string{ + "bucket": "gs://some_folder-2", + tenant.ProjectSchedulerHost: "host", + tenant.ProjectStoragePathKey: "gs://location", + }) + assert.NoError(t, err) + + projRepo := tenantPostgres.NewProjectRepository(pool) + assert.NoError(t, projRepo.Save(ctx, proj)) + + namespace, err := tenant.NewNamespace("test-ns", proj.Name(), + map[string]string{ + "bucket": "gs://ns_bucket", + }) + assert.NoError(t, err) + + namespaceRepo := tenantPostgres.NewNamespaceRepository(pool) + assert.NoError(t, namespaceRepo.Save(ctx, namespace)) + + jobHookConfig, err := job.NewConfig(map[string]string{"sample_hook_key": "sample_value"}) + assert.NoError(t, err) + jobHooks := []*job.Hook{job.NewHook("sample_hook", jobHookConfig)} + jobAlertConfig, err := job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + assert.NoError(t, err) + alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() + jobAlerts := []*job.AlertSpec{alert} + upstreamName1 := job.SpecUpstreamNameFrom("job-upstream-1") + upstreamName2 := job.SpecUpstreamNameFrom("job-upstream-2") + jobUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() + jobAsset, err := job.NewAsset(map[string]string{"sample-asset": 
"value-asset"}) + assert.NoError(t, err) + resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") + resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") + resourceMetadata := job.NewResourceMetadata(resourceRequestConfig, resourceLimitConfig) + jobMetadata, _ := job.NewMetadataBuilder(). + WithResource(resourceMetadata). + WithScheduler(map[string]string{"scheduler_config_key": "value"}). + Build() + jobSpecA := job.NewSpecBuilder(jobVersion, jobAName, jobOwner, jobSchedule, jobWindow, jobTask). + WithDescription(jobDescription). + WithLabels(jobLabels). + WithHooks(jobHooks). + WithAlerts(jobAlerts). + WithSpecUpstream(jobUpstream). + WithAsset(jobAsset). + WithMetadata(jobMetadata). + Build() + sampleTenant, err := tenant.NewTenant(proj.Name().String(), namespace.Name().String()) + assert.NoError(t, err) + jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) + + jobSpecB := job.NewSpecBuilder(jobVersion, jobBName, jobOwner, jobSchedule, jobWindow, jobTask). + WithDescription(jobDescription). + WithLabels(jobLabels). + WithHooks(jobHooks). + WithAlerts(jobAlerts). + WithAsset(jobAsset). + WithMetadata(jobMetadata). 
+ Build() + jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) + + jobs := []*job.Job{jobA, jobB} + + jobRepository := jobRepo.NewJobRepository(pool) + addedJobs, err := jobRepository.Add(ctx, jobs) + assert.NoError(t, err) + assert.EqualValues(t, jobs, addedJobs) + jobMap := make(map[string]*job.Job) + for _, jobSpec := range jobs { + jobMap[jobSpec.GetName()] = jobSpec + } + return jobMap +} + +func compareEqualJob(j *job.Job, s *scheduler.Job) bool { + return j.GetName() == s.Name.String() && + j.Tenant() == s.Tenant && + j.Destination().String() == s.Destination && + j.Spec().Task().Name().String() == s.Task.Name +} + +func compareEqualJobWithDetails(j *job.Job, s *scheduler.JobWithDetails) bool { + return compareEqualJob(j, s.Job) && + j.GetName() == s.Name.String() && + j.Spec().Version().Int() == s.JobMetadata.Version && + j.Spec().Owner().String() == s.JobMetadata.Owner && + j.Spec().Schedule().Interval() == s.Schedule.Interval +} diff --git a/internal/store/postgres/scheduler/job_run_repository.go b/internal/store/postgres/scheduler/job_run_repository.go index fb5ba98af8..4a0e268b3e 100644 --- a/internal/store/postgres/scheduler/job_run_repository.go +++ b/internal/store/postgres/scheduler/job_run_repository.go @@ -15,7 +15,7 @@ import ( ) const ( - columnsToStore = `job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition` + columnsToStore = `job_name, namespace_name, project_name, scheduled_at, start_time, end_time, status, sla_definition, sla_alert` jobRunColumns = `id, ` + columnsToStore ) @@ -35,6 +35,7 @@ type jobRun struct { EndTime time.Time Status string + SLAAlert bool SLADefinition int64 CreatedAt time.Time @@ -46,20 +47,27 @@ func (j jobRun) toJobRun() (*scheduler.JobRun, error) { if err != nil { return nil, err } + state, err := scheduler.StateFromString(j.Status) + if err != nil { + return nil, err + } return &scheduler.JobRun{ ID: j.ID, JobName: scheduler.JobName(j.JobName), 
Tenant: t, + State: state, StartTime: j.StartTime, + SLAAlert: j.SLAAlert, + EndTime: j.EndTime, }, nil } func (j *JobRunRepository) GetByID(ctx context.Context, id scheduler.JobRunID) (*scheduler.JobRun, error) { var jr jobRun getJobRunByID := `SELECT ` + jobRunColumns + ` FROM job_run where id = $1` - err := j.db.QueryRow(ctx, getJobRunByID, id). + err := j.db.QueryRow(ctx, getJobRunByID, id.UUID()). Scan(&jr.ID, &jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, - &jr.Status, &jr.SLADefinition) + &jr.Status, &jr.SLADefinition, &jr.SLAAlert) if err != nil { return nil, err } @@ -68,10 +76,10 @@ func (j *JobRunRepository) GetByID(ctx context.Context, id scheduler.JobRunID) ( func (j *JobRunRepository) GetByScheduledAt(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (*scheduler.JobRun, error) { var jr jobRun - getJobRunByID := `SELECT ` + jobRunColumns + ` FROM job_run j where project_name = $1 and namespace_name = $2 and job_name = $3 and scheduled_at = $4 order by created_at desc limit 1` + getJobRunByID := `SELECT ` + jobRunColumns + `, created_at FROM job_run j where project_name = $1 and namespace_name = $2 and job_name = $3 and scheduled_at = $4 order by created_at desc limit 1` err := j.db.QueryRow(ctx, getJobRunByID, t.ProjectName(), t.NamespaceName(), jobName, scheduledAt). 
Scan(&jr.ID, &jr.JobName, &jr.NamespaceName, &jr.ProjectName, &jr.ScheduledAt, &jr.StartTime, &jr.EndTime, - &jr.Status, &jr.SLADefinition) + &jr.Status, &jr.SLADefinition, &jr.SLAAlert, &jr.CreatedAt) if err != nil { if errors.Is(err, pgx.ErrNoRows) { @@ -89,19 +97,21 @@ func (j *JobRunRepository) Update(ctx context.Context, jobRunID uuid.UUID, endTi } func (j *JobRunRepository) UpdateSLA(ctx context.Context, slaObjects []*scheduler.SLAObject) error { - var jobIDList []string - for _, slaObject := range slaObjects { - jobIDs := fmt.Sprintf("('%s','%s')", slaObject.JobName, slaObject.JobScheduledAt.Format("2006-01-02 15:04:05")) - jobIDList = append(jobIDList, jobIDs) + var jobIDListString string + totalIds := len(slaObjects) + for i, slaObject := range slaObjects { + jobIDListString += fmt.Sprintf("('%s','%s')", slaObject.JobName, slaObject.JobScheduledAt.UTC().Format("2006-01-02 15:04:05.000000")) + if !(i == totalIds-1) { + jobIDListString += ", " + } } - - query := "update job_run set sla_alert = True, updated_at = NOW() where (job_name, scheduled_at) = any ($1)" - _, err := j.db.Exec(ctx, query, jobIDList) + query := "update job_run set sla_alert = True, updated_at = NOW() where (job_name, scheduled_at) = (" + jobIDListString + ")" + _, err := j.db.Exec(ctx, query) return errors.WrapIfErr(scheduler.EntityJobRun, "unable to update SLA", err) } func (j *JobRunRepository) Create(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time, slaDefinitionInSec int64) error { - insertJobRun := `INSERT INTO job_run (` + columnsToStore + ` created_at, updated_at) values ($1, $2, $3, $4, NOW(), TIMESTAMP '3000-01-01 00:00:00', $5, $6, NOW(), NOW())` + insertJobRun := `INSERT INTO job_run (` + columnsToStore + `, created_at, updated_at) values ($1, $2, $3, $4, NOW(), TIMESTAMP '3000-01-01 00:00:00', $5, $6, FALSE, NOW(), NOW())` _, err := j.db.Exec(ctx, insertJobRun, jobName, t.NamespaceName(), t.ProjectName(), scheduledAt, 
scheduler.StateRunning, slaDefinitionInSec) return errors.WrapIfErr(scheduler.EntityJobRun, "unable to create job run", err) } diff --git a/internal/store/postgres/scheduler/job_run_repository_test.go b/internal/store/postgres/scheduler/job_run_repository_test.go new file mode 100644 index 0000000000..d0c4b568f6 --- /dev/null +++ b/internal/store/postgres/scheduler/job_run_repository_test.go @@ -0,0 +1,96 @@ +//go:build !unit_test + +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + postgres "github.com/odpf/optimus/internal/store/postgres/scheduler" +) + +func TestPostgresJobRunRepository(t *testing.T) { + ctx := context.Background() + tnnt, _ := tenant.NewTenant("test-proj", "test-ns") + currentTime := time.Now() + scheduledAt := currentTime.Add(-time.Hour) + slaDefinitionInSec := int64(3600) //seconds + + t.Run("Create", func(t *testing.T) { + t.Run("creates a job run", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + assert.Equal(t, jobAName, jobRun.JobName.String()) + }) + }) + t.Run("GetByID", func(t *testing.T) { + t.Run("gets a specific job run by ID", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + jobRunByID, err := jobRunRepo.GetByID(ctx, scheduler.JobRunID(jobRun.ID)) + assert.Nil(t, err) + assert.EqualValues(t, jobRunByID, jobRun) + }) + }) + + t.Run("Update", func(t *testing.T) 
{ + t.Run("updates a specific job run by id", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + jobEndTime := currentTime.Add(-time.Minute) + err = jobRunRepo.Update(ctx, jobRun.ID, jobEndTime, scheduler.StateSuccess) + assert.Nil(t, err) + + jobRunByID, err := jobRunRepo.GetByID(ctx, scheduler.JobRunID(jobRun.ID)) + assert.Nil(t, err) + assert.EqualValues(t, scheduler.StateSuccess, jobRunByID.State) + assert.True(t, jobEndTime.Equal(jobRunByID.EndTime)) + }) + }) + t.Run("UpdateSLA", func(t *testing.T) { + t.Run("updates jobs sla alert firing status", func(t *testing.T) { + db := dbSetup() + _ = addJobs(ctx, t, db) + jobRunRepo := postgres.NewJobRunRepository(db) + err := jobRunRepo.Create(ctx, tnnt, jobAName, scheduledAt, slaDefinitionInSec) + assert.Nil(t, err) + jobRun, err := jobRunRepo.GetByScheduledAt(ctx, tnnt, jobAName, scheduledAt) + assert.Nil(t, err) + + var slaObject = scheduler.SLAObject{ + JobName: jobAName, + JobScheduledAt: scheduledAt, + } + var slaObjects = []*scheduler.SLAObject{&slaObject} + + err = jobRunRepo.UpdateSLA(ctx, slaObjects) + assert.Nil(t, err) + + jobRunByID, err := jobRunRepo.GetByID(ctx, scheduler.JobRunID(jobRun.ID)) + assert.Nil(t, err) + assert.True(t, jobRunByID.SLAAlert) + }) + }) +} From 8ef3213d361a1cedb220cd89d4aaf5537e5bf82d Mon Sep 17 00:00:00 2001 From: Yash Bhardwaj Date: Wed, 4 Jan 2023 13:06:28 +0530 Subject: [PATCH 20/25] fix: fix time zone to utc --- .../store/postgres/scheduler/job_operator_repository_test.go | 2 +- internal/store/postgres/scheduler/job_run_repository_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/store/postgres/scheduler/job_operator_repository_test.go 
b/internal/store/postgres/scheduler/job_operator_repository_test.go index 8609031794..ab82b36301 100644 --- a/internal/store/postgres/scheduler/job_operator_repository_test.go +++ b/internal/store/postgres/scheduler/job_operator_repository_test.go @@ -18,7 +18,7 @@ import ( func TestPostgresJobOperatorRepository(t *testing.T) { ctx := context.Background() tnnt, _ := tenant.NewTenant("test-proj", "test-ns") - currentTime := time.Now() + currentTime := time.Now().UTC() scheduledAt := currentTime.Add(-time.Hour) operatorStartTime := currentTime operatorEndTime := currentTime.Add(time.Hour) diff --git a/internal/store/postgres/scheduler/job_run_repository_test.go b/internal/store/postgres/scheduler/job_run_repository_test.go index d0c4b568f6..e75309e580 100644 --- a/internal/store/postgres/scheduler/job_run_repository_test.go +++ b/internal/store/postgres/scheduler/job_run_repository_test.go @@ -17,7 +17,7 @@ import ( func TestPostgresJobRunRepository(t *testing.T) { ctx := context.Background() tnnt, _ := tenant.NewTenant("test-proj", "test-ns") - currentTime := time.Now() + currentTime := time.Now().UTC() scheduledAt := currentTime.Add(-time.Hour) slaDefinitionInSec := int64(3600) //seconds From 591b0d72dbdb9927b8f836d8ee20e265c9df9f7c Mon Sep 17 00:00:00 2001 From: Yash Bhardwaj Date: Wed, 4 Jan 2023 14:59:56 +0530 Subject: [PATCH 21/25] fix: test time zone failures --- .../postgres/scheduler/job_operator_repository_test.go | 6 +++--- .../store/postgres/scheduler/job_run_repository_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/store/postgres/scheduler/job_operator_repository_test.go b/internal/store/postgres/scheduler/job_operator_repository_test.go index ab82b36301..07696d201f 100644 --- a/internal/store/postgres/scheduler/job_operator_repository_test.go +++ b/internal/store/postgres/scheduler/job_operator_repository_test.go @@ -41,7 +41,7 @@ func TestPostgresJobOperatorRepository(t *testing.T) { operatorRun, err := 
operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorSensor, jobRun.ID) assert.Nil(t, err) - assert.True(t, operatorStartTime.Equal(operatorRun.StartTime)) + assert.Equal(t, operatorStartTime.UTC().Format(time.RFC1123), operatorRun.StartTime.UTC().Format(time.RFC1123)) }) }) t.Run("GetOperatorRun", func(t *testing.T) { @@ -94,14 +94,14 @@ func TestPostgresJobOperatorRepository(t *testing.T) { operatorRun, err := operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorTask, jobRun.ID) assert.Nil(t, err) - assert.True(t, operatorStartTime.Equal(operatorRun.StartTime)) + assert.Equal(t, operatorStartTime.UTC().Format(time.RFC1123), operatorRun.StartTime.UTC().Format(time.RFC1123)) err = operatorRunRepo.UpdateOperatorRun(ctx, scheduler.OperatorTask, operatorRun.ID, operatorEndTime, scheduler.StateFailed) assert.Nil(t, err) operatorRun, err = operatorRunRepo.GetOperatorRun(ctx, "some-operator-name", scheduler.OperatorTask, jobRun.ID) assert.Nil(t, err) - assert.True(t, operatorEndTime.Equal(operatorRun.EndTime)) + assert.Equal(t, operatorEndTime.UTC().Format(time.RFC1123), operatorRun.EndTime.UTC().Format(time.RFC1123)) assert.Equal(t, scheduler.StateFailed, operatorRun.Status) }) }) diff --git a/internal/store/postgres/scheduler/job_run_repository_test.go b/internal/store/postgres/scheduler/job_run_repository_test.go index e75309e580..20bcadb77c 100644 --- a/internal/store/postgres/scheduler/job_run_repository_test.go +++ b/internal/store/postgres/scheduler/job_run_repository_test.go @@ -66,7 +66,7 @@ func TestPostgresJobRunRepository(t *testing.T) { jobRunByID, err := jobRunRepo.GetByID(ctx, scheduler.JobRunID(jobRun.ID)) assert.Nil(t, err) assert.EqualValues(t, scheduler.StateSuccess, jobRunByID.State) - assert.True(t, jobEndTime.Equal(jobRunByID.EndTime)) + assert.Equal(t, jobEndTime.UTC().Format(time.RFC1123), jobRunByID.EndTime.UTC().Format(time.RFC1123)) }) }) t.Run("UpdateSLA", func(t *testing.T) { From 
534d84abb9957b61a357fc7ecf336fbc0c731477 Mon Sep 17 00:00:00 2001 From: Arinda Arif Date: Wed, 4 Jan 2023 16:53:02 +0700 Subject: [PATCH 22/25] refactor: simplify job spec struct --- core/job/handler/v1beta1/job.go | 10 +- core/job/handler/v1beta1/job_adapter.go | 56 ++-- core/job/handler/v1beta1/job_test.go | 114 +++++---- core/job/job_test.go | 12 +- .../external_upstream_resolver_test.go | 8 +- .../internal_upstream_resolver_test.go | 27 +- core/job/resolver/upstream_resolver_test.go | 70 +++-- core/job/service/job_service.go | 14 +- core/job/service/job_service_test.go | 234 +++++++++-------- core/job/service/plugin_service.go | 14 +- core/job/service/plugin_service_test.go | 26 +- core/job/spec.go | 192 +++++--------- core/job/spec_test.go | 73 ++---- internal/store/postgres/job/adapter.go | 46 ++-- .../store/postgres/job/job_repository_test.go | 240 +++++++++++------- 15 files changed, 561 insertions(+), 575 deletions(-) diff --git a/core/job/handler/v1beta1/job.go b/core/job/handler/v1beta1/job.go index b45c16036c..5d8a47a198 100644 --- a/core/job/handler/v1beta1/job.go +++ b/core/job/handler/v1beta1/job.go @@ -37,7 +37,7 @@ type JobService interface { Update(ctx context.Context, jobTenant tenant.Tenant, jobs []*job.Spec) error Delete(ctx context.Context, jobTenant tenant.Tenant, jobName job.Name, cleanFlag bool, forceFlag bool) (affectedDownstream []job.FullName, err error) Get(ctx context.Context, jobTenant tenant.Tenant, jobName job.Name) (jobSpec *job.Job, err error) - GetTaskWithInfo(ctx context.Context, task *job.Task) (*job.Task, error) + GetTaskInfo(ctx context.Context, task job.Task) (*models.PluginInfoResponse, error) GetByFilter(ctx context.Context, filters ...filter.FilterOpt) (jobSpecs []*job.Job, err error) ReplaceAll(ctx context.Context, jobTenant tenant.Tenant, jobs []*job.Spec, jobNamesWithValidationError []job.Name, logWriter writer.LogWriter) error Refresh(ctx context.Context, projectName tenant.ProjectName, namespaceNames []string, 
jobNames []string, logWriter writer.LogWriter) error @@ -359,15 +359,15 @@ func (jh *JobHandler) GetJobTask(ctx context.Context, req *pb.GetJobTaskRequest) return nil, err } - jobTask, err := jh.jobService.GetTaskWithInfo(ctx, jobResult.Spec().Task()) + taskInfo, err := jh.jobService.GetTaskInfo(ctx, jobResult.Spec().Task()) if err != nil { return nil, err } jobTaskSpec := &pb.JobTask{ - Name: jobTask.Info().Name, - Description: jobTask.Info().Description, - Image: jobTask.Info().Image, + Name: taskInfo.Name, + Description: taskInfo.Description, + Image: taskInfo.Image, } jobTaskSpec.Destination = &pb.JobTask_Destination{ diff --git a/core/job/handler/v1beta1/job_adapter.go b/core/job/handler/v1beta1/job_adapter.go index a188bfa374..d14231c3fa 100644 --- a/core/job/handler/v1beta1/job_adapter.go +++ b/core/job/handler/v1beta1/job_adapter.go @@ -14,9 +14,9 @@ import ( func toJobProto(jobEntity *job.Job) *pb.JobSpecification { return &pb.JobSpecification{ - Version: int32(jobEntity.Spec().Version().Int()), + Version: int32(jobEntity.Spec().Version()), Name: jobEntity.Spec().Name().String(), - Owner: jobEntity.Spec().Owner().String(), + Owner: jobEntity.Spec().Owner(), StartDate: jobEntity.Spec().Schedule().StartDate().String(), EndDate: jobEntity.Spec().Schedule().EndDate().String(), Interval: jobEntity.Spec().Schedule().Interval(), @@ -61,20 +61,14 @@ func fromJobProtos(protoJobSpecs []*pb.JobSpecification) ([]*job.Spec, []job.Nam } func fromJobProto(js *pb.JobSpecification) (*job.Spec, error) { - version, err := job.VersionFrom(int(js.Version)) - if err != nil { - return nil, err - } + version := int(js.Version) name, err := job.NameFrom(js.Name) if err != nil { return nil, err } - owner, err := job.OwnerFrom(js.Owner) - if err != nil { - return nil, err - } + owner := js.Owner startDate, err := job.ScheduleDateFrom(js.StartDate) if err != nil { @@ -121,7 +115,7 @@ func fromJobProto(js *pb.JobSpecification) (*job.Spec, error) { return nil, err } - var taskConfig 
*job.Config + var taskConfig job.Config if js.Config != nil { taskConfig, err = toConfig(js.Config) if err != nil { @@ -173,14 +167,14 @@ func fromJobProto(js *pb.JobSpecification) (*job.Spec, error) { } if js.Assets != nil { - asset, err := job.NewAsset(js.Assets) + asset, err := job.AssetFrom(js.Assets) if err != nil { return nil, err } jobSpecBuilder = jobSpecBuilder.WithAsset(asset) } - return jobSpecBuilder.Build(), nil + return jobSpecBuilder.Build() } func fromResourceURNs(resourceURNs []job.ResourceURN) []string { @@ -228,11 +222,11 @@ func toHooks(hooksProto []*pb.JobSpecHook) ([]*job.Hook, error) { if err != nil { return nil, err } - hookName, err := job.HookNameFrom(hookProto.Name) + hookSpec, err := job.NewHook(hookProto.Name, hookConfig) if err != nil { return nil, err } - hooks[i] = job.NewHook(hookName, hookConfig) + hooks[i] = hookSpec } return hooks, nil } @@ -241,17 +235,17 @@ func fromHooks(hooks []*job.Hook) []*pb.JobSpecHook { var hooksProto []*pb.JobSpecHook for _, hook := range hooks { hooksProto = append(hooksProto, &pb.JobSpecHook{ - Name: hook.Name().String(), + Name: hook.Name(), Config: fromConfig(hook.Config()), }) } return hooksProto } -func fromAsset(jobAsset *job.Asset) map[string]string { +func fromAsset(jobAsset job.Asset) map[string]string { var assets map[string]string if jobAsset != nil { - assets = jobAsset.Assets() + assets = jobAsset } return assets } @@ -259,8 +253,8 @@ func fromAsset(jobAsset *job.Asset) map[string]string { func toAlerts(notifiers []*pb.JobSpecification_Behavior_Notifiers) ([]*job.AlertSpec, error) { alerts := make([]*job.AlertSpec, len(notifiers)) for i, notify := range notifiers { - alertOn := job.EventType(utils.FromEnumProto(notify.On.String(), "type")) - config, err := job.NewConfig(notify.Config) + alertOn := utils.FromEnumProto(notify.On.String(), "type") + config, err := job.ConfigFrom(notify.Config) if err != nil { return nil, err } @@ -277,9 +271,9 @@ func fromAlerts(jobAlerts []*job.AlertSpec) 
[]*pb.JobSpecification_Behavior_Noti var notifiers []*pb.JobSpecification_Behavior_Notifiers for _, alert := range jobAlerts { notifiers = append(notifiers, &pb.JobSpecification_Behavior_Notifiers{ - On: pb.JobEvent_Type(pb.JobEvent_Type_value[utils.ToEnumProto(string(alert.On()), "type")]), + On: pb.JobEvent_Type(pb.JobEvent_Type_value[utils.ToEnumProto(alert.On(), "type")]), Channels: alert.Channels(), - Config: alert.Config().Configs(), + Config: alert.Config(), }) } return notifiers @@ -295,11 +289,7 @@ func toSpecUpstreams(upstreamProtos []*pb.JobDependency) (*job.UpstreamSpec, err continue } httpUpstreamProto := upstream.HttpDependency - httpUpstreamName, err := job.NameFrom(httpUpstreamProto.Name) - if err != nil { - return nil, err - } - httpUpstream, err := job.NewSpecHTTPUpstreamBuilder(httpUpstreamName, httpUpstreamProto.Url). + httpUpstream, err := job.NewSpecHTTPUpstreamBuilder(httpUpstreamProto.Name, httpUpstreamProto.Url). WithHeaders(httpUpstreamProto.Headers). WithParams(httpUpstreamProto.Params). 
Build() @@ -326,7 +316,7 @@ func fromSpecUpstreams(upstreams *job.UpstreamSpec) []*pb.JobDependency { for _, httpUpstream := range upstreams.HTTPUpstreams() { dependencies = append(dependencies, &pb.JobDependency{ HttpDependency: &pb.HttpDependency{ - Name: httpUpstream.Name().String(), + Name: httpUpstream.Name(), Url: httpUpstream.URL(), Headers: httpUpstream.Headers(), Params: httpUpstream.Params(), @@ -399,17 +389,17 @@ func fromMetadata(metadata *job.Metadata) *pb.JobMetadata { } } -func toConfig(configs []*pb.JobConfigItem) (*job.Config, error) { +func toConfig(configs []*pb.JobConfigItem) (job.Config, error) { configMap := make(map[string]string, len(configs)) for _, config := range configs { configMap[config.Name] = config.Value } - return job.NewConfig(configMap) + return job.ConfigFrom(configMap) } -func fromConfig(jobConfig *job.Config) []*pb.JobConfigItem { +func fromConfig(jobConfig job.Config) []*pb.JobConfigItem { configs := []*pb.JobConfigItem{} - for configName, configValue := range jobConfig.Configs() { + for configName, configValue := range jobConfig { configs = append(configs, &pb.JobConfigItem{Name: configName, Value: configValue}) } return configs @@ -473,7 +463,7 @@ func toHTTPUpstreamProtos(httpUpstreamSpecs []*job.SpecHTTPUpstream) []*pb.HttpD var httpUpstreamProtos []*pb.HttpDependency for _, httpUpstream := range httpUpstreamSpecs { httpUpstreamProtos = append(httpUpstreamProtos, &pb.HttpDependency{ - Name: httpUpstream.Name().String(), + Name: httpUpstream.Name(), Url: httpUpstream.URL(), Headers: httpUpstream.Headers(), Params: httpUpstream.Params(), diff --git a/core/job/handler/v1beta1/job_test.go b/core/job/handler/v1beta1/job_test.go index f9ff8a12f0..1eaf7d8a62 100644 --- a/core/job/handler/v1beta1/job_test.go +++ b/core/job/handler/v1beta1/job_test.go @@ -35,15 +35,14 @@ func TestNewJobHandler(t *testing.T) { "bucket": "gs://ns_bucket", }) sampleTenant, _ := tenant.NewTenant(project.Name().String(), namespace.Name().String()) - 
jobVersion, err := job.VersionFrom(1) - assert.NoError(t, err) + jobVersion := 1 startDate, err := job.ScheduleDateFrom("2022-10-01") assert.NoError(t, err) jobSchedule, err := job.NewScheduleBuilder(startDate).Build() assert.NoError(t, err) - jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, err := models.NewWindow(jobVersion, "d", "24h", "24h") assert.NoError(t, err) - jobConfig, err := job.NewConfig(map[string]string{"sample_key": "sample_value"}) + jobConfig, err := job.ConfigFrom(map[string]string{"sample_key": "sample_value"}) assert.NoError(t, err) jobTask := job.NewTaskBuilder("bq2bq", jobConfig).Build() jobBehavior := &pb.JobSpecification_Behavior{ @@ -72,6 +71,7 @@ func TestNewJobHandler(t *testing.T) { Build() log := log.NewNoop() + sampleOwner := "sample-owner" t.Run("AddJobSpecifications", func(t *testing.T) { t.Run("adds job", func(t *testing.T) { @@ -82,7 +82,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -114,7 +114,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -175,7 +175,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -207,7 +207,7 @@ func TestNewJobHandler(t *testing.T) { Version: int32(jobVersion), Name: "job-A", StartDate: "invalid", - Owner: "sample-owner", + Owner: sampleOwner, EndDate: 
jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), TaskName: jobTask.Name().String(), @@ -218,7 +218,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -251,7 +251,7 @@ func TestNewJobHandler(t *testing.T) { Name: "job-A", StartDate: jobSchedule.StartDate().String(), EndDate: "invalid", - Owner: "sample-owner", + Owner: sampleOwner, Interval: jobSchedule.Interval(), TaskName: jobTask.Name().String(), WindowSize: jobWindow.GetSize(), @@ -261,7 +261,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -299,7 +299,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -312,7 +312,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -372,7 +372,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -415,7 +415,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: 
jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -447,7 +447,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -507,7 +507,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -568,7 +568,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -766,7 +766,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -778,7 +778,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -815,7 +815,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -861,12 +861,12 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, }, { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: 
jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -903,7 +903,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -915,7 +915,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -954,7 +954,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -966,7 +966,7 @@ func TestNewJobHandler(t *testing.T) { { Version: int32(jobVersion), Name: "job-B", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -1100,7 +1100,7 @@ func TestNewJobHandler(t *testing.T) { jobService := new(JobService) defer jobService.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) request := pb.GetJobSpecificationRequest{ @@ -1136,9 +1136,9 @@ func TestNewJobHandler(t *testing.T) { request := pb.GetJobSpecificationsRequest{} - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, 
"table-A", []job.ResourceURN{"table-B"}) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "table-B", []job.ResourceURN{"table-C"}) jobService.On("GetByFilter", ctx, mock.Anything, mock.Anything, mock.Anything).Return([]*job.Job{jobA, jobB}, nil) @@ -1173,9 +1173,9 @@ func TestNewJobHandler(t *testing.T) { NamespaceName: sampleTenant.NamespaceName().String(), } - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "table-B", []job.ResourceURN{"table-C"}) jobService.On("GetByFilter", ctx, mock.Anything, mock.Anything).Return([]*job.Job{jobA, jobB}, nil) @@ -1212,7 +1212,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -1247,7 +1247,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -1282,7 +1282,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: 
"sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), EndDate: jobSchedule.EndDate().String(), Interval: jobSchedule.Interval(), @@ -1319,7 +1319,7 @@ func TestNewJobHandler(t *testing.T) { httpUpstream, _ := job.NewSpecHTTPUpstreamBuilder("sample-upstream", "sample-url").Build() upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithSpecHTTPUpstream([]*job.SpecHTTPUpstream{httpUpstream}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).WithMetadata(metadataSpec).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).WithMetadata(metadataSpec).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", nil) upstreamB := job.NewUpstreamResolved("job-B", "", "resource-b", sampleTenant, "static", "bq2bq", false) @@ -1349,6 +1349,7 @@ func TestNewJobHandler(t *testing.T) { Job: &pb.JobSpecification{ Version: int32(jobVersion), Name: specA.Name().String(), + Owner: sampleOwner, StartDate: specA.Schedule().StartDate().String(), EndDate: specA.Schedule().EndDate().String(), Interval: specA.Schedule().Interval(), @@ -1366,7 +1367,7 @@ func TestNewJobHandler(t *testing.T) { Dependencies: []*pb.JobDependency{ { HttpDependency: &pb.HttpDependency{ - Name: httpUpstream.Name().String(), + Name: httpUpstream.Name(), Url: httpUpstream.URL(), }, }, @@ -1396,7 +1397,7 @@ func TestNewJobHandler(t *testing.T) { }, HttpDependency: []*pb.HttpDependency{ { - Name: httpUpstream.Name().String(), + Name: httpUpstream.Name(), Url: httpUpstream.URL(), }, }, @@ -1426,9 +1427,9 @@ func TestNewJobHandler(t *testing.T) { WithSpecHTTPUpstream([]*job.SpecHTTPUpstream{httpUpstream}). 
WithUpstreamNames([]job.SpecUpstreamName{"job-B"}).Build() - hook1 := job.NewHook("hook-1", jobConfig) + hook1, _ := job.NewHook("hook-1", jobConfig) - specA := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask). + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask). WithSpecUpstream(upstreamSpec). WithHooks([]*job.Hook{hook1}).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", nil) @@ -1465,7 +1466,7 @@ func TestNewJobHandler(t *testing.T) { jobSpecProto := &pb.JobSpecification{ Version: int32(jobVersion), Name: "job-A", - Owner: "sample-owner", + Owner: sampleOwner, StartDate: jobSchedule.StartDate().String(), Interval: jobSchedule.Interval(), TaskName: jobTask.Name().String(), @@ -1488,7 +1489,7 @@ func TestNewJobHandler(t *testing.T) { Job: &pb.JobSpecification{ Version: int32(jobVersion), Name: specA.Name().String(), - Owner: "sample-owner", + Owner: sampleOwner, StartDate: specA.Schedule().StartDate().String(), EndDate: specA.Schedule().EndDate().String(), Interval: specA.Schedule().Interval(), @@ -1600,7 +1601,7 @@ func TestNewJobHandler(t *testing.T) { t.Run("should return downstream and upstream error log messages if exist", func(t *testing.T) { jobService := new(JobService) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", nil) upstreamB := job.NewUpstreamResolved("job-B", "", "resource-b", sampleTenant, "static", "bq2bq", false) @@ -1634,6 +1635,7 @@ func TestNewJobHandler(t *testing.T) { Job: &pb.JobSpecification{ Version: int32(jobVersion), Name: specA.Name().String(), + Owner: sampleOwner, StartDate: specA.Schedule().StartDate().String(), EndDate: specA.Schedule().EndDate().String(), Interval: specA.Schedule().Interval(), @@ -1695,7 +1697,7 @@ func 
TestNewJobHandler(t *testing.T) { httpUpstream, _ := job.NewSpecHTTPUpstreamBuilder("sample-upstream", "sample-url").Build() upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithSpecHTTPUpstream([]*job.SpecHTTPUpstream{httpUpstream}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() basicInfoLogger := writer.BufferedLogger{Messages: []*pb.Log{ {Message: "not found"}, @@ -1762,7 +1764,7 @@ func TestNewJobHandler(t *testing.T) { jobService := new(JobService) defer jobService.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) req := &pb.GetJobTaskRequest{ @@ -1772,7 +1774,7 @@ func TestNewJobHandler(t *testing.T) { } jobService.On("Get", ctx, sampleTenant, jobA.Spec().Name()).Return(jobA, nil) - jobService.On("GetTaskWithInfo", ctx, jobA.Spec().Task()).Return(nil, errors.New("error encountered")) + jobService.On("GetTaskInfo", ctx, jobA.Spec().Task()).Return(nil, errors.New("error encountered")) handler := v1beta1.NewJobHandler(jobService, nil) resp, err := handler.GetJobTask(ctx, req) assert.Error(t, err) @@ -1782,7 +1784,7 @@ func TestNewJobHandler(t *testing.T) { jobService := new(JobService) defer jobService.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) req := &pb.GetJobTaskRequest{ @@ -1791,13 +1793,13 @@ func TestNewJobHandler(t 
*testing.T) { JobName: jobA.Spec().Name().String(), } - jobTask := job.NewTaskBuilder(jobTask.Name(), jobTask.Config()).WithInfo(&models.PluginInfoResponse{ + taskInfo := &models.PluginInfoResponse{ Name: "bq2bq", Description: "task info desc", Image: "odpf/bq2bq:latest", - }).Build() + } jobService.On("Get", ctx, sampleTenant, jobA.Spec().Name()).Return(jobA, nil) - jobService.On("GetTaskWithInfo", ctx, jobA.Spec().Task()).Return(jobTask, nil) + jobService.On("GetTaskInfo", ctx, jobA.Spec().Task()).Return(taskInfo, nil) handler := v1beta1.NewJobHandler(jobService, nil) resp, err := handler.GetJobTask(ctx, req) assert.NoError(t, err) @@ -1872,7 +1874,7 @@ func (_m *JobService) Get(ctx context.Context, jobTenant tenant.Tenant, jobName return r0, r1 } -// GetAll provides a mock function with given fields: ctx, filters +// GetByFilter provides a mock function with given fields: ctx, filters func (_m *JobService) GetByFilter(ctx context.Context, filters ...filter.FilterOpt) ([]*job.Job, error) { _va := make([]interface{}, len(filters)) for _i := range filters { @@ -1949,20 +1951,20 @@ func (_m *JobService) GetJobBasicInfo(ctx context.Context, jobTenant tenant.Tena } // GetTaskInfo provides a mock function with given fields: ctx, task -func (_m *JobService) GetTaskWithInfo(ctx context.Context, task *job.Task) (*job.Task, error) { +func (_m *JobService) GetTaskInfo(ctx context.Context, task job.Task) (*models.PluginInfoResponse, error) { ret := _m.Called(ctx, task) - var r0 *job.Task - if rf, ok := ret.Get(0).(func(context.Context, *job.Task) *job.Task); ok { + var r0 *models.PluginInfoResponse + if rf, ok := ret.Get(0).(func(context.Context, job.Task) *models.PluginInfoResponse); ok { r0 = rf(ctx, task) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*job.Task) + r0 = ret.Get(0).(*models.PluginInfoResponse) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *job.Task) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, job.Task) error); ok { 
r1 = rf(ctx, task) } else { r1 = ret.Error(1) diff --git a/core/job/job_test.go b/core/job/job_test.go index 2df2296cf0..ff85997a77 100644 --- a/core/job/job_test.go +++ b/core/job/job_test.go @@ -22,19 +22,19 @@ func TestEntityJob(t *testing.T) { "bucket": "gs://ns_bucket", }) sampleTenant, _ := tenant.NewTenant(project.Name().String(), namespace.Name().String()) - jobVersion, _ := job.VersionFrom(1) + jobVersion := 1 startDate, _ := job.ScheduleDateFrom("2022-10-01") jobSchedule, _ := job.NewScheduleBuilder(startDate).Build() - jobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") - jobTaskConfig, _ := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobWindow, _ := models.NewWindow(jobVersion, "d", "24h", "24h") + jobTaskConfig, _ := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) jobTask := job.NewTaskBuilder("bq2bq", jobTaskConfig).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("project.dataset.sample-a") jobASources := []job.ResourceURN{"project.dataset.sample-b"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobBDestination := job.ResourceURN("project.dataset.sample-b") jobBSources := []job.ResourceURN{"project.dataset.sample-c"} jobB := job.NewJob(sampleTenant, specB, jobBDestination, jobBSources) @@ -202,7 +202,7 @@ func TestEntityJob(t *testing.T) { t.Run("Job", func(t *testing.T) { t.Run("should return values as inserted", func(t *testing.T) { specUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"job-E"}).Build() - specC := 
job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specUpstream).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specUpstream).Build() jobCDestination := job.ResourceURN("project.dataset.sample-c") jobCSources := []job.ResourceURN{"project.dataset.sample-d"} jobC := job.NewJob(sampleTenant, specC, jobCDestination, jobCSources) diff --git a/core/job/resolver/external_upstream_resolver_test.go b/core/job/resolver/external_upstream_resolver_test.go index efffbf52b0..d309386f64 100644 --- a/core/job/resolver/external_upstream_resolver_test.go +++ b/core/job/resolver/external_upstream_resolver_test.go @@ -23,15 +23,15 @@ func TestExternalUpstreamResolver(t *testing.T) { resourceManager := new(ResourceManager) optimusResourceManagers := []resourcemanager.ResourceManager{resourceManager} - jobVersion, _ := job.VersionFrom(1) + jobVersion := 1 startDate, _ := job.ScheduleDateFrom("2022-10-01") jobSchedule, _ := job.NewScheduleBuilder(startDate).Build() - jobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, _ := models.NewWindow(jobVersion, "d", "24h", "24h") taskName, _ := job.TaskNameFrom("sample-task") - jobTaskConfig, _ := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobTaskConfig, _ := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) jobTask := job.NewTaskBuilder(taskName, jobTaskConfig).Build() upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"external-project/job-B"}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() jobA := job.NewJob(sampleTenant, specA, "", 
[]job.ResourceURN{"resource-C"}) t.Run("Resolve", func(t *testing.T) { diff --git a/core/job/resolver/internal_upstream_resolver_test.go b/core/job/resolver/internal_upstream_resolver_test.go index 776373b7fb..ecc770d05a 100644 --- a/core/job/resolver/internal_upstream_resolver_test.go +++ b/core/job/resolver/internal_upstream_resolver_test.go @@ -17,24 +17,24 @@ func TestInternalUpstreamResolver(t *testing.T) { ctx := context.Background() sampleTenant, _ := tenant.NewTenant("project", "namespace") - jobVersion, _ := job.VersionFrom(1) + jobVersion := 1 startDate, _ := job.ScheduleDateFrom("2022-10-01") jobSchedule, _ := job.NewScheduleBuilder(startDate).Build() - jobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, _ := models.NewWindow(jobVersion, "d", "24h", "24h") taskName, _ := job.TaskNameFrom("sample-task") - jobTaskConfig, _ := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobTaskConfig := map[string]string{"sample_task_key": "sample_value"} jobTask := job.NewTaskBuilder(taskName, jobTaskConfig).Build() upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"job-C"}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"resource-B", "resource-D"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobBDestination := job.ResourceURN("resource-B") jobB := job.NewJob(sampleTenant, specB, jobBDestination, nil) - specC := 
job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobCDestination := job.ResourceURN("resource-C") jobC := job.NewJob(sampleTenant, specC, jobCDestination, nil) @@ -93,7 +93,8 @@ func TestInternalUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) specEUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"job-unknown", "job-C"}).Build() - specE := job.NewSpecBuilder(jobVersion, "job-E", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specEUpstreamSpec).Build() + specE, err := job.NewSpecBuilder(jobVersion, "job-E", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specEUpstreamSpec).Build() + assert.NoError(t, err) jobEDestination := job.ResourceURN("resource-E") jobE := job.NewJob(sampleTenant, specE, jobEDestination, nil) @@ -116,8 +117,12 @@ func TestInternalUpstreamResolver(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specEUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"/", "job-C"}).Build() - specE := job.NewSpecBuilder(jobVersion, "job-E", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specEUpstreamSpec).Build() + specEUpstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"/", "job-C"}).Build() + assert.NoError(t, err) + + specE, err := job.NewSpecBuilder(jobVersion, "job-E", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(specEUpstreamSpec).Build() + assert.NoError(t, err) + jobEDestination := job.ResourceURN("resource-E") jobE := job.NewJob(sampleTenant, specE, jobEDestination, nil) @@ -135,7 +140,9 @@ func TestInternalUpstreamResolver(t *testing.T) { }) }) t.Run("BulkResolve", func(t *testing.T) { - specX := job.NewSpecBuilder(jobVersion, "job-X", "", jobSchedule, 
jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + specX, err := job.NewSpecBuilder(jobVersion, "job-X", "sample-owner", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + assert.NoError(t, err) + jobXDestination := job.ResourceURN("resource-X") jobX := job.NewJob(sampleTenant, specX, jobXDestination, []job.ResourceURN{"resource-B"}) diff --git a/core/job/resolver/upstream_resolver_test.go b/core/job/resolver/upstream_resolver_test.go index cc90bfabc0..86bf53adfe 100644 --- a/core/job/resolver/upstream_resolver_test.go +++ b/core/job/resolver/upstream_resolver_test.go @@ -29,18 +29,18 @@ func TestUpstreamResolver(t *testing.T) { }) sampleTenant, _ := tenant.NewTenant(project.Name().String(), namespace.Name().String()) externalTenant, _ := tenant.NewTenant("external-proj", "external-namespace") - jobVersion, err := job.VersionFrom(1) - assert.NoError(t, err) + jobVersion := 1 startDate, err := job.ScheduleDateFrom("2022-10-01") assert.NoError(t, err) jobSchedule, err := job.NewScheduleBuilder(startDate).Build() assert.NoError(t, err) - jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, err := models.NewWindow(jobVersion, "d", "24h", "24h") assert.NoError(t, err) - jobTaskConfig, err := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobTaskConfig, err := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) assert.NoError(t, err) taskName, _ := job.TaskNameFrom("sample-task") jobTask := job.NewTaskBuilder(taskName, jobTaskConfig).Build() + sampleOwner := "sample-owner" t.Run("BulkResolve", func(t *testing.T) { t.Run("resolve upstream internally", func(t *testing.T) { @@ -52,11 +52,14 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) upstreamName := job.SpecUpstreamName("test-proj/job-c") - upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - specA := 
job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + upstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobAUpstreams := []job.ResourceURN{"resource-B"} - jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreams) jobs := []*job.Job{jobA} @@ -85,8 +88,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) upstreamName := job.SpecUpstreamNameFrom("job-c") - upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + upstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobAUpstreams := []job.ResourceURN{"resource-B", "resource-D"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreams) @@ -124,10 +131,11 @@ func TestUpstreamResolver(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobAUpstreams := []job.ResourceURN{"resource-B"} - jobA := 
job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreams) jobs := []*job.Job{jobA} @@ -149,8 +157,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) upstreamName := job.SpecUpstreamNameFrom("job-c") - upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + upstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobAUpstreams := []job.ResourceURN{"resource-B", "resource-D"} @@ -187,8 +199,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) upstreamName := job.SpecUpstreamNameFrom("job-c") - upstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + upstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(upstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobAUpstreams := []job.ResourceURN{"resource-B", "resource-D"} @@ -227,8 +243,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) jobAUpstreamCName := job.SpecUpstreamNameFrom("job-C") - jobAUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() - 
specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + jobAUpstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"resource-B", "resource-D"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) @@ -264,8 +284,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) jobAUpstreamCName := job.SpecUpstreamNameFrom("") - jobAUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + jobAUpstreamSpec, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"resource-B", "resource-D"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) @@ -301,8 +325,12 @@ func TestUpstreamResolver(t *testing.T) { defer logWriter.AssertExpectations(t) jobAUpstreamCName := job.SpecUpstreamNameFrom("job-C") - jobAUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + jobAUpstreamSpec, err := 
job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{jobAUpstreamCName}).Build() + assert.NoError(t, err) + + specA, err := job.NewSpecBuilder(jobVersion, "job-A", sampleOwner, jobSchedule, jobWindow, jobTask).WithSpecUpstream(jobAUpstreamSpec).Build() + assert.NoError(t, err) + jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"resource-B", "resource-D"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) diff --git a/core/job/service/job_service.go b/core/job/service/job_service.go index 8937e6bed6..afa670834c 100644 --- a/core/job/service/job_service.go +++ b/core/job/service/job_service.go @@ -46,7 +46,7 @@ func NewJobService(repo JobRepository, pluginService PluginService, upstreamReso type PluginService interface { Info(context.Context, job.TaskName) (*models.PluginInfoResponse, error) - GenerateDestination(context.Context, *tenant.WithDetails, *job.Task) (job.ResourceURN, error) + GenerateDestination(context.Context, *tenant.WithDetails, job.Task) (job.ResourceURN, error) GenerateUpstreams(ctx context.Context, jobTenant *tenant.WithDetails, spec *job.Spec, dryRun bool) ([]job.ResourceURN, error) } @@ -158,16 +158,8 @@ func (j JobService) Get(ctx context.Context, jobTenant tenant.Tenant, jobName jo return &job.Job{}, nil } -func (j JobService) GetTaskWithInfo(ctx context.Context, task *job.Task) (*job.Task, error) { - taskInfo, err := j.pluginService.Info(ctx, task.Name()) - if err != nil { - return nil, err - } - - return job.NewTaskBuilder( - task.Name(), - task.Config(), - ).WithInfo(taskInfo).Build(), nil +func (j JobService) GetTaskInfo(ctx context.Context, task job.Task) (*models.PluginInfoResponse, error) { + return j.pluginService.Info(ctx, task.Name()) } func (j JobService) GetByFilter(ctx context.Context, filters ...filter.FilterOpt) ([]*job.Job, error) { diff --git a/core/job/service/job_service_test.go b/core/job/service/job_service_test.go index 6a7c0c65a7..26ab59da34 100644 --- 
a/core/job/service/job_service_test.go +++ b/core/job/service/job_service_test.go @@ -40,14 +40,13 @@ func TestJobService(t *testing.T) { otherTenant, _ := tenant.NewTenant(project.Name().String(), otherNamespace.Name().String()) detailedOtherTenant, _ := tenant.NewTenantDetails(project, otherNamespace) - jobVersion, err := job.VersionFrom(1) - assert.NoError(t, err) + jobVersion := 1 startDate, err := job.ScheduleDateFrom("2022-10-01") assert.NoError(t, err) jobSchedule, err := job.NewScheduleBuilder(startDate).Build() assert.NoError(t, err) - jobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") - jobTaskConfig, err := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobWindow, _ := models.NewWindow(jobVersion, "d", "24h", "24h") + jobTaskConfig, err := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) assert.NoError(t, err) taskName, _ := job.TaskNameFrom("bq2bq") jobTask := job.NewTaskBuilder(taskName, jobTaskConfig).Build() @@ -70,7 +69,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -108,7 +107,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(&tenant.WithDetails{}, errors.New("internal error")) @@ -130,9 +129,9 @@ func 
TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() - specC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specB, specA, specC} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -175,8 +174,8 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specB, specA} jobRepo.On("Add", ctx, mock.Anything).Return(nil, nil) @@ -211,7 +210,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, 
nil) @@ -246,8 +245,8 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA, specB} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -287,7 +286,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -321,7 +320,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -361,7 +360,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := 
[]*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -399,7 +398,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(&tenant.WithDetails{}, errors.New("internal error")) @@ -421,9 +420,9 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() - specC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specB, specA, specC} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -466,8 +465,8 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := 
job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specB, specA} jobRepo.On("Update", ctx, mock.Anything).Return(nil, nil) @@ -502,7 +501,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -537,8 +536,8 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA, specB} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -578,7 +577,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -612,7 +611,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := 
job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -642,7 +641,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetDownstreamByJobName", ctx, project.Name(), specA.Name()).Return(nil, nil) jobRepo.On("Delete", ctx, project.Name(), specA.Name(), false).Return(nil) @@ -656,7 +655,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() downstreamFullNames := []job.FullName{"test-proj/job-B", "test-proj/job-C"} downstreamList := []*job.Downstream{ @@ -676,7 +675,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() downstreamList := []*job.Downstream{ job.NewDownstream("job-B", project.Name(), namespace.Name(), taskName), @@ -693,7 +692,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, 
jobTask).Build() jobRepo.On("GetDownstreamByJobName", ctx, project.Name(), specA.Name()).Return(nil, errors.New("internal error")) @@ -706,7 +705,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetDownstreamByJobName", ctx, project.Name(), specA.Name()).Return(nil, nil) jobRepo.On("Delete", ctx, project.Name(), specA.Name(), false).Return(errors.New("internal error")) @@ -736,12 +735,12 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreamName) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "", nil) incomingSpecs := []*job.Spec{specA, specB} @@ -786,15 +785,15 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreamName) 
incomingSpecs := []*job.Spec{specA} - existingJobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "0h", "24h") - existingSpecA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, existingJobWindow, jobTask).Build() + existingJobWindow, _ := models.NewWindow(jobVersion, "d", "0h", "24h") + existingSpecA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, existingJobWindow, jobTask).Build() existingJobA := job.NewJob(sampleTenant, existingSpecA, jobADestination, jobAUpstreamName) existingSpecs := []*job.Job{existingJobA} @@ -836,10 +835,10 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "", nil) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "", nil) incomingSpecs := []*job.Spec{specA} @@ -875,14 +874,14 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() incomingSpecs := []*job.Spec{specA, specB} - existingJobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "0h", "24h") - existingSpecB := job.NewSpecBuilder(jobVersion, "job-B", "", 
jobSchedule, existingJobWindow, jobTask).Build() + existingJobWindow, _ := models.NewWindow(jobVersion, "d", "0h", "24h") + existingSpecB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, existingJobWindow, jobTask).Build() existingJobB := job.NewJob(sampleTenant, existingSpecB, "", nil) - existingSpecC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobC := job.NewJob(sampleTenant, existingSpecC, "", nil) existingSpecs := []*job.Job{existingJobB, existingJobC} @@ -938,16 +937,16 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() incomingSpecs := []*job.Spec{specA, specB} - existingJobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "0h", "24h") - existingSpecB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, existingJobWindow, jobTask).Build() + existingJobWindow, _ := models.NewWindow(jobVersion, "d", "0h", "24h") + existingSpecB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, existingJobWindow, jobTask).Build() existingJobB := job.NewJob(sampleTenant, existingSpecB, "", nil) - existingSpecC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobC := job.NewJob(sampleTenant, existingSpecC, "", 
nil) - existingSpecD := job.NewSpecBuilder(jobVersion, "job-D", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecD, _ := job.NewSpecBuilder(jobVersion, "job-D", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobD := job.NewJob(sampleTenant, existingSpecD, "", nil) existingSpecs := []*job.Job{existingJobB, existingJobC, existingJobD} @@ -1003,10 +1002,10 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() incomingSpecs := []*job.Spec{specA} - existingSpecC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobC := job.NewJob(sampleTenant, existingSpecC, "", nil) existingSpecs := []*job.Job{existingJobC} @@ -1042,13 +1041,13 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() incomingSpecs := []*job.Spec{specB} - existingJobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "0h", "24h") - existingSpecB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, existingJobWindow, jobTask).Build() + existingJobWindow, _ := models.NewWindow(jobVersion, "d", "0h", "24h") + existingSpecB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, existingJobWindow, jobTask).Build() existingJobB := job.NewJob(sampleTenant, existingSpecB, "", nil) - existingSpecC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, 
jobWindow, jobTask).Build() + existingSpecC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobC := job.NewJob(sampleTenant, existingSpecC, "", nil) existingSpecs := []*job.Job{existingJobB, existingJobC} @@ -1084,12 +1083,12 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() incomingSpecs := []*job.Spec{specA} - existingSpecC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobC := job.NewJob(sampleTenant, existingSpecC, "", nil) - existingSpecD := job.NewSpecBuilder(jobVersion, "job-D", "", jobSchedule, jobWindow, jobTask).Build() + existingSpecD, _ := job.NewSpecBuilder(jobVersion, "job-D", "sample-owner", jobSchedule, jobWindow, jobTask).Build() existingJobD := job.NewJob(sampleTenant, existingSpecD, "", nil) existingSpecs := []*job.Job{existingJobC, existingJobD} @@ -1142,12 +1141,12 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreamName) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, 
jobTask).Build() jobBDestination := job.ResourceURN("resource-B") jobBUpstreamName := []job.ResourceURN{"job-C"} @@ -1194,14 +1193,14 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} incomingSpecs := []*job.Spec{specA} - existingJobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "0h", "24h") - existingSpecA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, existingJobWindow, jobTask).Build() + existingJobWindow, _ := models.NewWindow(jobVersion, "d", "0h", "24h") + existingSpecA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, existingJobWindow, jobTask).Build() existingJobA := job.NewJob(sampleTenant, existingSpecA, jobADestination, jobAUpstreamName) existingSpecs := []*job.Job{existingJobA} @@ -1236,10 +1235,10 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "", nil) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "", nil) incomingSpecs := []*job.Spec{specA} @@ -1275,10 +1274,10 @@ func TestJobService(t *testing.T) { tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) - specA := job.NewSpecBuilder(jobVersion, 
"job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "", nil) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "", nil) incomingSpecs := []*job.Spec{specA} @@ -1311,8 +1310,8 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, "", nil) incomingSpecs := []*job.Spec{specA, specB} @@ -1348,14 +1347,14 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreamName) var jobBDestination job.ResourceURN jobBUpstreamName := []job.ResourceURN{"job-C"} - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(sampleTenant, specB, jobBDestination, 
jobBUpstreamName) jobRepo.On("GetAllByTenant", ctx, sampleTenant).Return([]*job.Job{jobA, jobB}, nil) @@ -1400,7 +1399,7 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobAUpstreamName := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobAUpstreamName) @@ -1408,7 +1407,7 @@ func TestJobService(t *testing.T) { var jobBDestination job.ResourceURN var jobBUpstreamName []job.ResourceURN - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobB := job.NewJob(otherTenant, specB, jobBDestination, jobBUpstreamName) jobsTenant2 := []*job.Job{jobB} @@ -1481,7 +1480,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(jobA, nil) @@ -1510,7 +1509,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) 
jobRepo.On("GetAllByResourceDestination", ctx, job.ResourceURN("table-A")).Return([]*job.Job{jobA}, nil) @@ -1542,9 +1541,9 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(jobA, nil) jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specB.Name()).Return(nil, errors.New("error encountered")) @@ -1562,7 +1561,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(nil, optErrors.NotFound(job.EntityJob, "job not found")) jobService := service.NewJobService(jobRepo, nil, nil, nil, nil) @@ -1577,7 +1576,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(jobA, nil) @@ -1612,7 +1611,7 @@ func 
TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(nil, optErrors.NotFound(job.EntityJob, "job not found")) jobService := service.NewJobService(jobRepo, nil, nil, nil, nil) @@ -1627,7 +1626,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) jobRepo.On("GetByJobName", ctx, sampleTenant.ProjectName(), specA.Name()).Return(jobA, nil) @@ -1673,7 +1672,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) jobRepo.On("GetAllByTenant", ctx, sampleTenant).Return([]*job.Job{jobA}, nil) @@ -1706,7 +1705,7 @@ func TestJobService(t *testing.T) { jobRepo := new(JobRepository) defer jobRepo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-B"}) jobRepo.On("GetAllByProjectName", ctx, 
sampleTenant.ProjectName()).Return([]*job.Job{jobA}, nil) @@ -1728,7 +1727,7 @@ func TestJobService(t *testing.T) { }) }) - t.Run("GetTaskWithInfo", func(t *testing.T) { + t.Run("GetTaskInfo", func(t *testing.T) { t.Run("return error when plugin could not retrieve info", func(t *testing.T) { pluginService := new(PluginService) defer pluginService.AssertExpectations(t) @@ -1736,7 +1735,7 @@ func TestJobService(t *testing.T) { pluginService.On("Info", ctx, jobTask.Name()).Return(nil, errors.New("error encountered")) jobService := service.NewJobService(nil, pluginService, nil, nil, nil) - actual, err := jobService.GetTaskWithInfo(ctx, jobTask) + actual, err := jobService.GetTaskInfo(ctx, jobTask) assert.Error(t, err, "error encountered") assert.Nil(t, actual) }) @@ -1751,12 +1750,11 @@ func TestJobService(t *testing.T) { } pluginService.On("Info", ctx, jobTask.Name()).Return(pluginInfoResp, nil) - expected := job.NewTaskBuilder(jobTask.Name(), jobTask.Config()).WithInfo(pluginInfoResp).Build() jobService := service.NewJobService(nil, pluginService, nil, nil, nil) - actual, err := jobService.GetTaskWithInfo(ctx, jobTask) + actual, err := jobService.GetTaskInfo(ctx, jobTask) assert.NoError(t, err) assert.NotNil(t, actual) - assert.Equal(t, expected, actual) + assert.Equal(t, pluginInfoResp, actual) }) }) @@ -1782,7 +1780,7 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} pluginService.On("GenerateDestination", ctx, detailedTenant, specA.Task()).Return(job.ResourceURN(""), errors.New("some error on generate destination")) @@ -1809,7 +1807,7 @@ func TestJobService(t *testing.T) { repo.On("GetAllByProjectName", ctx, sampleTenant.ProjectName()).Return(nil, errors.New("error on get all by 
project name")) defer repo.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA} pluginService.On("GenerateDestination", ctx, detailedTenant, specA.Task()).Return(job.ResourceURN("example_destination"), nil) @@ -1836,9 +1834,9 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() - specC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA, specB, specC} repo.On("GetAllByProjectName", ctx, sampleTenant.ProjectName()).Return([]*job.Job{}, nil) @@ -1870,9 +1868,9 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() - specC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", 
jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA, specB, specC} jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{"table-C"}) @@ -1904,9 +1902,9 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() - specC := job.NewSpecBuilder(jobVersion, "job-C", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specC, _ := job.NewSpecBuilder(jobVersion, "job-C", "sample-owner", jobSchedule, jobWindow, jobTask).Build() specs := []*job.Spec{specA, specB, specC} jobA := job.NewJob(sampleTenant, specA, "table-A", []job.ResourceURN{}) @@ -1939,7 +1937,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", []job.ResourceURN{"resource-B"}) upstreamB := job.NewUpstreamResolved("job-B", "", "resource-B", sampleTenant, "inferred", taskName, false) @@ -1967,7 +1965,7 @@ func TestJobService(t *testing.T) { logWriter := new(mockWriter) defer logWriter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", []job.ResourceURN{"resource-B"}) 
upstreamB := job.NewUpstreamResolved("job-B", "", "resource-B", sampleTenant, "inferred", taskName, false) @@ -1995,7 +1993,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -2027,7 +2025,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) @@ -2053,7 +2051,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, errors.New("sample error")) @@ -2075,7 +2073,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() tenantDetailsGetter.On("GetDetails", ctx, sampleTenant).Return(detailedTenant, nil) @@ -2106,7 
+2104,7 @@ func TestJobService(t *testing.T) { specASchedule, err := job.NewScheduleBuilder(startDate).WithCatchUp(true).Build() assert.NoError(t, err) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", specASchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", specASchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobA := job.NewJob(sampleTenant, specA, jobADestination, nil) @@ -2133,7 +2131,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetByJobName", ctx, project.Name(), specA.Name()).Return(nil, errors.New("internal error")) @@ -2155,7 +2153,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobRepo.On("GetByJobName", ctx, project.Name(), specA.Name()).Return(nil, nil) @@ -2177,12 +2175,12 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) - specB := job.NewSpecBuilder(jobVersion, "job-B", "", jobSchedule, jobWindow, jobTask).Build() 
+ specB, _ := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobBDestination := job.ResourceURN("resource-B") jobBSources := []job.ResourceURN{"job-C"} jobB := job.NewJob(sampleTenant, specB, jobBDestination, jobBSources) @@ -2208,7 +2206,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobADestination := job.ResourceURN("resource-A") jobASources := []job.ResourceURN{"job-B"} jobA := job.NewJob(sampleTenant, specA, jobADestination, jobASources) @@ -2237,7 +2235,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", nil) jobADownstream := []*job.Downstream{ @@ -2263,7 +2261,7 @@ func TestJobService(t *testing.T) { tenantDetailsGetter := new(TenantDetailsGetter) defer tenantDetailsGetter.AssertExpectations(t) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, _ := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() jobA := job.NewJob(sampleTenant, specA, "resource-A", nil) jobADownstream := []*job.Downstream{ @@ -2548,18 +2546,18 @@ type PluginService struct { } // GenerateDestination provides a mock function with given fields: _a0, _a1, _a2 -func (_m *PluginService) GenerateDestination(_a0 context.Context, _a1 *tenant.WithDetails, _a2 *job.Task) (job.ResourceURN, error) { +func (_m 
*PluginService) GenerateDestination(_a0 context.Context, _a1 *tenant.WithDetails, _a2 job.Task) (job.ResourceURN, error) { ret := _m.Called(_a0, _a1, _a2) var r0 job.ResourceURN - if rf, ok := ret.Get(0).(func(context.Context, *tenant.WithDetails, *job.Task) job.ResourceURN); ok { + if rf, ok := ret.Get(0).(func(context.Context, *tenant.WithDetails, job.Task) job.ResourceURN); ok { r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Get(0).(job.ResourceURN) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *tenant.WithDetails, *job.Task) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *tenant.WithDetails, job.Task) error); ok { r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) diff --git a/core/job/service/plugin_service.go b/core/job/service/plugin_service.go index 997cc5c971..b09c28b87e 100644 --- a/core/job/service/plugin_service.go +++ b/core/job/service/plugin_service.go @@ -72,7 +72,7 @@ func (p JobPluginService) Info(_ context.Context, taskName job.TaskName) (*model return plugin.YamlMod.PluginInfo(), nil } -func (p JobPluginService) GenerateDestination(ctx context.Context, tnnt *tenant.WithDetails, task *job.Task) (job.ResourceURN, error) { +func (p JobPluginService) GenerateDestination(ctx context.Context, tnnt *tenant.WithDetails, task job.Task) (job.ResourceURN, error) { plugin, err := p.pluginRepo.GetByName(task.Name().String()) if err != nil { return "", err @@ -82,7 +82,7 @@ func (p JobPluginService) GenerateDestination(ctx context.Context, tnnt *tenant. 
return "", ErrUpstreamModNotFound } - compiledConfig, err := p.compileConfig(ctx, task.Config(), tnnt) + compiledConfig, err := p.compileConfig(ctx, task.Config().Map(), tnnt) if err != nil { return "", err } @@ -139,7 +139,7 @@ func (p JobPluginService) GenerateUpstreams(ctx context.Context, jobTenant *tena return upstreamURNs, nil } -func (p JobPluginService) compileConfig(ctx context.Context, configs *job.Config, tnnt *tenant.WithDetails) (models.PluginConfigs, error) { +func (p JobPluginService) compileConfig(ctx context.Context, configs job.Config, tnnt *tenant.WithDetails) (models.PluginConfigs, error) { jobTenant := tnnt.ToTenant() secrets, err := p.secretsGetter.GetAll(ctx, jobTenant.ProjectName(), jobTenant.NamespaceName().String()) if err != nil { @@ -152,7 +152,7 @@ func (p JobPluginService) compileConfig(ctx context.Context, configs *job.Config ) var pluginConfigs models.PluginConfigs - for key, val := range configs.Configs() { + for key, val := range configs { compiledConf, err := p.engine.CompileString(val, tmplCtx) if err != nil { p.logger.Warn("error in template compilation: ", err.Error()) @@ -170,10 +170,10 @@ func (p JobPluginService) compileAsset(ctx context.Context, plugin *models.Plugi if plugin.DependencyMod != nil { var assets map[string]string if spec.Asset() != nil { - assets = spec.Asset().Assets() + assets = spec.Asset() } jobDestinationResponse, err := plugin.DependencyMod.GenerateDestination(ctx, models.GenerateDestinationRequest{ - Config: models.PluginConfigs{}.FromMap(spec.Task().Config().Configs()), + Config: models.PluginConfigs{}.FromMap(spec.Task().Config()), Assets: models.PluginAssets{}.FromMap(assets), PluginOptions: models.PluginOptions{ DryRun: true, @@ -196,7 +196,7 @@ func (p JobPluginService) compileAsset(ctx context.Context, plugin *models.Plugi var assets map[string]string if spec.Asset() != nil { - assets = spec.Asset().Assets() + assets = spec.Asset() } templates, err := p.engine.Compile(assets, 
map[string]interface{}{ diff --git a/core/job/service/plugin_service_test.go b/core/job/service/plugin_service_test.go index bbd70bc66b..8caafacabb 100644 --- a/core/job/service/plugin_service_test.go +++ b/core/job/service/plugin_service_test.go @@ -34,11 +34,11 @@ func TestPluginService(t *testing.T) { assert.NoError(t, err) jobSchedule, err := job.NewScheduleBuilder(startDate).Build() assert.NoError(t, err) - jobVersion, err := job.VersionFrom(1) + jobVersion := 1 assert.NoError(t, err) - jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, err := models.NewWindow(jobVersion, "d", "24h", "24h") assert.NoError(t, err) - jobTaskConfig, err := job.NewConfig(map[string]string{ + jobTaskConfig, err := job.ConfigFrom(map[string]string{ "SECRET_TABLE_NAME": "{{.secret.table_name}}", }) assert.NoError(t, err) @@ -292,9 +292,10 @@ func TestPluginService(t *testing.T) { Dependencies: []string{jobSource.String()}}, nil) - asset, err := job.NewAsset(map[string]string{"sample-key": "sample-value"}) + asset, err := job.AssetFrom(map[string]string{"sample-key": "sample-value"}) + assert.NoError(t, err) + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).WithAsset(asset).Build() assert.NoError(t, err) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).WithAsset(asset).Build() pluginService := service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) @@ -315,7 +316,8 @@ func TestPluginService(t *testing.T) { pluginRepo.On("GetByName", jobTask.Name().String()).Return(nil, errors.New("not found")) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) pluginService := 
service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) @@ -343,7 +345,8 @@ func TestPluginService(t *testing.T) { pluginWithoutDependencyMod := &models.Plugin{YamlMod: yamlMod} pluginRepo.On("GetByName", jobTask.Name().String()).Return(pluginWithoutDependencyMod, nil) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) pluginService := service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) @@ -379,7 +382,8 @@ func TestPluginService(t *testing.T) { secretsGetter.On("GetAll", ctx, project.Name(), namespace.Name().String()).Return([]*tenant.PlainTextSecret{}, errors.New("getting secret error")) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) pluginService := service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) @@ -409,7 +413,8 @@ func TestPluginService(t *testing.T) { depMod.On("GenerateDestination", ctx, mock.Anything).Return(&models.GenerateDestinationResponse{}, errors.New("generate destination error")) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) pluginService := service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) @@ -448,7 +453,8 
@@ func TestPluginService(t *testing.T) { depMod.On("GenerateDependencies", ctx, mock.Anything).Return(&models.GenerateDependenciesResponse{}, errors.New("generate dependencies error")) - specA := job.NewSpecBuilder(jobVersion, "job-A", "", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) pluginService := service.NewJobPluginService(secretsGetter, pluginRepo, engine, logger) result, err := pluginService.GenerateUpstreams(ctx, tenantDetails, specA, false) diff --git a/core/job/spec.go b/core/job/spec.go index 73b17f86af..8c24f13f9e 100644 --- a/core/job/spec.go +++ b/core/job/spec.go @@ -13,23 +13,23 @@ import ( const DateLayout = "2006-01-02" type Spec struct { - version Version + version int name Name - owner Owner + owner string schedule *Schedule window models.Window - task *Task + task Task description string labels map[string]string metadata *Metadata hooks []*Hook - asset *Asset + asset Asset alertSpecs []*AlertSpec upstreamSpec *UpstreamSpec } -func (s Spec) Version() Version { +func (s Spec) Version() int { return s.version } @@ -37,7 +37,7 @@ func (s Spec) Name() Name { return s.name } -func (s Spec) Owner() Owner { +func (s Spec) Owner() string { return s.owner } @@ -49,7 +49,7 @@ func (s Spec) Window() models.Window { return s.window } -func (s Spec) Task() *Task { +func (s Spec) Task() Task { return s.task } @@ -73,7 +73,7 @@ func (s Spec) UpstreamSpec() *UpstreamSpec { return s.upstreamSpec } -func (s Spec) Asset() *Asset { +func (s Spec) Asset() Asset { return s.asset } @@ -86,12 +86,12 @@ type SpecBuilder struct { } func NewSpecBuilder( - version Version, + version int, name Name, - owner Owner, + owner string, schedule *Schedule, window models.Window, - task *Task, + task Task, ) *SpecBuilder { return &SpecBuilder{ spec: &Spec{ @@ -105,8 +105,14 @@ func NewSpecBuilder( } } -func (s *SpecBuilder) Build() *Spec { - return 
s.spec +func (s *SpecBuilder) Build() (*Spec, error) { + if s.spec.version <= 0 { + return nil, errors.InvalidArgument(EntityJob, "version is less than or equal to zero") + } + if s.spec.owner == "" { + return nil, errors.InvalidArgument(EntityJob, "owner is empty") + } + return s.spec, nil } func (s *SpecBuilder) WithHooks(hooks []*Hook) *SpecBuilder { @@ -133,7 +139,7 @@ func (s *SpecBuilder) WithSpecUpstream(specUpstream *UpstreamSpec) *SpecBuilder } } -func (s *SpecBuilder) WithAsset(asset *Asset) *SpecBuilder { +func (s *SpecBuilder) WithAsset(asset Asset) *SpecBuilder { spec := *s.spec spec.asset = asset return &SpecBuilder{ @@ -175,19 +181,6 @@ func (s Specs) ToNameAndSpecMap() map[Name]*Spec { return nameAndSpecMap } -type Version int - -func VersionFrom(version int) (Version, error) { - if version <= 0 { - return 0, errors.InvalidArgument(EntityJob, "version is less than or equal to zero") - } - return Version(version), nil -} - -func (v Version) Int() int { - return int(v) -} - type Name string func NameFrom(name string) (Name, error) { @@ -201,24 +194,11 @@ func (n Name) String() string { return string(n) } -type Owner string - -func OwnerFrom(owner string) (Owner, error) { - if owner == "" { - return "", errors.InvalidArgument(EntityJob, "owner is empty") - } - return Owner(owner), nil -} - -func (o Owner) String() string { - return string(o) -} - type ScheduleDate string func ScheduleDateFrom(date string) (ScheduleDate, error) { if date == "" { - return ScheduleDate(""), nil + return "", nil } if _, err := time.Parse(DateLayout, date); err != nil { msg := fmt.Sprintf("error is encountered when validating date with layout [%s]: %s", DateLayout, err) @@ -346,19 +326,17 @@ func (s ScheduleBuilder) WithRetry(retry *Retry) *ScheduleBuilder { } } -type Config struct { - configs map[string]string -} +type Config map[string]string -func NewConfig(configs map[string]string) (*Config, error) { +func ConfigFrom(configs map[string]string) (Config, error) { if err 
:= validateMap(configs); err != nil { return nil, err } - return &Config{configs: configs}, nil + return configs, nil } -func (c Config) Configs() map[string]string { - return c.configs +func (c Config) Map() map[string]string { + return c } type TaskName string @@ -375,42 +353,29 @@ func (t TaskName) String() string { } type Task struct { - info *models.PluginInfoResponse name TaskName - config *Config + config Config } func (t Task) Name() TaskName { return t.name } -func (t Task) Config() *Config { +func (t Task) Config() Config { return t.config } -func (t Task) Info() *models.PluginInfoResponse { - return t.info -} - type TaskBuilder struct { - task *Task -} - -func NewTaskBuilder(name TaskName, config *Config) *TaskBuilder { - return &TaskBuilder{ - task: &Task{name: name, config: config}, - } + task Task } -func (t TaskBuilder) WithInfo(info *models.PluginInfoResponse) *TaskBuilder { - task := *t.task - task.info = info +func NewTaskBuilder(name TaskName, config Config) *TaskBuilder { return &TaskBuilder{ - task: &task, + task: Task{name: name, config: config}, } } -func (t TaskBuilder) Build() *Task { +func (t TaskBuilder) Build() Task { return t.task } @@ -498,91 +463,52 @@ func (m MetadataBuilder) WithScheduler(scheduler map[string]string) *MetadataBui } } -type HookName string - -func HookNameFrom(name string) (HookName, error) { - if name == "" { - return "", errors.InvalidArgument(EntityJob, "name is empty") - } - return HookName(name), nil -} - -func (h HookName) String() string { - return string(h) -} - type Hook struct { - name HookName - config *Config + name string + config Config } -func NewHook(name HookName, config *Config) *Hook { - return &Hook{name: name, config: config} +func NewHook(name string, config Config) (*Hook, error) { + if name == "" { + return nil, errors.InvalidArgument(EntityJob, "name is empty") + } + return &Hook{name: name, config: config}, nil } -func (h Hook) Name() HookName { +func (h Hook) Name() string { return h.name } 
-func (h Hook) Config() *Config { +func (h Hook) Config() Config { return h.config } -type Asset struct { - assets map[string]string -} +type Asset map[string]string -func NewAsset(fileNameToContent map[string]string) (*Asset, error) { - asset := &Asset{assets: fileNameToContent} +func AssetFrom(fileNameToContent map[string]string) (Asset, error) { + asset := Asset(fileNameToContent) if err := asset.validate(); err != nil { return nil, err } return asset, nil } -func (a Asset) validate() error { - return validateMap(a.assets) +func (a Asset) Map() map[string]string { + return a } -func (a Asset) Assets() map[string]string { - return a.assets +func (a Asset) validate() error { + return validateMap(a) } -type EventType string - -// TODO: Check which event type that is valid. There should be a validation and also added in the documentation. -const ( - SLAMissEvent EventType = "sla_miss" - - JobFailureEvent EventType = "failure" - JobStartEvent EventType = "job_start" - JobFailEvent EventType = "job_fail" - JobSuccessEvent EventType = "job_success" - JobRetryEvent EventType = "retry" - - TaskStartEvent EventType = "task_start" - TaskRetryEvent EventType = "task_retry" - TaskFailEvent EventType = "task_fail" - TaskSuccessEvent EventType = "task_success" - - HookStartEvent EventType = "hook_start" - HookRetryEvent EventType = "hook_retry" - HookFailEvent EventType = "hook_fail" - HookSuccessEvent EventType = "hook_success" - - SensorStartEvent EventType = "sensor_start" - SensorRetryEvent EventType = "sensor_retry" - SensorFailEvent EventType = "sensor_fail" - SensorSuccessEvent EventType = "sensor_success" -) - type AlertSpec struct { - on EventType + on string + channels []string - config *Config + config Config } -func (a AlertSpec) On() EventType { +func (a AlertSpec) On() string { return a.on } @@ -590,13 +516,13 @@ func (a AlertSpec) Channels() []string { return a.channels } -func (a AlertSpec) Config() *Config { +func (a AlertSpec) Config() Config { return 
a.config } func (a AlertSpec) validate() error { if a.config != nil { - if err := validateMap(a.config.configs); err != nil { + if err := validateMap(a.config); err != nil { return err } } @@ -607,7 +533,7 @@ type AlertBuilder struct { alert *AlertSpec } -func NewAlertBuilder(on EventType, channels []string) *AlertBuilder { +func NewAlertBuilder(on string, channels []string) *AlertBuilder { return &AlertBuilder{ alert: &AlertSpec{ on: on, @@ -623,7 +549,7 @@ func (a AlertBuilder) Build() (*AlertSpec, error) { return a.alert, nil } -func (a AlertBuilder) WithConfig(config *Config) *AlertBuilder { +func (a AlertBuilder) WithConfig(config Config) *AlertBuilder { alert := *a.alert alert.config = config return &AlertBuilder{ @@ -633,13 +559,13 @@ func (a AlertBuilder) WithConfig(config *Config) *AlertBuilder { // TODO: reconsider whether we still need it or not type SpecHTTPUpstream struct { - name Name + name string url string headers map[string]string params map[string]string } -func (s SpecHTTPUpstream) Name() Name { +func (s SpecHTTPUpstream) Name() string { return s.name } @@ -666,7 +592,7 @@ type SpecHTTPUpstreamBuilder struct { upstream *SpecHTTPUpstream } -func NewSpecHTTPUpstreamBuilder(name Name, url string) *SpecHTTPUpstreamBuilder { +func NewSpecHTTPUpstreamBuilder(name string, url string) *SpecHTTPUpstreamBuilder { return &SpecHTTPUpstreamBuilder{ upstream: &SpecHTTPUpstream{ name: name, diff --git a/core/job/spec_test.go b/core/job/spec_test.go index dcd0e4b145..a0f2733da4 100644 --- a/core/job/spec_test.go +++ b/core/job/spec_test.go @@ -11,7 +11,7 @@ import ( ) func TestEntitySpec(t *testing.T) { - jobVersion, _ := job.VersionFrom(1) + jobVersion := 1 startDate, _ := job.ScheduleDateFrom("2022-10-01") endDate, _ := job.ScheduleDateFrom("2022-10-02") retry := job.NewRetry(0, int32(0), false) @@ -22,21 +22,21 @@ func TestEntitySpec(t *testing.T) { WithRetry(retry). WithDependsOnPast(false). 
Build() - jobWindow, _ := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") - jobTaskConfig, _ := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobWindow, _ := models.NewWindow(jobVersion, "d", "24h", "24h") + jobTaskConfig, _ := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) jobTask := job.NewTaskBuilder("bq2bq", jobTaskConfig).Build() description := "sample description" labels := map[string]string{"key": "value"} - hook := job.NewHook("sample-hook", jobTaskConfig) - jobAlertConfig, _ := job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + hook, _ := job.NewHook("sample-hook", jobTaskConfig) + jobAlertConfig, _ := job.ConfigFrom(map[string]string{"sample_alert_key": "sample_value"}) httpUpstreamConfig := map[string]string{"host": "sample-host"} httpUpstreamHeader := map[string]string{"header-key": "sample-header-val"} httpUpstream, _ := job.NewSpecHTTPUpstreamBuilder("sample-name", "sample-url").WithParams(httpUpstreamConfig).WithHeaders(httpUpstreamHeader).Build() specUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"job-d"}).WithSpecHTTPUpstream([]*job.SpecHTTPUpstream{httpUpstream}).Build() - alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() + alert, _ := job.NewAlertBuilder("sla_miss", []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() assetMap := map[string]string{"key": "value"} - asset, _ := job.NewAsset(assetMap) + asset, _ := job.AssetFrom(assetMap) resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceMetadata := job.NewResourceMetadata(resourceRequestConfig, resourceLimitConfig) @@ -47,18 +47,18 @@ func TestEntitySpec(t *testing.T) { t.Run("Spec", func(t *testing.T) { t.Run("should return values as inserted", func(t *testing.T) { - specA := job.NewSpecBuilder(jobVersion, "job-A", 
"sample-owner", jobSchedule, jobWindow, jobTask). + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask). WithDescription(description). WithLabels(labels).WithHooks([]*job.Hook{hook}).WithAlerts([]*job.AlertSpec{alert}). WithSpecUpstream(specUpstream). WithAsset(asset). WithMetadata(jobMetadata). Build() + assert.NoError(t, err) assert.Equal(t, job.Name("job-A"), specA.Name()) assert.Equal(t, jobVersion, specA.Version()) - assert.Equal(t, job.Owner("sample-owner"), specA.Owner()) - assert.Equal(t, "sample-owner", specA.Owner().String()) + assert.Equal(t, "sample-owner", specA.Owner()) assert.Equal(t, jobSchedule, specA.Schedule()) assert.Equal(t, jobSchedule.Retry(), specA.Schedule().Retry()) @@ -77,22 +77,21 @@ func TestEntitySpec(t *testing.T) { assert.Equal(t, jobTask, specA.Task()) assert.Equal(t, jobTask.Name(), specA.Task().Name()) assert.Equal(t, jobTask.Name().String(), specA.Task().Name().String()) - assert.Equal(t, jobTask.Info(), specA.Task().Info()) assert.Equal(t, jobTask.Config(), specA.Task().Config()) - assert.Equal(t, jobTask.Config().Configs(), specA.Task().Config().Configs()) + assert.Equal(t, jobTask.Config(), specA.Task().Config()) assert.Equal(t, description, specA.Description()) assert.Equal(t, labels, specA.Labels()) assert.Equal(t, []*job.Hook{hook}, specA.Hooks()) assert.Equal(t, hook.Name(), specA.Hooks()[0].Name()) - assert.Equal(t, hook.Name().String(), specA.Hooks()[0].Name().String()) + assert.Equal(t, hook.Name(), specA.Hooks()[0].Name()) + assert.Equal(t, hook.Config(), specA.Hooks()[0].Config()) assert.Equal(t, hook.Config(), specA.Hooks()[0].Config()) - assert.Equal(t, hook.Config().Configs(), specA.Hooks()[0].Config().Configs()) assert.Equal(t, []*job.AlertSpec{alert}, specA.AlertSpecs()) assert.Equal(t, alert.Config(), specA.AlertSpecs()[0].Config()) - assert.Equal(t, alert.Config().Configs(), specA.AlertSpecs()[0].Config().Configs()) + assert.Equal(t, alert.Config(), 
specA.AlertSpecs()[0].Config()) assert.Equal(t, alert.Channels(), specA.AlertSpecs()[0].Channels()) assert.Equal(t, alert.On(), specA.AlertSpecs()[0].On()) @@ -105,7 +104,7 @@ func TestEntitySpec(t *testing.T) { assert.Equal(t, specUpstream.HTTPUpstreams()[0].Headers(), specA.UpstreamSpec().HTTPUpstreams()[0].Headers()) assert.Equal(t, asset, specA.Asset()) - assert.Equal(t, asset.Assets(), specA.Asset().Assets()) + assert.Equal(t, asset, specA.Asset()) assert.Equal(t, jobMetadata, specA.Metadata()) assert.Equal(t, jobMetadata.Resource(), specA.Metadata().Resource()) @@ -120,8 +119,11 @@ func TestEntitySpec(t *testing.T) { t.Run("Specs", func(t *testing.T) { t.Run("ToNameAndSpecMap should return map with name key and spec value", func(t *testing.T) { - specA := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() - specB := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + specA, err := job.NewSpecBuilder(jobVersion, "job-A", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) + + specB, err := job.NewSpecBuilder(jobVersion, "job-B", "sample-owner", jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) expectedMap := map[job.Name]*job.Spec{ specA.Name(): specA, @@ -197,26 +199,19 @@ func TestEntitySpec(t *testing.T) { t.Run("Asset", func(t *testing.T) { t.Run("should return asset and nil error if no error found", func(t *testing.T) { validAssetMap := map[string]string{"key": "value"} - validAsset, err := job.NewAsset(validAssetMap) + validAsset, err := job.AssetFrom(validAssetMap) assert.NoError(t, err) - assert.Equal(t, validAssetMap, validAsset.Assets()) + assert.Equal(t, job.Asset(validAssetMap), validAsset) + assert.Equal(t, validAssetMap, validAsset.Map()) }) t.Run("should return nil and error if asset map is invalid", func(t *testing.T) { invalidAssetMap := map[string]string{"": ""} - invalidAsset, err := 
job.NewAsset(invalidAssetMap) + invalidAsset, err := job.AssetFrom(invalidAssetMap) assert.Error(t, err) assert.Nil(t, invalidAsset) }) }) - t.Run("VersionFrom", func(t *testing.T) { - t.Run("should return error if version is less than or equals to zero", func(t *testing.T) { - version, err := job.VersionFrom(0) - assert.ErrorContains(t, err, "version is less than or equal to zero") - assert.Zero(t, version) - }) - }) - t.Run("NameFrom", func(t *testing.T) { t.Run("should return error if name is empty", func(t *testing.T) { name, err := job.NameFrom("") @@ -225,14 +220,6 @@ func TestEntitySpec(t *testing.T) { }) }) - t.Run("OwnerFrom", func(t *testing.T) { - t.Run("should return error if owner is empty", func(t *testing.T) { - owner, err := job.OwnerFrom("") - assert.ErrorContains(t, err, "owner is empty") - assert.Empty(t, owner) - }) - }) - t.Run("ScheduleDateFrom", func(t *testing.T) { t.Run("should not return error if date is empty", func(t *testing.T) { scheduleDate, err := job.ScheduleDateFrom("") @@ -254,17 +241,9 @@ func TestEntitySpec(t *testing.T) { }) }) - t.Run("HookNameFrom", func(t *testing.T) { - t.Run("should return error if hook name is empty", func(t *testing.T) { - owner, err := job.HookNameFrom("") - assert.ErrorContains(t, err, "name is empty") - assert.Empty(t, owner) - }) - }) - - t.Run("NewConfig", func(t *testing.T) { + t.Run("ConfigFrom", func(t *testing.T) { t.Run("should return error if the config map is invalid", func(t *testing.T) { - jobConfig, err := job.NewConfig(map[string]string{"": ""}) + jobConfig, err := job.ConfigFrom(map[string]string{"": ""}) assert.Error(t, err) assert.Empty(t, jobConfig) }) diff --git a/internal/store/postgres/job/adapter.go b/internal/store/postgres/job/adapter.go index 95a17436b3..cc3f59dc56 100644 --- a/internal/store/postgres/job/adapter.go +++ b/internal/store/postgres/job/adapter.go @@ -149,7 +149,7 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { var assets map[string]string if 
jobSpec.Asset() != nil { - assets = jobSpec.Asset().Assets() + assets = jobSpec.Asset() } schedule, err := toStorageSchedule(jobSpec.Schedule()) @@ -164,8 +164,8 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { return &Spec{ Name: jobSpec.Name().String(), - Version: jobSpec.Version().Int(), - Owner: jobSpec.Owner().String(), + Version: jobSpec.Version(), + Owner: jobSpec.Owner(), Description: jobSpec.Description(), Labels: jobSpec.Labels(), Assets: assets, @@ -177,7 +177,7 @@ func toStorageSpec(jobEntity *job.Job) (*Spec, error) { Alert: alertsBytes, TaskName: jobSpec.Task().Name().String(), - TaskConfig: jobSpec.Task().Config().Configs(), + TaskConfig: jobSpec.Task().Config(), Hooks: hooksBytes, @@ -223,8 +223,8 @@ func toStorageHooks(hookSpecs []*job.Hook) ([]byte, error) { func toStorageHook(spec *job.Hook) Hook { return Hook{ - Name: spec.Name().String(), - Config: spec.Config().Configs(), + Name: spec.Name(), + Config: spec.Config(), } } @@ -235,8 +235,8 @@ func toStorageAlerts(alertSpecs []*job.AlertSpec) ([]byte, error) { var alerts []Alert for _, alertSpec := range alertSpecs { alerts = append(alerts, Alert{ - On: string(alertSpec.On()), - Config: alertSpec.Config().Configs(), + On: alertSpec.On(), + Config: alertSpec.Config(), Channels: alertSpec.Channels(), }) } @@ -316,20 +316,14 @@ func toStorageMetadata(metadataSpec *job.Metadata) ([]byte, error) { } func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { - version, err := job.VersionFrom(jobSpec.Version) - if err != nil { - return nil, err - } + version := jobSpec.Version jobName, err := job.NameFrom(jobSpec.Name) if err != nil { return nil, err } - owner, err := job.OwnerFrom(jobSpec.Owner) - if err != nil { - return nil, err - } + owner := jobSpec.Owner var schedule *job.Schedule if jobSpec.Schedule != nil { @@ -347,9 +341,9 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { } } - var taskConfig *job.Config + var taskConfig job.Config if jobSpec.TaskConfig != nil { - taskConfig, 
err = job.NewConfig(jobSpec.TaskConfig) + taskConfig, err = job.ConfigFrom(jobSpec.TaskConfig) if err != nil { return nil, err } @@ -436,14 +430,14 @@ func fromStorageSpec(jobSpec *Spec) (*job.Spec, error) { } if jobSpec.Assets != nil { - asset, err := job.NewAsset(jobSpec.Assets) + asset, err := job.AssetFrom(jobSpec.Assets) if err != nil { return nil, err } jobSpecBuilder = jobSpecBuilder.WithAsset(asset) } - return jobSpecBuilder.Build(), nil + return jobSpecBuilder.Build() } func fromStorageWindow(raw []byte, jobVersion int) (models.Window, error) { @@ -513,15 +507,11 @@ func fromStorageHooks(raw []byte) ([]*job.Hook, error) { } func fromStorageHook(hook Hook) (*job.Hook, error) { - config, err := job.NewConfig(hook.Config) - if err != nil { - return nil, err - } - hookName, err := job.HookNameFrom(hook.Name) + config, err := job.ConfigFrom(hook.Config) if err != nil { return nil, err } - return job.NewHook(hookName, config), nil + return job.NewHook(hook.Name, config) } func fromStorageAlerts(raw []byte) ([]*job.AlertSpec, error) { @@ -536,11 +526,11 @@ func fromStorageAlerts(raw []byte) ([]*job.AlertSpec, error) { var jobAlerts []*job.AlertSpec for _, alert := range alerts { - config, err := job.NewConfig(alert.Config) + config, err := job.ConfigFrom(alert.Config) if err != nil { return nil, err } - jobAlert, err := job.NewAlertBuilder(job.EventType(alert.On), alert.Channels). + jobAlert, err := job.NewAlertBuilder(alert.On, alert.Channels). WithConfig(config). 
Build() if err != nil { diff --git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index c615e67ce2..001cb5e2f6 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -71,9 +71,9 @@ func TestPostgresJobRepository(t *testing.T) { return pool } - jobVersion, err := job.VersionFrom(1) + jobVersion := 1 assert.NoError(t, err) - jobOwner, err := job.OwnerFrom("dev_test") + jobOwner := "dev_test" assert.NoError(t, err) jobDescription := "sample job" jobRetry := job.NewRetry(5, 0, false) @@ -81,9 +81,9 @@ func TestPostgresJobRepository(t *testing.T) { assert.NoError(t, err) jobSchedule, err := job.NewScheduleBuilder(startDate).WithRetry(jobRetry).Build() assert.NoError(t, err) - jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, err := models.NewWindow(jobVersion, "d", "24h", "24h") assert.NoError(t, err) - jobTaskConfig, err := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobTaskConfig, err := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) assert.NoError(t, err) taskName, err := job.TaskNameFrom("bq2bq") assert.NoError(t, err) @@ -99,27 +99,32 @@ func TestPostgresJobRepository(t *testing.T) { jobLabels := map[string]string{ "environment": "integration", } - jobHookConfig, err := job.NewConfig(map[string]string{"sample_hook_key": "sample_value"}) + jobHookConfig, err := job.ConfigFrom(map[string]string{"sample_hook_key": "sample_value"}) assert.NoError(t, err) - jobHooks := []*job.Hook{job.NewHook("sample_hook", jobHookConfig)} - jobAlertConfig, err := job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + jobHook1, err := job.NewHook("sample_hook", jobHookConfig) + assert.NoError(t, err) + jobHooks := []*job.Hook{jobHook1} + jobAlertConfig, err := job.ConfigFrom(map[string]string{"sample_alert_key": "sample_value"}) + assert.NoError(t, err) + alert, 
err := job.NewAlertBuilder("sla_miss", []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() assert.NoError(t, err) - alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() jobAlerts := []*job.AlertSpec{alert} upstreamName1 := job.SpecUpstreamNameFrom("job-upstream-1") upstreamName2 := job.SpecUpstreamNameFrom("job-upstream-2") - jobUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() - jobAsset, err := job.NewAsset(map[string]string{"sample-asset": "value-asset"}) + jobUpstream, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() + assert.NoError(t, err) + jobAsset, err := job.AssetFrom(map[string]string{"sample-asset": "value-asset"}) assert.NoError(t, err) resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceMetadata := job.NewResourceMetadata(resourceRequestConfig, resourceLimitConfig) - jobMetadata, _ := job.NewMetadataBuilder(). + jobMetadata, err := job.NewMetadataBuilder(). WithResource(resourceMetadata). WithScheduler(map[string]string{"scheduler_config_key": "value"}). Build() + assert.NoError(t, err) - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -128,9 +133,10 @@ func TestPostgresJobRepository(t *testing.T) { WithAsset(jobAsset). WithMetadata(jobMetadata). Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask). 
+ jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -138,6 +144,7 @@ func TestPostgresJobRepository(t *testing.T) { WithAsset(jobAsset). WithMetadata(jobMetadata). Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobs := []*job.Job{jobA, jobB} @@ -154,7 +161,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("inserts job spec with optional fields empty", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobs := []*job.Job{jobA} @@ -167,14 +175,16 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("skip job and return job error if job already exist", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobRepo := postgres.NewJobRepository(db) _, err = jobRepo.Add(ctx, []*job.Job{jobA}) assert.NoError(t, err) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, 
jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"resource-3"}) addedJobs, err := jobRepo.Add(ctx, []*job.Job{jobA, jobB}) @@ -184,10 +194,12 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("return error if all jobs are failed to be saved", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"resource-3"}) jobRepo := postgres.NewJobRepository(db) @@ -204,27 +216,32 @@ func TestPostgresJobRepository(t *testing.T) { jobLabels := map[string]string{ "environment": "integration", } - jobHookConfig, err := job.NewConfig(map[string]string{"sample_hook_key": "sample_value"}) + jobHookConfig, err := job.ConfigFrom(map[string]string{"sample_hook_key": "sample_value"}) + assert.NoError(t, err) + jobHook1, err := job.NewHook("sample_hook", jobHookConfig) + assert.NoError(t, err) + jobHooks := []*job.Hook{jobHook1} + jobAlertConfig, err := job.ConfigFrom(map[string]string{"sample_alert_key": "sample_value"}) assert.NoError(t, err) - jobHooks := []*job.Hook{job.NewHook("sample_hook", jobHookConfig)} - jobAlertConfig, err := 
job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + alert, err := job.NewAlertBuilder("sla_miss", []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() assert.NoError(t, err) - alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() jobAlerts := []*job.AlertSpec{alert} upstreamName1 := job.SpecUpstreamNameFrom("job-upstream-1") upstreamName2 := job.SpecUpstreamNameFrom("job-upstream-2") - jobUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() - jobAsset, err := job.NewAsset(map[string]string{"sample-asset": "value-asset"}) + jobUpstream, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() + assert.NoError(t, err) + jobAsset, err := job.AssetFrom(map[string]string{"sample-asset": "value-asset"}) assert.NoError(t, err) resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceMetadata := job.NewResourceMetadata(resourceRequestConfig, resourceLimitConfig) - jobMetadata, _ := job.NewMetadataBuilder(). + jobMetadata, err := job.NewMetadataBuilder(). WithResource(resourceMetadata). WithScheduler(map[string]string{"scheduler_config_key": "value"}). Build() + assert.NoError(t, err) - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -233,9 +250,10 @@ func TestPostgresJobRepository(t *testing.T) { WithAsset(jobAsset). WithMetadata(jobMetadata). 
Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -243,6 +261,7 @@ func TestPostgresJobRepository(t *testing.T) { WithAsset(jobAsset). WithMetadata(jobMetadata). Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobs := []*job.Job{jobA, jobB} @@ -265,27 +284,32 @@ func TestPostgresJobRepository(t *testing.T) { jobLabels := map[string]string{ "environment": "integration", } - jobHookConfig, err := job.NewConfig(map[string]string{"sample_hook_key": "sample_value"}) + jobHookConfig, err := job.ConfigFrom(map[string]string{"sample_hook_key": "sample_value"}) assert.NoError(t, err) - jobHooks := []*job.Hook{job.NewHook("sample_hook", jobHookConfig)} - jobAlertConfig, err := job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + jobHook1, err := job.NewHook("sample_hook", jobHookConfig) + assert.NoError(t, err) + jobHooks := []*job.Hook{jobHook1} + jobAlertConfig, err := job.ConfigFrom(map[string]string{"sample_alert_key": "sample_value"}) + assert.NoError(t, err) + alert, err := job.NewAlertBuilder("sla_miss", []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() assert.NoError(t, err) - alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() jobAlerts := []*job.AlertSpec{alert} upstreamName1 := job.SpecUpstreamNameFrom("job-upstream-1") upstreamName2 := job.SpecUpstreamNameFrom("job-upstream-2") - jobUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() - jobAsset, err := 
job.NewAsset(map[string]string{"sample-asset": "value-asset"}) + jobUpstream, err := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() + assert.NoError(t, err) + jobAsset, err := job.AssetFrom(map[string]string{"sample-asset": "value-asset"}) assert.NoError(t, err) resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceMetadata := job.NewResourceMetadata(resourceRequestConfig, resourceLimitConfig) - jobMetadata, _ := job.NewMetadataBuilder(). + jobMetadata, err := job.NewMetadataBuilder(). WithResource(resourceMetadata). WithScheduler(map[string]string{"scheduler_config_key": "value"}). Build() + assert.NoError(t, err) - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -294,6 +318,7 @@ func TestPostgresJobRepository(t *testing.T) { WithAsset(jobAsset). WithMetadata(jobMetadata). 
Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobs := []*job.Job{jobA} @@ -320,10 +345,12 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("updates job spec", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobs := []*job.Job{jobA, jobB} @@ -333,9 +360,11 @@ func TestPostgresJobRepository(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, jobs, addedJobs) - jobSpecAToUpdate := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecAToUpdate, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). 
Build() + assert.NoError(t, err) + jobAToUpdate := job.NewJob(sampleTenant, jobSpecAToUpdate, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobBToUpdate := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"resource-4"}) jobsToUpdate := []*job.Job{jobAToUpdate, jobBToUpdate} @@ -347,19 +376,22 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("skip job and return job error if job not exist yet", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobRepo := postgres.NewJobRepository(db) _, err = jobRepo.Add(ctx, []*job.Job{jobA}) assert.NoError(t, err) - jobSpecAToUpdate := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecAToUpdate, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). 
Build() + assert.NoError(t, err) jobAToUpdate := job.NewJob(sampleTenant, jobSpecAToUpdate, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) jobBToUpdate := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"resource-4"}) jobsToUpdate := []*job.Job{jobAToUpdate, jobBToUpdate} @@ -370,10 +402,12 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("return error if all jobs are failed to be updated", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"resource-3"}) jobRepo := postgres.NewJobRepository(db) @@ -384,7 +418,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("should not update job if it has been soft deleted", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, 
jobTask).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobs := []*job.Job{jobA} @@ -411,7 +446,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("should not update job if it is owned by different namespace", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) jobs := []*job.Job{jobA} @@ -438,10 +474,12 @@ func TestPostgresJobRepository(t *testing.T) { tenantDetails, err := tenant.NewTenantDetails(proj, namespace) assert.NoError(t, err) - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobRepo := postgres.NewJobRepository(db) @@ -462,13 +500,14 @@ func TestPostgresJobRepository(t *testing.T) { upstreamName := job.SpecUpstreamNameFrom("sample-job-B") jobAUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - jobSpecA := 
job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, _ := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithSpecUpstream(jobAUpstream). Build() jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", nil) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobRepo := postgres.NewJobRepository(db) @@ -489,16 +528,18 @@ func TestPostgresJobRepository(t *testing.T) { upstreamName := job.SpecUpstreamNameFrom("test-proj/sample-job-B") jobAUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName}).Build() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, _ := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithSpecUpstream(jobAUpstream). 
Build() jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) - jobSpecC := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecC, err := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobC := job.NewJob(sampleTenant, jobSpecC, "dev.resource.sample_c", nil) jobRepo := postgres.NewJobRepository(db) @@ -527,22 +568,24 @@ func TestPostgresJobRepository(t *testing.T) { upstreamBName := job.SpecUpstreamNameFrom("test-proj/sample-job-B") jobAUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamBName, upstreamDName}).Build() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, _ := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithSpecUpstream(jobAUpstream). 
Build() jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_c", "dev.resource.sample_e"}) // internal project, same server - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) - jobSpecC := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecC, err := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobC := job.NewJob(sampleTenant, jobSpecC, "dev.resource.sample_c", nil) // external project, same server - jobSpecD := job.NewSpecBuilder(jobVersion, "sample-job-D", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecD, _ := job.NewSpecBuilder(jobVersion, "sample-job-D", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() jobD := job.NewJob(otherTenant, jobSpecD, "dev.resource.sample_d", nil) - jobSpecE := job.NewSpecBuilder(jobVersion, "sample-job-E", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecE, _ := job.NewSpecBuilder(jobVersion, "sample-job-E", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() jobE := job.NewJob(otherTenant, jobSpecE, "dev.resource.sample_e", nil) jobRepo := postgres.NewJobRepository(db) @@ -568,13 +611,16 @@ func TestPostgresJobRepository(t *testing.T) { }) t.Run("ReplaceUpstreams", func(t *testing.T) { - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, 
jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) - jobSpecC := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecC, err := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobC := job.NewJob(sampleTenant, jobSpecC, "dev.resource.sample_c", nil) t.Run("inserts job upstreams", func(t *testing.T) { @@ -635,7 +681,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("soft delete a job if not asked to do clean delete", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", nil) jobRepo := postgres.NewJobRepository(db) @@ -654,7 +701,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("should return error if the soft delete failed", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, 
jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobRepo := postgres.NewJobRepository(db) @@ -664,7 +712,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("hard delete a job if asked to do clean delete", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", nil) jobRepo := postgres.NewJobRepository(db) @@ -683,7 +732,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("should return error if the hard delete failed", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobRepo := postgres.NewJobRepository(db) @@ -693,7 +743,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("do delete job and delete upstream relationship", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b"}) jobRepo := 
postgres.NewJobRepository(db) @@ -721,7 +772,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns job success", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) @@ -736,7 +788,8 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("should not return job if it is soft deleted", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) @@ -761,9 +814,11 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns no error when get all jobs success", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, 
jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) @@ -779,13 +834,15 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns only active jobs excluding the soft deleted jobs", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", []job.ResourceURN{"dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) - _, err := jobRepo.Add(ctx, []*job.Job{jobA, jobB}) + _, err = jobRepo.Add(ctx, []*job.Job{jobA, jobB}) assert.NoError(t, err) err = jobRepo.Delete(ctx, sampleTenant.ProjectName(), jobSpecB.Name(), false) @@ -801,13 +858,15 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns no error when get all jobs success", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, 
jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_general", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_general", []job.ResourceURN{"dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) - _, err := jobRepo.Add(ctx, []*job.Job{jobA, jobB}) + _, err = jobRepo.Add(ctx, []*job.Job{jobA, jobB}) assert.NoError(t, err) actual, err := jobRepo.GetAllByResourceDestination(ctx, "dev.resource.sample_general") @@ -819,13 +878,15 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns only active jobs excluding the soft deleted jobs", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_general", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + 
assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_general", []job.ResourceURN{"dev.resource.sample_c"}) jobRepo := postgres.NewJobRepository(db) - _, err := jobRepo.Add(ctx, []*job.Job{jobA, jobB}) + _, err = jobRepo.Add(ctx, []*job.Job{jobA, jobB}) assert.NoError(t, err) err = jobRepo.Delete(ctx, sampleTenant.ProjectName(), jobSpecB.Name(), false) @@ -842,19 +903,21 @@ func TestPostgresJobRepository(t *testing.T) { // TODO: test is failing for nullable fields in upstream db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_general", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) jobAUpstreamResolved := job.NewUpstreamResolved("sample-job-B", "", "", sampleTenant, "inferred", taskName, false) jobAUpstreamUnresolved := job.NewUpstreamUnresolvedInferred("dev.resource.sample_c") - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobAWithUpstream := job.NewWithUpstream(jobA, []*job.Upstream{jobAUpstreamResolved, jobAUpstreamUnresolved}) jobRepo := postgres.NewJobRepository(db) - _, err := jobRepo.Add(ctx, []*job.Job{jobA, jobB}) + _, err = jobRepo.Add(ctx, []*job.Job{jobA, jobB}) assert.NoError(t, err) err = jobRepo.ReplaceUpstreams(ctx, []*job.WithUpstream{jobAWithUpstream}) @@ -871,13 +934,16 @@ func TestPostgresJobRepository(t 
*testing.T) { db := dbSetup() jobAUpstreamSpec, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{"sample-job-B"}).Build() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).WithSpecUpstream(jobAUpstreamSpec).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).WithSpecUpstream(jobAUpstreamSpec).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_c"}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) - jobSpecC := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecC, err := job.NewSpecBuilder(jobVersion, "sample-job-C", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobC := job.NewJob(sampleTenant, jobSpecC, "dev.resource.sample_c", nil) jobRepo := postgres.NewJobRepository(db) @@ -897,14 +963,16 @@ func TestPostgresJobRepository(t *testing.T) { t.Run("returns downstream given a job name", func(t *testing.T) { db := dbSetup() - jobSpecA := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecA, err := job.NewSpecBuilder(jobVersion, "sample-job-A", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, 
"dev.resource.sample_a", []job.ResourceURN{"dev.resource.sample_b", "dev.resource.sample_c"}) jobAUpstreamResolved := job.NewUpstreamResolved("sample-job-B", "", "", sampleTenant, "inferred", taskName, false) jobAUpstreamUnresolved := job.NewUpstreamUnresolvedInferred("dev.resource.sample_c") jobAWithUpstream := job.NewWithUpstream(jobA, []*job.Upstream{jobAUpstreamResolved, jobAUpstreamUnresolved}) - jobSpecB := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + jobSpecB, err := job.NewSpecBuilder(jobVersion, "sample-job-B", jobOwner, jobSchedule, jobWindow, jobTask).WithDescription(jobDescription).Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobRepo := postgres.NewJobRepository(db) From 2c98e05dac347dd3df5151a16fcda4fa7fadb097 Mon Sep 17 00:00:00 2001 From: Arinda Arif Date: Wed, 4 Jan 2023 17:11:53 +0700 Subject: [PATCH 23/25] fix: scheduler job repo test failures due to spec changes --- .../postgres/scheduler/job_repository_test.go | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/internal/store/postgres/scheduler/job_repository_test.go b/internal/store/postgres/scheduler/job_repository_test.go index 43224d345a..c3829f160c 100644 --- a/internal/store/postgres/scheduler/job_repository_test.go +++ b/internal/store/postgres/scheduler/job_repository_test.go @@ -107,19 +107,17 @@ func dbSetup() *pgxpool.Pool { func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]*job.Job { t.Helper() - jobVersion, err := job.VersionFrom(1) - assert.NoError(t, err) - jobOwner, err := job.OwnerFrom("dev_test") - assert.NoError(t, err) + jobVersion := 1 + jobOwner := "dev_test" jobDescription := "sample job" jobRetry := job.NewRetry(5, 0, false) startDate, err := job.ScheduleDateFrom("2022-10-01") assert.NoError(t, err) jobSchedule, err := 
job.NewScheduleBuilder(startDate).WithRetry(jobRetry).Build() assert.NoError(t, err) - jobWindow, err := models.NewWindow(jobVersion.Int(), "d", "24h", "24h") + jobWindow, err := models.NewWindow(jobVersion, "d", "24h", "24h") assert.NoError(t, err) - jobTaskConfig, err := job.NewConfig(map[string]string{"sample_task_key": "sample_value"}) + jobTaskConfig, err := job.ConfigFrom(map[string]string{"sample_task_key": "sample_value"}) assert.NoError(t, err) taskName, err := job.TaskNameFrom("bq2bq") assert.NoError(t, err) @@ -149,17 +147,19 @@ func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]* namespaceRepo := tenantPostgres.NewNamespaceRepository(pool) assert.NoError(t, namespaceRepo.Save(ctx, namespace)) - jobHookConfig, err := job.NewConfig(map[string]string{"sample_hook_key": "sample_value"}) + jobHookConfig, err := job.ConfigFrom(map[string]string{"sample_hook_key": "sample_value"}) + assert.NoError(t, err) + hookSpec, err := job.NewHook("sample_hook", jobHookConfig) assert.NoError(t, err) - jobHooks := []*job.Hook{job.NewHook("sample_hook", jobHookConfig)} - jobAlertConfig, err := job.NewConfig(map[string]string{"sample_alert_key": "sample_value"}) + jobHooks := []*job.Hook{hookSpec} + jobAlertConfig, err := job.ConfigFrom(map[string]string{"sample_alert_key": "sample_value"}) assert.NoError(t, err) - alert, _ := job.NewAlertBuilder(job.SLAMissEvent, []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() + alert, _ := job.NewAlertBuilder("sla_miss", []string{"sample-channel"}).WithConfig(jobAlertConfig).Build() jobAlerts := []*job.AlertSpec{alert} upstreamName1 := job.SpecUpstreamNameFrom("job-upstream-1") upstreamName2 := job.SpecUpstreamNameFrom("job-upstream-2") jobUpstream, _ := job.NewSpecUpstreamBuilder().WithUpstreamNames([]job.SpecUpstreamName{upstreamName1, upstreamName2}).Build() - jobAsset, err := job.NewAsset(map[string]string{"sample-asset": "value-asset"}) + jobAsset, err := 
job.AssetFrom(map[string]string{"sample-asset": "value-asset"}) assert.NoError(t, err) resourceRequestConfig := job.NewMetadataResourceConfig("250m", "128Mi") resourceLimitConfig := job.NewMetadataResourceConfig("250m", "128Mi") @@ -168,7 +168,7 @@ func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]* WithResource(resourceMetadata). WithScheduler(map[string]string{"scheduler_config_key": "value"}). Build() - jobSpecA := job.NewSpecBuilder(jobVersion, jobAName, jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecA, err := job.NewSpecBuilder(jobVersion, jobAName, jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -177,11 +177,12 @@ func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]* WithAsset(jobAsset). WithMetadata(jobMetadata). Build() + assert.NoError(t, err) sampleTenant, err := tenant.NewTenant(proj.Name().String(), namespace.Name().String()) assert.NoError(t, err) jobA := job.NewJob(sampleTenant, jobSpecA, "dev.resource.sample_a", []job.ResourceURN{"resource-3"}) - jobSpecB := job.NewSpecBuilder(jobVersion, jobBName, jobOwner, jobSchedule, jobWindow, jobTask). + jobSpecB, err := job.NewSpecBuilder(jobVersion, jobBName, jobOwner, jobSchedule, jobWindow, jobTask). WithDescription(jobDescription). WithLabels(jobLabels). WithHooks(jobHooks). @@ -189,6 +190,7 @@ func addJobs(ctx context.Context, t *testing.T, pool *pgxpool.Pool) map[string]* WithAsset(jobAsset). WithMetadata(jobMetadata). 
Build() + assert.NoError(t, err) jobB := job.NewJob(sampleTenant, jobSpecB, "dev.resource.sample_b", nil) jobs := []*job.Job{jobA, jobB} @@ -214,7 +216,7 @@ func compareEqualJob(j *job.Job, s *scheduler.Job) bool { func compareEqualJobWithDetails(j *job.Job, s *scheduler.JobWithDetails) bool { return compareEqualJob(j, s.Job) && j.GetName() == s.Name.String() && - j.Spec().Version().Int() == s.JobMetadata.Version && - j.Spec().Owner().String() == s.JobMetadata.Owner && + j.Spec().Version() == s.JobMetadata.Version && + j.Spec().Owner() == s.JobMetadata.Owner && j.Spec().Schedule().Interval() == s.Schedule.Interval } From c9bd3eadb44b54f39fd7344dcfee564bef008015 Mon Sep 17 00:00:00 2001 From: Arinda Arif Date: Wed, 4 Jan 2023 17:38:34 +0700 Subject: [PATCH 24/25] feat: add 000046 migration down file --- .../migrations/000046_update_job_table.down.sql | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 internal/store/postgres/migrations/000046_update_job_table.down.sql diff --git a/internal/store/postgres/migrations/000046_update_job_table.down.sql b/internal/store/postgres/migrations/000046_update_job_table.down.sql new file mode 100644 index 0000000000..b614777ef3 --- /dev/null +++ b/internal/store/postgres/migrations/000046_update_job_table.down.sql @@ -0,0 +1,14 @@ +ALTER TABLE job + +DROP COLUMN schedule, +ADD COLUMN start_date TIMESTAMP NOT NULL, +ADD COLUMN end_date TIMESTAMP, +ADD COLUMN interval VARCHAR(50), +ADD COLUMN depends_on_past BOOLEAN, +ADD COLUMN catch_up BOOLEAN, +ADD COLUMN retry JSONB, + +DROP COLUMN window_spec, +ADD COLUMN window_size VARCHAR(10), +ADD COLUMN window_offset VARCHAR(10), +ADD COLUMN window_truncate_to VARCHAR(10); From 36117251391c4202ec9002b9a239e71c17030418 Mon Sep 17 00:00:00 2001 From: Arinda Arif Date: Thu, 5 Jan 2023 12:40:51 +0700 Subject: [PATCH 25/25] fix: inferred dependency resolution issue when duplicated job name found across projects --- internal/store/postgres/job/job_repository.go | 6 ++--- 
.../store/postgres/job/job_repository_test.go | 22 +++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/internal/store/postgres/job/job_repository.go b/internal/store/postgres/job/job_repository.go index 05b7fe43a5..35adbec200 100644 --- a/internal/store/postgres/job/job_repository.go +++ b/internal/store/postgres/job/job_repository.go @@ -515,8 +515,8 @@ INSERT INTO job_upstream ( created_at ) VALUES ( - (select id FROM job WHERE name = $1), $1, $2, - (select id FROM job WHERE name = $3), $3, $4, + (select id FROM job WHERE name = $1 and project_name = $2), $1, $2, + (select id FROM job WHERE name = $3 and project_name = $4), $3, $4, $5, $6, $7, $8, $9, $10, $11, @@ -531,7 +531,7 @@ INSERT INTO job_upstream ( created_at ) VALUES ( - (select id FROM job WHERE name = $1), $1, $2, + (select id FROM job WHERE name = $1 and project_name = $2), $1, $2, $3, $4, $5, $6, $7, NOW() diff --git a/internal/store/postgres/job/job_repository_test.go b/internal/store/postgres/job/job_repository_test.go index 001cb5e2f6..ae013460d7 100644 --- a/internal/store/postgres/job/job_repository_test.go +++ b/internal/store/postgres/job/job_repository_test.go @@ -675,6 +675,28 @@ func TestPostgresJobRepository(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, []*job.Upstream{upstreamC}, upstreamsOfJobA) }) + t.Run("inserts job upstreams with exact name across projects exists", func(t *testing.T) { + db := dbSetup() + + upstreamB := job.NewUpstreamResolved("jobB", host, "resource-B", sampleTenant, upstreamType, taskName, false) + upstreamC := job.NewUpstreamResolved("jobC", host, "resource-C", sampleTenant, upstreamType, taskName, false) + upstreams := []*job.Upstream{upstreamB, upstreamC} + jobWithUpstream := job.NewWithUpstream(jobA, upstreams) + + jobUpstreamRepo := postgres.NewJobRepository(db) + _, err := jobUpstreamRepo.Add(ctx, []*job.Job{jobA, jobB, jobC}) + assert.NoError(t, err) + + otherTenant, err := 
tenant.NewTenant(otherProj.Name().String(), otherNamespace.Name().String()) + assert.NoError(t, err) + + otherProjectJobA := job.NewJob(otherTenant, jobSpecA, "dev-external.resource.sample_a", []job.ResourceURN{"dev-external.resource.sample_c"}) + otherProjectJobB := job.NewJob(otherTenant, jobSpecB, "dev-external.resource.sample_b", nil) + _, err = jobUpstreamRepo.Add(ctx, []*job.Job{otherProjectJobA, otherProjectJobB}) + assert.NoError(t, err) + + assert.Nil(t, jobUpstreamRepo.ReplaceUpstreams(ctx, []*job.WithUpstream{jobWithUpstream})) + }) }) t.Run("Delete", func(t *testing.T) {