
Burn dynamo burn (#1025)

Fixes #971 

Changes in this pull request:
- Remove the `dynamodb` module
- Update `build.sbt`
- Update docs
- Update Docker files
- Update configs everywhere

Author: Paul Cleary
Date: 2020-10-23 17:26:52 -04:00
Committed by: GitHub
Parent: aeb5b8310c
Commit: 25acdb13c7
69 changed files with 154 additions and 9346 deletions

View File

@@ -170,7 +170,6 @@ You should now be able to see the zone in the portal at localhost:9001 when logg
Integration tests are used to test integration with _real_ dependent services. We use Docker to spin up those
backend services for integration test development.
1. Integration tests are currently in the `api`, `dynamo`, `mysql`, and `sqs` modules.
1. Type `dockerComposeUp` to start up dependent background services
1. Go to the target module in sbt, example: `project api`
1. Run all integration tests by typing `it:test`.
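A quick sketch of that flow from an interactive sbt shell, using the `api` module as the example target (any of the modules listed above works the same way):
```
$ sbt                 # start an sbt shell from the repository root
> dockerComposeUp     # spin up the dependent backend services in Docker
> project api         # switch to the module under test
> it:test             # run that module's integration tests
```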
@@ -266,7 +265,6 @@ them after the tests complete at:
* `target/vinyldns-api.log` - the API server logs
* `target/vinyldns-bind9.log` - the Bind9 DNS server logs
* `target/vinyldns-dynamodb.log` - the DynamoDB server logs
* `target/vinyldns-elasticmq.log` - the ElasticMQ (SQS) server logs
* `target/vinyldns-functest.log` - the output of running the functional tests
* `target/vinyldns-mysql.log` - the MySQL server logs

View File

@@ -48,7 +48,6 @@ docker logs vinyldns-api > $DIR/../target/vinyldns-api.log 2>/dev/null
docker logs vinyldns-bind9 > $DIR/../target/vinyldns-bind9.log 2>/dev/null
docker logs vinyldns-mysql > $DIR/../target/vinyldns-mysql.log 2>/dev/null
docker logs vinyldns-elasticmq > $DIR/../target/vinyldns-elasticmq.log 2>/dev/null
docker logs vinyldns-dynamodb > $DIR/../target/vinyldns-dynamodb.log 2>/dev/null
docker logs vinyldns-functest > $DIR/../target/vinyldns-functest.log 2>/dev/null
echo "Cleaning up docker containers..."

View File

@@ -48,7 +48,6 @@ docker logs vinyldns-api > $DIR/../target/vinyldns-api.log 2>/dev/null
docker logs vinyldns-bind9 > $DIR/../target/vinyldns-bind9.log 2>/dev/null
docker logs vinyldns-mysql > $DIR/../target/vinyldns-mysql.log 2>/dev/null
docker logs vinyldns-elasticmq > $DIR/../target/vinyldns-elasticmq.log 2>/dev/null
docker logs vinyldns-dynamodb > $DIR/../target/vinyldns-dynamodb.log 2>/dev/null
docker logs vinyldns-functest > $DIR/../target/vinyldns-functest.log 2>/dev/null
echo "Cleaning up docker containers..."

View File

@@ -3,11 +3,12 @@
# The local vinyldns setup used for testing relies on the
# following docker images:
# mysql:5.7
# cnadiminti/dynamodb-local:2017-02-16
# s12v/elasticmq:0.13.8
# vinyldns/bind9
# vinyldns/api
# vinyldns/portal
# rroemhild/test-openldap
# localstack/localstack
#
# This script will kill and remove containers associated
# with these names and/or tags
@@ -15,7 +16,7 @@
# Note: this will not remove the actual images from your
# machine, just the running containers
IDS=$(docker ps -a | grep -e 'mysql:5.7' -e 'cnadiminti/dynamodb-local:2017-02-16' -e 's12v/elasticmq:0.13.8' -e 'vinyldns' -e 'flaviovs/mock-smtp' -e 'localstack/localstack' -e 'rroemhild/test-openldap' | awk '{print $1}')
IDS=$(docker ps -a | grep -e 'mysql:5.7' -e 's12v/elasticmq:0.13.8' -e 'vinyldns' -e 'flaviovs/mock-smtp' -e 'localstack/localstack' -e 'rroemhild/test-openldap' | awk '{print $1}')
echo "killing..."
echo $(echo "$IDS" | xargs -I {} docker kill {})
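Per the note above, the script leaves the underlying images on disk. If you also want to reclaim those, a rough sketch using the image tags from the comments and compose files in this repo (adjust the tags if your local setup differs):
```
# remove the third-party images by tag (tags as used in the local compose setup)
docker rmi mysql:5.7 s12v/elasticmq:0.13.8 localstack/localstack:0.10.4 \
  rroemhild/test-openldap flaviovs/mock-smtp
# remove locally built vinyldns/* images, mirroring the grep pattern used above
docker images -q --filter "reference=vinyldns/*" | xargs -I {} docker rmi {}
```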

View File

@@ -141,10 +141,6 @@ lazy val portalDockerSettings = Seq(
credentials in Docker := Seq(Credentials(Path.userHome / ".ivy2" / ".dockerCredentials"))
)
lazy val dynamoDBDockerSettings = Seq(
composeFile := baseDirectory.value.getAbsolutePath + "/docker/docker-compose.yml"
)
lazy val noPublishSettings = Seq(
publish := {},
publishLocal := {},
@@ -194,7 +190,6 @@ lazy val api = (project in file("modules/api"))
.settings(inConfig(IntegrationTest)(scalafmtConfigSettings))
.dependsOn(
core % "compile->compile;test->test",
dynamodb % "compile->compile;it->it",
mysql % "compile->compile;it->it",
sqs % "compile->compile;it->it",
r53 % "compile->compile;it->it"
@@ -212,7 +207,7 @@ lazy val root = (project in file(".")).enablePlugins(DockerComposePlugin, Automa
"./bin/remove-vinyl-containers.sh" !
},
)
.aggregate(core, api, portal, dynamodb, mysql, sqs, r53)
.aggregate(core, api, portal, mysql, sqs, r53)
lazy val coreBuildSettings = Seq(
name := "core",
@@ -257,23 +252,6 @@ lazy val core = (project in file("modules/core")).enablePlugins(AutomateHeaderPl
organization := "io.vinyldns"
)
lazy val dynamodb = (project in file("modules/dynamodb"))
.enablePlugins(AutomateHeaderPlugin)
.configs(IntegrationTest)
.settings(sharedSettings)
.settings(headerSettings(IntegrationTest))
.settings(inConfig(IntegrationTest)(scalafmtConfigSettings))
.settings(corePublishSettings)
.settings(testSettings)
.settings(Defaults.itSettings)
.settings(libraryDependencies ++= dynamoDBDependencies ++ commonTestDependencies.map(_ % "test, it"))
.settings(
organization := "io.vinyldns",
parallelExecution in Test := true,
parallelExecution in IntegrationTest := true
).dependsOn(core % "compile->compile;test->test")
.settings(name := "dynamodb")
lazy val mysql = (project in file("modules/mysql"))
.enablePlugins(AutomateHeaderPlugin)
.configs(IntegrationTest)
@@ -366,7 +344,7 @@ lazy val portal = (project in file("modules/portal")).enablePlugins(PlayScala, A
// change the name of the output to portal.zip
packageName in Universal := "portal"
)
.dependsOn(dynamodb, mysql)
.dependsOn(mysql)
lazy val docSettings = Seq(
git.remoteRepo := "https://github.com/vinyldns/vinyldns",
@@ -469,7 +447,6 @@ releaseProcess :=
addCommandAlias("validate", "; root/clean; " +
"all core/headerCheck core/test:headerCheck " +
"api/headerCheck api/test:headerCheck api/it:headerCheck " +
"dynamodb/headerCheck dynamodb/test:headerCheck dynamodb/it:headerCheck " +
"mysql/headerCheck mysql/test:headerCheck mysql/it:headerCheck " +
"r53/headerCheck r53/test:headerCheck r53/it:headerCheck " +
"sqs/headerCheck sqs/test:headerCheck sqs/it:headerCheck " +

View File

@@ -11,7 +11,6 @@ PORTAL_PORT=9001
PLAY_HTTP_SECRET_KEY=change-this-for-prod
VINYLDNS_BACKEND_URL=http://vinyldns-api:9000
SQS_ENDPOINT=http://vinyldns-localstack:19007
DYNAMODB_ENDPOINT=http://vinyldns-dynamodb:8000
MYSQL_ENDPOINT=vinyldns-mysql:3306
USER_TABLE_NAME=users
USER_CHANGE_TABLE_NAME=userChange

View File

@@ -11,7 +11,6 @@ PORTAL_PORT=9001
PLAY_HTTP_SECRET_KEY=change-this-for-prod
VINYLDNS_BACKEND_URL=http://vinyldns-api:9000
SQS_ENDPOINT=http://vinyldns-localstack:19007
DYNAMODB_ENDPOINT=http://vinyldns-localstack:19000
MYSQL_ENDPOINT=vinyldns-mysql:3306
USER_TABLE_NAME=users
USER_CHANGE_TABLE_NAME=userChange

View File

@@ -11,7 +11,6 @@
# JDBC_URL - the full URL to the SQL database
# JDBC_USER - the SQL database user
# JDBC_PASSWORD - the SQL database password
# DYNAMODB_ENDPOINT - the endpoint for DynamoDB
# DEFAULT_DNS_ADDRESS - the server (and port if not 53) of the default DNS server
# DEFAULT_DNS_KEY_NAME - the default key name used to connect to the default DNS server
# DEFAULT_DNS_KEY_SECRET - the default key secret used to connect to the default DNS server
@@ -135,7 +134,7 @@ vinyldns {
type = "vinyldns.core.crypto.NoOpCrypto"
}
data-stores = ["mysql", "dynamodb"]
data-stores = ["mysql"]
mysql {
settings {
@@ -192,22 +191,6 @@ vinyldns {
}
}
dynamodb {
settings {
# default settings point to the docker compose setup
key = "x"
key = ${?AWS_ACCESS_KEY}
secret = "x"
secret = ${?AWS_SECRET_ACCESS_KEY}
endpoint = "http://vinyldns-dynamodb:8000"
endpoint = ${?DYNAMODB_ENDPOINT}
}
repositories {
# none
}
}
backends = []
batch-change-limit = 1000

View File

@@ -19,17 +19,6 @@ services:
logging:
driver: none
dynamodb-local:
image: cnadiminti/dynamodb-local:2017-02-16
container_name: "vinyldns-dynamodb"
env_file:
.env
logging:
driver: none
ports:
- "19000:8000"
command: "--sharedDb --inMemory"
localstack:
image: localstack/localstack:0.10.4
container_name: "vinyldns-localstack"
@@ -39,7 +28,7 @@ services:
- "19007:19007"
- "19009:19009"
environment:
- SERVICES=dynamodb:19000,sns:19006,sqs:19007,route53:19009
- SERVICES=sns:19006,sqs:19007,route53:19009
- START_WEB=0
- HOSTNAME_EXTERNAL=vinyldns-localstack

View File

@@ -24,17 +24,6 @@ services:
logging:
driver: none
dynamodb-local:
image: cnadiminti/dynamodb-local:2017-02-16
container_name: "vinyldns-dynamodb"
env_file:
.env
logging:
driver: none
ports:
- "19000:8000"
command: "--sharedDb --inMemory"
localstack:
image: localstack/localstack:0.10.4
container_name: "vinyldns-localstack"

View File

@@ -24,12 +24,11 @@ services:
image: localstack/localstack:0.10.4
container_name: "vinyldns-localstack"
ports:
- "19000:19000"
- "19006:19006"
- "19007:19007"
- "19009:19009"
environment:
- SERVICES=dynamodb:19000,sns:19006,sqs:19007,route53:19009
- SERVICES=sns:19006,sqs:19007,route53:19009
- START_WEB=0
- HOSTNAME_EXTERNAL=vinyldns-localstack

View File

@@ -18,14 +18,6 @@ services:
- ./bind9/etc:/var/cache/bind/config
- ./bind9/zones:/var/cache/bind/zones
dynamodb-local:
image: cnadiminti/dynamodb-local:2017-02-16<skipPull>
env_file:
.env
ports:
- "19000:8000"
command: "--sharedDb --inMemory"
localstack:
image: localstack/localstack:0.10.4<skipPull>
ports:

View File

@@ -77,10 +77,6 @@ vinyldns {
# types of unowned records that users can access in shared zones
shared-approved-types = ["A", "AAAA", "CNAME", "PTR", "TXT"]
dynamodb.repositories {
# none
}
crypto {
type = "vinyldns.core.crypto.NoOpCrypto"
}

View File

@@ -1,26 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api
import org.scalatest.concurrent.ScalaFutures
import vinyldns.dynamodb.repository.{DynamoDBDataStoreSettings, DynamoDBIntegrationSpec}
trait DynamoDBApiIntegrationSpec extends DynamoDBIntegrationSpec with ScalaFutures {
override val dynamoIntegrationConfig: DynamoDBDataStoreSettings = getDynamoConfig(19000)
}

View File

@@ -16,8 +16,24 @@
package vinyldns.api
import com.typesafe.config.{Config, ConfigFactory}
import scalikejdbc.DB
import vinyldns.mysql.MySqlIntegrationSpec
trait MySqlApiIntegrationSpec extends MySqlIntegrationSpec {
val mysqlConfig: Config = ConfigFactory.load().getConfig("vinyldns.mysql")
def clearRecordSetRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM recordset")
}
def clearZoneRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM zone")
}
def clearGroupRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM groups")
}
}

View File

@@ -17,13 +17,14 @@
package vinyldns.api.domain.record
import cats.effect._
import cats.implicits._
import cats.scalatest.EitherMatchers
import org.joda.time.DateTime
import org.mockito.Mockito._
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.api._
import vinyldns.api.config.VinylDNSConfig
import vinyldns.api.domain.access.AccessValidations
@@ -38,31 +39,22 @@ import vinyldns.core.domain.membership.{Group, GroupRepository, User, UserReposi
import vinyldns.core.domain.record.RecordType._
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone._
import vinyldns.dynamodb.repository.{DynamoDBRecordSetRepository, DynamoDBRepositorySettings}
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
class RecordSetServiceIntegrationSpec
extends DynamoDBApiIntegrationSpec
extends AnyWordSpec
with ResultHelpers
with EitherMatchers
with MockitoSugar
with Matchers
with MySqlApiIntegrationSpec {
with MySqlApiIntegrationSpec
with BeforeAndAfterEach
with BeforeAndAfterAll {
private val vinyldnsConfig = VinylDNSConfig.load().unsafeRunSync()
private val recordSetTable = "recordSetTest"
private val recordSetStoreConfig = DynamoDBRepositorySettings(s"$recordSetTable", 30, 30)
private val timeout = PatienceConfiguration.Timeout(Span(10, Seconds))
private var recordSetRepo: DynamoDBRecordSetRepository = _
private var zoneRepo: ZoneRepository = _
private var groupRepo: GroupRepository = _
private val recordSetRepo = recordSetRepository
private val zoneRepo: ZoneRepository = zoneRepository
private val groupRepo: GroupRepository = groupRepository
private var testRecordSetService: RecordSetServiceAlgebra = _
@@ -229,31 +221,57 @@ class RecordSetServiceIntegrationSpec
private val mockBackendResolver = mock[BackendResolver]
private val mockBackend = mock[Backend]
def setup(): Unit = {
recordSetRepo =
DynamoDBRecordSetRepository(recordSetStoreConfig, dynamoIntegrationConfig).unsafeRunSync()
zoneRepo = zoneRepository
groupRepo = groupRepository
List(group, group2, sharedGroup).map(g => waitForSuccess(groupRepo.save(g)))
List(zone, zoneTestNameConflicts, zoneTestAddRecords, sharedZone).map(
z => waitForSuccess(zoneRepo.save(z))
override def afterAll(): Unit = {
clearRecordSetRepo()
clearZoneRepo()
clearGroupRepo()
}
override def beforeEach(): Unit = {
def makeAddChange(rs: RecordSet, zone: Zone): RecordSetChange =
RecordSetChange(
zone = zone,
recordSet = rs,
userId = "system",
changeType = RecordSetChangeType.Create,
status = RecordSetChangeStatus.Pending,
singleBatchChangeIds = Nil
)
clearRecordSetRepo()
clearZoneRepo()
clearGroupRepo()
List(group, group2, sharedGroup).traverse(g => groupRepo.save(g).void).unsafeRunSync()
List(zone, zoneTestNameConflicts, zoneTestAddRecords, sharedZone)
.traverse(
z => zoneRepo.save(z)
)
.unsafeRunSync()
// Seeding records in DB
val records = List(
val sharedRecords = List(
sharedTestRecord,
sharedTestRecordBadOwnerGroup
)
val conflictRecords = List(
subTestRecordNameConflict,
apexTestRecordNameConflict
)
val zoneRecords = List(
apexTestRecordA,
apexTestRecordAAAA,
subTestRecordA,
subTestRecordAAAA,
subTestRecordNS,
apexTestRecordNameConflict,
subTestRecordNameConflict,
highValueDomainRecord,
sharedTestRecord,
sharedTestRecordBadOwnerGroup,
testOwnerGroupRecordInNormalZone
)
records.map(record => waitForSuccess(recordSetRepo.putRecordSet(record)))
val changes = ChangeSet(
sharedRecords.map(makeAddChange(_, sharedZone)) ++
conflictRecords.map(makeAddChange(_, zoneTestNameConflicts)) ++
zoneRecords.map(makeAddChange(_, zone))
)
recordSetRepo.apply(changes).unsafeRunSync()
testRecordSetService = new RecordSetService(
zoneRepo,
@@ -270,18 +288,13 @@ class RecordSetServiceIntegrationSpec
)
}
def tearDown(): Unit = ()
"DynamoDBRecordSetRepository" should {
"MySqlRecordSetRepository" should {
"not alter record name when seeding database for tests" in {
val originalRecord = testRecordSetService
.getRecordSet(apexTestRecordA.id, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetInfo]]
whenReady(originalRecord, timeout) { out =>
rightValue(out).name shouldBe "live-zone-test"
}
.unsafeRunSync()
rightValue(originalRecord).name shouldBe "live-zone-test"
}
}
@@ -301,11 +314,11 @@ class RecordSetServiceIntegrationSpec
testRecordSetService
.addRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
rightValue(out).recordSet.name shouldBe "zone-test-add-records."
}
.unsafeRunSync()
rightValue(result)
.asInstanceOf[RecordSetChange]
.recordSet
.name shouldBe "zone-test-add-records."
}
"update apex A record and add trailing dot" in {
@@ -313,70 +326,55 @@ class RecordSetServiceIntegrationSpec
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
val change = rightValue(out)
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "live-zone-test."
change.recordSet.ttl shouldBe 200
}
}
"update apex AAAA record and add trailing dot" in {
val newRecord = apexTestRecordAAAA.copy(ttl = 200)
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
val change = rightValue(out)
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "live-zone-test."
change.recordSet.ttl shouldBe 200
}
}
"update relative A record without adding trailing dot" in {
val newRecord = subTestRecordA.copy(ttl = 200)
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
val change = rightValue(out)
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "a-record"
change.recordSet.ttl shouldBe 200
}
}
"update relative AAAA without adding trailing dot" in {
val newRecord = subTestRecordAAAA.copy(ttl = 200)
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
val change = rightValue(out)
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "aaaa-record"
change.recordSet.ttl shouldBe 200
}
}
"update relative NS record without trailing dot" in {
val newRecord = subTestRecordNS.copy(ttl = 200)
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
val change = rightValue(out)
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "ns-record"
change.recordSet.ttl shouldBe 200
}
}
"fail to add relative record if apex record with same name already exists" in {
val newRecord = apexTestRecordNameConflict.copy(name = "zone-test-name-conflicts")
@@ -393,15 +391,12 @@ class RecordSetServiceIntegrationSpec
testRecordSetService
.addRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
leftValue(out) shouldBe a[RecordSetAlreadyExists]
}
.unsafeRunSync()
leftValue(result) shouldBe a[RecordSetAlreadyExists]
}
"fail to add apex record if relative record with same name already exists" in {
val newRecord = subTestRecordNameConflict.copy(name = "relative-name-conflict.")
val newRecord = subTestRecordNameConflict.copy(name = "relative-name-conflict")
doReturn(IO(List(newRecord)))
.when(mockBackend)
@@ -411,11 +406,8 @@ class RecordSetServiceIntegrationSpec
testRecordSetService
.addRecordSet(newRecord, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetChange]]
whenReady(result, timeout) { out =>
leftValue(out) shouldBe a[RecordSetAlreadyExists]
}
.unsafeRunSync()
leftValue(result) shouldBe a[RecordSetAlreadyExists]
}
"fail to add a dns record whose name is a high value domain" in {
@@ -460,12 +452,10 @@ class RecordSetServiceIntegrationSpec
testRecordSetService
.getRecordSet(sharedTestRecord.id, auth2)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetInfo]]
whenReady(result, timeout) { out =>
rightValue(out).name shouldBe "shared-record"
rightValue(out).ownerGroupName shouldBe Some(sharedGroup.name)
}
.unsafeRunSync()
rightValue(result).name shouldBe "shared-record"
rightValue(result).ownerGroupName shouldBe Some(sharedGroup.name)
}
"get a shared record when owner group can't be found" in {
@@ -473,12 +463,9 @@ class RecordSetServiceIntegrationSpec
testRecordSetService
.getRecordSet(sharedTestRecordBadOwnerGroup.id, auth)
.value
.unsafeToFuture()
.mapTo[Either[Throwable, RecordSetInfo]]
whenReady(result, timeout) { out =>
rightValue(out).name shouldBe "shared-record-bad-owner-group"
rightValue(out).ownerGroupName shouldBe None
}
.unsafeRunSync()
rightValue(result).name shouldBe "shared-record-bad-owner-group"
rightValue(result).ownerGroupName shouldBe None
}
"fail updating if user is in owner group but zone is not shared" in {
@@ -587,9 +574,4 @@ class RecordSetServiceIntegrationSpec
result should be(right)
}
}
private def waitForSuccess[T](f: => IO[T]): T = {
val waiting = f.unsafeToFuture().recover { case _ => Thread.sleep(2000); waitForSuccess(f) }
Await.result[T](waiting, 15.seconds)
}
}

View File

@@ -26,7 +26,6 @@ import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures}
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.time.{Seconds, Span}
import scalikejdbc.DB
import vinyldns.api.domain.access.AccessValidations
import vinyldns.api.domain.record.RecordSetChangeGenerator
import vinyldns.api.engine.TestMessageQueue
@@ -99,16 +98,6 @@ class ZoneServiceIntegrationSpec
private val mockBackendResolver = mock[BackendResolver]
def clearRecordSetRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM recordset")
}
def clearZoneRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM zone")
}
override protected def beforeEach(): Unit = {
clearRecordSetRepo()
clearZoneRepo()

View File

@@ -29,7 +29,7 @@ akka.http {
}
vinyldns {
data-stores = ["mysql", "dynamodb"]
data-stores = ["mysql"]
mysql {
settings {
@@ -73,9 +73,6 @@ vinyldns {
}
}
dynamodb.repositories {
}
sync-delay = 10000 # 10 second delay for resyncing zone
batch-change-limit = 1000 # Max change limit per batch request

View File

@@ -128,21 +128,6 @@ vinyldns {
}
}
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider"
settings {
key = "vinyldnsTest"
secret = "notNeededForDynamoDbLocal"
endpoint = "http://127.0.0.1:19000"
region = "us-east-1" # note: we are always in us-east-1, but this can be overridden
}
repositories {
# override
}
}
notifiers = []
email = {

View File

@@ -95,10 +95,6 @@ vinyldns {
}
}
dynamodb.repositories {
# none
}
notifiers = ["test-notifier"]
test-notifier {

View File

@@ -32,7 +32,7 @@ class VinylDNSConfigSpec extends AnyWordSpec with Matchers with BeforeAndAfterAl
}
"properly load the datastore configs" in {
(underTest.dataStoreConfigs should have).length(2L)
(underTest.dataStoreConfigs should have).length(1L)
}
"assign the correct mysql repositories" in {
val mysqlConfig =
@@ -52,15 +52,6 @@ class VinylDNSConfigSpec extends AnyWordSpec with Matchers with BeforeAndAfterAl
recordChange
)
}
"assign the correct dynamodb repositories" in {
val dynamodbConfig =
underTest.dataStoreConfigs
.find(_.className == "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider")
.get
dynamodbConfig.repositories.keys should contain theSameElementsAs
Set()
}
"properly load the notifier configs" in {
val notifierConfigs = underTest.notifierConfigs

View File

@@ -25,7 +25,7 @@ vinyldns {
type = "vinyldns.core.crypto.NoOpCrypto"
}
data-stores = ["mysql", "dynamodb"]
data-stores = ["mysql"]
# default settings point to the setup from docker compose
mysql {
@@ -74,18 +74,6 @@ vinyldns {
}
}
dynamodb {
# dynamodb settings, for local docker compose the secrets are not needed
settings {
key = "x"
secret = "x"
endpoint = "http://vinyldns-dynamodb:8000"
}
repositories {
}
}
# the DDNS connection information for the default dns backend
defaultZoneConnection {
name = "vinyldns."

View File

@@ -147,36 +147,17 @@ queue {
```
## Database Configuration
VinylDNS supports both DynamoDB and MySQL backends. You can enable all repos in a single backend, or have a mix of the two.
VinylDNS supports a MySQL database. All repositories are configured in this single backend.
For each backend, you need to configure the table(s) that should be loaded.
You must have all of the following required API repositories configured in exactly one datastore.
**Some repositories are implemented in DynamoDB; all repositories have MySQL support**:
| Repository | DynamoDB support | MySQL support |
| :--- | :---: | :---: |
| BatchChange | | X |
| Group | X | X |
| GroupChange | X | X |
| Membership | X | X |
| RecordSet | | X |
| RecordSetChange | X | X |
| User | X | X |
| UserChange | X | X |
| Zone | | X |
| ZoneChange | X | X |
If using MySQL, follow the [MySQL Setup Guide](setup-mysql.html) first to get the values you need to configure here.
If using DynamoDB, follow the [AWS DynamoDB Setup Guide](setup-dynamodb.html) first to get the values you need to configure here.
```yaml
vinyldns {
# this list should include only the datastores being used by your instance
data-stores = ["mysql", "dynamodb"]
data-stores = ["mysql"]
mysql {
@@ -248,58 +229,6 @@ vinyldns {
}
}
dynamodb {
# this is the path to the DynamoDB provider. This should not be edited
# from the default in reference.conf
class-name = "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider"
settings {
# AWS_ACCESS_KEY, credential needed to access DynamoDB
key = "x"
# AWS_SECRET_ACCESS_KEY, credential needed to access DynamoDB
secret = "x"
# DynamoDB url for the region you are running in, this example is in us-east-1
endpoint = "https://dynamodb.us-east-1.amazonaws.com"
# DynamoDB region
region = "us-east-1"
}
repositories {
# all repositories with config sections here will be enabled in dynamodb
record-change {
# Name of the table where recordsets are saved
table-name = "recordChangeTest"
# Provisioned throughput for reads
provisioned-reads = 30
# Provisioned throughput for writes
provisioned-writes = 20
}
zone-change {
table-name = "zoneChangesTest"
provisioned-reads = 30
provisioned-writes = 20
}
group {
table-name = "groupsTest"
provisioned-reads = 30
provisioned-writes = 20
}
group-change {
table-name = "groupChangesTest"
provisioned-reads = 30
provisioned-writes = 20
}
membership {
table-name = "membershipTest"
provisioned-reads = 30
provisioned-writes = 20
}
}
}
}
```
@@ -364,6 +293,9 @@ vinyldns {
# the host name or IP address; note you can add a port if not using the default by setting hostname:port
primaryServer = "ddns1.foo.bar.com"
# the key algorithm to use: HMAC-MD5, HMAC-SHA1, HMAC-SHA224, HMAC-SHA256, HMAC-SHA384, HMAC-SHA512
algorithm = "HMAC-MD5"
}
# the AXFR connection information for the default dns backend
@@ -372,6 +304,7 @@ vinyldns {
keyName = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primaryServer = "vinyldns-bind9"
algorithm = "HMAC-MD5"
}
}
@@ -384,12 +317,14 @@ backends = [
key-name = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primary-server = "127.0.0.1:19001"
algorithm = "HMAC-MD5"
}
transfer-connection {
name = "vinyldns."
key-name = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primary-server = "127.0.0.1:19001"
algorithm = "HMAC-MD5"
}
}
]
@@ -620,46 +555,7 @@ vinyldns {
}
# both datastore options are in use
data-stores = ["mysql", "dynamodb"]
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider"
settings {
key = "x"
secret = "x"
endpoint = "http://vinyldns-dynamodb:8000"
region = "us-east-1"
}
repositories {
record-change {
table-name = "recordChange"
provisioned-reads = 30
provisioned-writes = 20
}
zone-change {
table-name = "zoneChanges"
provisioned-reads = 30
provisioned-writes = 20
}
group {
table-name = "groups"
provisioned-reads = 30
provisioned-writes = 20
}
group-change {
table-name = "groupChanges"
provisioned-reads = 30
provisioned-writes = 20
}
membership {
table-name = "membership"
provisioned-reads = 30
provisioned-writes = 20
}
}
}
data-stores = ["mysql"]
mysql {
class-name = "vinyldns.mysql.repository.MySqlDataStoreProvider"
@@ -695,6 +591,16 @@ vinyldns {
}
record-set {
}
group {
}
membership {
}
group-change {
}
zone-change {
}
record-change {
}
}
}
@@ -704,6 +610,7 @@ vinyldns {
keyName = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primaryServer = "vinyldns-bind9"
algorithm = "HMAC-MD5"
}
# the AXFR connection information for the default dns backend
@@ -712,6 +619,7 @@ vinyldns {
keyName = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primaryServer = "vinyldns-bind9"
algorithm = "HMAC-MD5"
}
# the max number of changes in a single batch change. Change carefully as this has performance
@@ -804,12 +712,14 @@ vinyldns {
key-name = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primary-server = "127.0.0.1:19001"
algorithm = "HMAC-MD5"
}
transfer-connection {
name = "vinyldns."
key-name = "vinyldns."
key = "nzisn+4G2ldMn0q1CV3vsg=="
primary-server = "127.0.0.1:19001"
algorithm = "HMAC-MD5"
}
}
]

View File

@@ -1,287 +0,0 @@
---
layout: docs
title: "Setup AWS DynamoDB"
section: "operator_menu"
---
# Setup AWS DynamoDB
[AWS DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html) is currently the default database
for _most_ of the data that is stored in our instance of VinylDNS. However, all table implementations are available in MySQL
(see [Setup MySQL Guide](setup-mysql.html) for more information). The following tables are present in DynamoDB in our instance of VinylDNS:
* [RecordSetChange](#recordsetchange-table) - audit history of all changes made to records
* [Group](#group-table) - group information, including name, email and description
* [Membership](#membership-table) - connects users to groups
* [GroupChange](#groupchange-table) - holds audit history for groups
* [UserChange](#userchange-table) - holds audit history for all users (only used in the portal currently)
* [ZoneChange](#zonechange-table) - audit history for changes to zones (not record related)
###### Note: the DynamoDB RecordSet repository is only partially implemented. To use it, you would need to provide implementations of the missing methods
AWS DynamoDB connection information is configured one time, and the same connection is used across all tables. Therefore,
you must ensure that all tables live inside the _same_ AWS region accessible by the _same_ credentials.
## Setting up DynamoDB
**If the tables do not yet exist, starting up the application will _automatically_ create the tables for you. Starting
up the application for the first time is often the best way to set up the tables, as they do require attributes and indexes to be configured.**
[Provisioned Throughput](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ProvisionedThroughput.html)
is a mechanism that controls how many reads and writes can happen concurrently against your tables and indexes. You can
configure *Auto Scaling* for your tables, so you do not have to worry about these settings.
The most _important_ thing to remember for Provisioned Throughput is that you pay more for _writes_ than _reads_. To manage
costs, it is important to use Auto Scaling, or to turn your provisioned throughput settings down very low.
If your installation does not have large zones (100,000 records) and sees relatively low traffic, you can set
the throughput very low and operate in the "almost" free tier.
## Configuring DynamoDB
Before you can configure DynamoDB, make note of the AWS account (access key and secret access key) as well as the
DynamoDB endpoint (region) that you will be using. Follow the [API Database Configuration](config-api.html#database-configuration)
to complete the setup for the API.
You also need to configure DynamoDB for the portal; see [Portal Database Configuration](config-portal.html#database-configuration)
### RecordSet Table
Each row in the RecordSet table is an `RRSet`, meaning it holds one or more "Records".
**Usage**
This table (and recordSetChange) requires the highest throughput. If you have large zones, the first time
you load a zone all of its records are written into the `recordSet` table. If the settings are too low, it can take a long time
for the records to be loaded, and in the worst case the operation will fail.
**Attributes**
| name | type | description |
| `zone_id` | String(UUID) | the id of the zone the record set belongs to |
| `record_set_id` | String(UUID) | the unique id for this record set |
| `record_set_name` | String | the record set name |
| `record_set_type` | String | the RRType of record set, for example A, AAAA, CNAME |
| `record_set_sort` | String | the case-insensitive name for the record set, used for sorting |
| `record_set_blob` | Binary | holds a binary array representing the record set, currently a protocol buffer byte array |
**Table Keys**
| type | attribute name |
| HASH | `record_set_id` |
| SORT | `<none>` |
**Indexes**
* `zone_id_record_set_name_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `record_set_name`
* Projection Type = `ALL`
* `zone_id_record_set_sort_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `record_set_sort`
* Projection Type = `ALL`
### RecordSetChange Table
Each record set change could potentially live inside a `ChangeSet`. A `ChangeSet` contains one or more individual
`RecordSetChange` instances that are processed together. Each _record_ in the `RecordSetChange` table corresponds to an individual change
such as "Create a new record set".
**Usage**
Every time any record is updated, the audit trail is inserted into the `recordSetChange` table. This table
should also have higher throughput settings, as usage, especially on writes, can be rather high.
**Attributes**
| name | type | description |
| `change_set_id` | String(UUID) | id for the change set this change belongs to |
| `record_set_change_id` | String(UUID) | the id for the record set change |
| `zone_id` | String(UUID) | the zone this record change was made for |
| `change_set_status` | Number | a number representing the status of the change (Pending = 0 ; Processing = 1 ; Complete = 2 ; Applied = 100) |
| `created_timestamp` | String | the timestamp (UTC) when the change set was created |
| `record_set_change_created_timestamp` | Number | a number in EPOCH millis when the change was created |
| `processing_timestamp` | String | the timestamp (UTC) when the change was processed |
| `record_set_change_blob` | Binary | the protobuf serialized bytes that represent the entire record set change |
**Table Keys**
| type | attribute name |
| HASH | `record_set_change_id` |
| SORT | `<none>` |
**Indexes**
* `zone_id_record_set_change_id_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `record_set_change_id`
* Projection Type = `ALL`
* `zone_id_created_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `record_set_change_created_timestamp`
* Projection Type = `ALL`
### User Table
The User Table holds user specific information. Each row in the table is a separate distinct user.
To enable encryption at rest, the user table should be encrypted.
**Encryption can only be enabled when the table is first created.**
**Usage**
Very low writes, very small data, high read rate (every API call looks up the user info)
**Attributes**
| name | type | description |
| `userid` | String(UUID) | a unique identifier for this user |
| `username` | String | LDAP user name for this user |
| `firstname` | String | user first name |
| `lastname` | String | user last name |
| `email` | String | user's email address |
| `created` | Number | EPOCH time in millis when the user was created in VinylDNS |
| `accesskey` | String | The access key (public) for the user to interact with the VinylDNS API |
| `secretkey` | String | the secret key (private) for the user to interact with the VinylDNS API. This secret is encrypted by default using the configured `Crypto` implementation. It is also encrypted at rest. |
| `super` | Boolean | an indicator that the user is a VinylDNS Admin user (can access all data and operations) |
**Note: there is no way to programmatically set the super flag, as it has a tremendous amount of power. We are looking
for ideas and ways that we can provide super type access with some additional checks. To set this flag, you would need
to hand-roll your own script at this point and set this attribute.**
**Table Keys**
| type | attribute name |
| HASH | `userid` |
| SORT | `<none>` |
**Indexes**
* `username_index` - Global Secondary Index
* HASH = `username`
* SORT = `<none>`
* Projection Type = `ALL`
* `access_key_index` - Global Secondary Index
* HASH = `accesskey`
* SORT = `<none>`
* Projection Type = `ALL`
### Group Table
The Group table holds group information, including group name, email, and ids of members.
**Usage**
Very low writes, very small data, moderate read rate
**Attributes**
| name | type | description |
| `group_id` | String(UUID) | unique identifier for the group |
| `name` | String | the name of the group |
| `email` | String | the email (usually distribution list) of the group |
| `desc` | String | the description of the group |
| `status` | String | the group status (Active, Deleted) |
| `created` | Number | the date-time in EPOCH millis when the group was created |
| `member_ids` | String Set | the ids of all members (users) of the group |
| `admin_ids` | String Set | the ids of all members who are group managers |
**Table Keys**
| type | attribute name |
| HASH | `group_id` |
| SORT | `<none>` |
**Indexes**
* `group_name_index` - Global Secondary Index
* HASH = `name`
* SORT = `<none>`
* Projection Type = `ALL`
### Membership Table
The Membership table is a "join" table linking users and groups. It supports fast look ups for all groups that
a user is a member of.
**Usage**
Very low writes, very small data, high read rate (every API call looks up the user groups)
**Attributes**
| name | type | description |
| `user_id` | String(UUID) | the unique id for the user |
| `group_id` | String(UUID) | the unique id for the group |
**Table Keys**
| type | attribute name |
| HASH | `user_id` |
| SORT | `group_id` |
**Indexes**
*none*
### GroupChange Table
Group changes are recorded any time groups are created, modified, or deleted. This includes changes in group ownership
and group membership.
**Usage**
Very low writes, very small data, very low read
**Attributes**
| name | type | description |
| `group_change_id` | String(UUID) | the unique identifier for the group change
| `group_id` | String(UUID) | the unique identifier for the group |
| `created` | Number | the date / time in EPOCH millis |
| `group_change_blob` | Binary | protobuf of the group change |
**Table Keys**
| type | attribute name |
| HASH | `group_id` |
| SORT | `created` |
**Indexes**
* `GROUP_ID_AND_CREATED_INDEX` - Global Secondary Index
* HASH = `group_id`
* SORT = `created`
* Projection Type = `ALL`
### UserChange Table
UserChange records when users are created or changed in VinylDNS. It differs from the other change tables in that it does not serialize
the change data as protobuf.
**Usage**
Very low writes, very small data, very low read
**Attributes**
| name | type | description |
| `timestamp` | String | the datetime the change was made |
| `userId` | String(UUID) | the unique identifier for the user being changed |
| `username` | String | the username for the user being changed |
| `changeType` | String | (created ; updated ; deleted) |
| `updateUser` | Map | a map of the attributes being updated |
| `previousUser` | Map | a map of the previous attributes |
### ZoneChange Table
Anytime an update is made to a zone, the event is stored here. This includes changes to the admin group or ACL rules.
**Usage**
Very low writes, small data, low read
**Attributes**
| name | type | description |
| `zone_id` | String(UUID) | unique identifier for the zone |
| `change_id` | String(UUID) | the unique identifier for this zone change |
| `status` | String | the status of the zone change (Active, Deleted, PendingUpdate, PendingDelete, Syncing) |
| `blob` | Binary | the protobuf serialized bytes for this zone change |
| `created` | Number | the date/time in EPOCH milliseconds |
**Table Keys**
| type | attribute name |
| HASH | `zone_id` |
| SORT | `change_id` |
**Indexes**
* `zone_id_status_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `status`
* Projection Type = `ALL`
* `status_zone_id_index` - Global Secondary Index
* HASH = `status`
* SORT = `zone_id`
* Projection Type = `ALL`
* `zone_id_created_index` - Global Secondary Index
* HASH = `zone_id`
* SORT = `created`
* Projection Type = `ALL`

View File

@@ -71,8 +71,6 @@ options:
menu_type: operator_menu
menu_section: operator_section
nested_options:
- title: Setup AWS DynamoDB
url: operator/setup-dynamodb.html
- title: Setup MySQL
url: operator/setup-mysql.html
- title: Setup AWS SQS

View File

@@ -1,40 +0,0 @@
akka.loglevel = "OFF"
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDbDataStoreProvider"
settings {
key = "vinyldnsTest"
secret = "notNeededForDynamoDbLocal"
endpoint = "http://127.0.0.1:19000"
region = "us-east-1"
}
repositories {
record-change {
table-name = "recordchange-startup-test"
provisioned-reads = 100
provisioned-writes = 100
}
zone-change {
table-name = "zonechange-startup-test"
provisioned-reads = 100
provisioned-writes = 100
}
group {
table-name = "groups-startup-test"
provisioned-reads = 100
provisioned-writes = 100
}
group-change {
table-name = "groupchanges-startup-test"
provisioned-reads = 100
provisioned-writes = 100
}
membership {
table-name = "memberships-startup-test"
provisioned-reads = 100
provisioned-writes = 100
}
}
}

View File

@@ -1,98 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest
import com.typesafe.config.{Config, ConfigFactory}
import vinyldns.core.crypto.{CryptoAlgebra, NoOpCrypto}
import vinyldns.core.domain.batch.BatchChangeRepository
import vinyldns.core.domain.membership._
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetRepository}
import vinyldns.core.domain.zone.{ZoneChangeRepository, ZoneRepository}
import vinyldns.core.repository.{DataStore, DataStoreConfig, LoadedDataStore}
import vinyldns.core.repository.RepositoryName._
import pureconfig._
import pureconfig.generic.auto._
class DynamoDBDataStoreProviderIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
val config: Config = ConfigFactory.load()
val dynamoDBConfig: DataStoreConfig =
ConfigSource.fromConfig(config).at("dynamodb").loadOrThrow[DataStoreConfig]
val provider: DynamoDBDataStoreProvider = new DynamoDBDataStoreProvider()
val crypto: CryptoAlgebra = new NoOpCrypto()
logger.info("Loading all dynamodb tables in DynamoDBDataStoreProviderSpec")
val providerLoad: LoadedDataStore = provider.load(dynamoDBConfig, crypto).unsafeRunSync()
val dataStore: DataStore = providerLoad.dataStore
logger.info("DynamoDBDataStoreProviderSpec load complete")
def setup(): Unit = ()
def tearDown(): Unit = {
val deletes = dynamoDBConfig.repositories.configMap.map {
case (_, config) => {
val asDynamo = ConfigSource.fromConfig(config).loadOrThrow[DynamoDBRepositorySettings]
val request = new DeleteTableRequest().withTableName(asDynamo.tableName)
testDynamoDBHelper.deleteTable(request)
}
}
logger.info("Deleting all tables created by provider in DynamoDBDataStoreProviderSpec")
deletes.toList.parSequence.unsafeRunSync()
logger.info("DynamoDBDataStoreProviderSpec delete complete")
}
"DynamoDBDataStoreProvider" should {
"properly load configured repos" in {
dataStore.get[GroupRepository](group) shouldBe defined
dataStore.get[MembershipRepository](membership) shouldBe defined
dataStore.get[GroupChangeRepository](groupChange) shouldBe defined
dataStore.get[RecordChangeRepository](recordChange) shouldBe defined
dataStore.get[ZoneChangeRepository](zoneChange) shouldBe defined
}
"not load configured off repos" in {
dataStore.get[ZoneRepository](zone) shouldBe empty
dataStore.get[BatchChangeRepository](batchChange) shouldBe empty
dataStore.get[RecordSetRepository](recordSet) shouldBe empty
}
"validate a loaded repo works" in {
val testGroup = Group(
"provider-load-test-group-name",
"provider-load@test.email",
Some("some description"),
"testGroupId",
adminUserIds = Set("testUserId"),
memberIds = Set("testUserId")
)
val groupRepo = dataStore.get[GroupRepository](group)
val save = groupRepo.map(_.save(testGroup)).sequence[IO, Group]
save.unsafeRunSync() shouldBe Some(testGroup)
val get = groupRepo.map(_.getGroup(testGroup.id)).sequence[IO, Option[Group]]
get.unsafeRunSync().flatten shouldBe Some(testGroup)
}
"include a health check IO" in {
providerLoad.healthCheck.unsafeRunSync() shouldBe ().asRight
}
}
}

View File

@@ -1,191 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import vinyldns.core.TestMembershipData._
import vinyldns.core.domain.membership.{Group, GroupChange, GroupChangeType}
import scala.concurrent.duration._
import scala.util.Random
class DynamoDBGroupChangeRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_.isAfter(_))
private val GROUP_CHANGES_TABLE = "group-changes-live"
private val tableConfig = DynamoDBRepositorySettings(s"$GROUP_CHANGES_TABLE", 30, 30)
private var repo: DynamoDBGroupChangeRepository = _
private val randomTimeGroup: Group = Group(
"randomTime",
"test@test.com",
Some("changes have random time stamp"),
memberIds = Set(listOfDummyUsers(0).id)
)
// made distinct because multiple changes with the same timestamp throw off this test
private val randomTimes: List[Int] = List.range(0, 200).map(_ => Random.nextInt(1000)).distinct
private val listOfRandomTimeGroupChanges: List[GroupChange] = randomTimes.zipWithIndex.map {
case (randomTime, i) =>
GroupChange(
randomTimeGroup,
GroupChangeType.Update,
dummyUser.id,
created = now.minusSeconds(randomTime),
id = s"random-time-$i"
)
}
private val groupChanges = Seq(okGroupChange, okGroupChangeUpdate, okGroupChangeDelete) ++
listOfDummyGroupChanges ++ listOfRandomTimeGroupChanges
def setup(): Unit = {
repo = DynamoDBGroupChangeRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
// Create all the changes
val savedGroupChanges = groupChanges.map(repo.save(_)).toList.parSequence
// Wait until all of the changes are done
savedGroupChanges.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(GROUP_CHANGES_TABLE)
repo.dynamoDBHelper.deleteTable(request).unsafeRunSync()
}
"DynamoDBGroupChangeRepository" should {
"get a group change by id" in {
val targetGroupChange = okGroupChange
repo.getGroupChange(targetGroupChange.id).unsafeRunSync() shouldBe Some(targetGroupChange)
}
"return none when no matching id is found" in {
repo.getGroupChange("NotFound").unsafeRunSync() shouldBe None
}
"save a group change with oldGroup = None" in {
val targetGroupChange = okGroupChange
val test =
for {
saved <- repo.save(targetGroupChange)
retrieved <- repo.getGroupChange(saved.id)
} yield retrieved
test.unsafeRunSync() shouldBe Some(targetGroupChange)
}
"save a group change with oldGroup set" in {
val targetGroupChange = okGroupChangeUpdate
val test =
for {
saved <- repo.save(targetGroupChange)
retrieved <- repo.getGroupChange(saved.id)
} yield retrieved
test.unsafeRunSync() shouldBe Some(targetGroupChange)
}
"getGroupChanges should return the recent changes and the correct last key" in {
val retrieved = repo.getGroupChanges(oneUserDummyGroup.id, None, 100).unsafeRunSync()
retrieved.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(0, 100)
retrieved.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(99).created.getMillis.toString
)
}
"getGroupChanges should start using the time startFrom" in {
val retrieved = repo
.getGroupChanges(
oneUserDummyGroup.id,
Some(listOfDummyGroupChanges(50).created.getMillis.toString),
100
)
.unsafeRunSync()
retrieved.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(51, 151)
retrieved.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(150).created.getMillis.toString
)
}
"getGroupChanges returns entire page and nextId = None if there are less than maxItems left" in {
val retrieved = repo
.getGroupChanges(
oneUserDummyGroup.id,
Some(listOfDummyGroupChanges(200).created.getMillis.toString),
100
)
.unsafeRunSync()
retrieved.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(201, 300)
retrieved.lastEvaluatedTimeStamp shouldBe None
}
"getGroupChanges returns 3 pages of items" in {
val page1 = repo.getGroupChanges(oneUserDummyGroup.id, None, 100).unsafeRunSync()
val page2 = repo
.getGroupChanges(oneUserDummyGroup.id, page1.lastEvaluatedTimeStamp, 100)
.unsafeRunSync()
val page3 = repo
.getGroupChanges(oneUserDummyGroup.id, page2.lastEvaluatedTimeStamp, 100)
.unsafeRunSync()
val page4 = repo
.getGroupChanges(oneUserDummyGroup.id, page3.lastEvaluatedTimeStamp, 100)
.unsafeRunSync()
page1.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(0, 100)
page1.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(99).created.getMillis.toString
)
page2.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(100, 200)
page2.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(199).created.getMillis.toString
)
page3.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(200, 300)
page3.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(299).created.getMillis.toString
) // the limit was reached before the end of list
page4.changes should contain theSameElementsAs List() // no matches found in the rest of the list
page4.lastEvaluatedTimeStamp shouldBe None
}
"getGroupChanges should return `maxItem` items" in {
val retrieved = repo.getGroupChanges(oneUserDummyGroup.id, None, 5).unsafeRunSync()
retrieved.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(0, 5)
retrieved.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(4).created.getMillis.toString
)
}
"getGroupChanges should handle changes inserted in random order" in {
// group changes have a random time stamp and inserted in random order
val retrieved = repo.getGroupChanges(randomTimeGroup.id, None, 100).unsafeRunSync()
val sorted = listOfRandomTimeGroupChanges.sortBy(_.created)
retrieved.changes should contain theSameElementsAs sorted.slice(0, 100)
retrieved.lastEvaluatedTimeStamp shouldBe Some(sorted(99).created.getMillis.toString)
}
}
}

View File

@@ -1,196 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import vinyldns.core.domain.membership.{Group, GroupStatus}
import vinyldns.core.TestMembershipData._
import scala.concurrent.duration._
class DynamoDBGroupRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val GROUP_TABLE = "groups-live"
private val tableConfig = DynamoDBRepositorySettings(s"$GROUP_TABLE", 30, 30)
private var repo: DynamoDBGroupRepository = _
private val activeGroups =
for (i <- 1 to 10)
yield Group(
s"live-test-group$i",
s"test$i@test.com",
Some(s"description$i"),
memberIds = Set(s"member$i", s"member2$i"),
adminUserIds = Set(s"member$i", s"member2$i"),
id = "id-%03d".format(i)
)
private val inDbDeletedGroup = Group(
s"live-test-group-deleted",
s"test@test.com",
Some(s"description"),
memberIds = Set("member1"),
adminUserIds = Set("member1"),
id = "id-deleted-group",
status = GroupStatus.Deleted
)
private val groups = activeGroups ++ List(inDbDeletedGroup)
def setup(): Unit = {
repo = DynamoDBGroupRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
// Create all the groups
val savedGroups = groups.map(repo.save(_)).toList.parSequence
// Wait until all of the groups are saved
savedGroups.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(GROUP_TABLE)
val deleteTables = repo.dynamoDBHelper.deleteTable(request)
deleteTables.unsafeRunSync()
}
"DynamoDBGroupRepository" should {
"get a group by id" in {
val targetGroup = groups.head
repo.getGroup(targetGroup.id).unsafeRunSync() shouldBe Some(targetGroup)
}
"get all active groups" in {
repo.getAllGroups().unsafeRunSync() shouldBe activeGroups.toSet
}
"not return a deleted group when getting group by id" in {
val deleted = deletedGroup.copy(memberIds = Set("foo"), adminUserIds = Set("foo"))
val f =
for {
_ <- repo.save(deleted)
retrieved <- repo.getGroup(deleted.id)
} yield retrieved
f.unsafeRunSync() shouldBe None
}
"not return a deleted group when getting group by name" in {
val deleted = deletedGroup.copy(memberIds = Set("foo"), adminUserIds = Set("foo"))
val f =
for {
_ <- repo.save(deleted)
retrieved <- repo.getGroupByName(deleted.name)
} yield retrieved
f.unsafeRunSync() shouldBe None
}
"get groups should omit non existing groups" in {
val f = repo.getGroups(Set(activeGroups.head.id, "thisdoesnotexist"))
f.unsafeRunSync().map(_.id) should contain theSameElementsAs Set(activeGroups.head.id)
}
"returns all the groups" in {
val f = repo.getGroups(groups.map(_.id).toSet)
f.unsafeRunSync() should contain theSameElementsAs activeGroups
}
"only return requested groups" in {
val evenGroups = activeGroups.filter(_.id.takeRight(1).toInt % 2 == 0)
val f = repo.getGroups(evenGroups.map(_.id).toSet)
f.unsafeRunSync() should contain theSameElementsAs evenGroups
}
"return an Empty set if nothing found" in {
val f = repo.getGroups(Set("notFound"))
f.unsafeRunSync() shouldBe Set()
}
"not return deleted groups" in {
val deleted = deletedGroup.copy(
id = "test-deleted-group-get-groups",
memberIds = Set("foo"),
adminUserIds = Set("foo")
)
val f =
for {
_ <- repo.save(deleted)
retrieved <- repo.getGroups(Set(deleted.id, groups.head.id))
} yield retrieved
f.unsafeRunSync().map(_.id) shouldBe Set(groups.head.id)
}
"get a group by name" in {
val targetGroup = groups.head
repo.getGroupByName(targetGroup.name).unsafeRunSync() shouldBe Some(targetGroup)
}
"save a group with no description" in {
val group = Group(
"null-description",
"test@test.com",
None,
memberIds = Set("foo"),
adminUserIds = Set("bar")
)
val test =
for {
saved <- repo.save(group)
retrieved <- repo.getGroup(saved.id)
} yield retrieved
test.unsafeRunSync().get.description shouldBe None
}
"add and delete a group should return successfully" in {
val deleted = deletedGroup.copy(
id = "test-deleted-group-get-groups",
memberIds = Set("foo"),
adminUserIds = Set("foo")
)
val f =
for {
_ <- repo.save(deleted)
retrieved <- repo.delete(deleted)
} yield retrieved
f.unsafeRunSync().id shouldBe deleted.id
val getAfterDeleted =
for {
get <- repo.getGroup("test-deleted-group-get-groups")
getAll <- repo.getAllGroups()
} yield (get, getAll)
val (get, getAll) = getAfterDeleted.unsafeRunSync()
get shouldBe None
getAll.filter(_.id == "test-deleted-group-get-groups") shouldBe Set.empty
getAll.filter(_.id == activeGroups.head.id) shouldBe Set(activeGroups.head)
}
}
}

View File

@@ -1,63 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.UUID
import org.scalatest._
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.matchers.should.Matchers
import org.slf4j.{Logger, LoggerFactory}
trait DynamoDBIntegrationSpec
extends AnyWordSpec
with BeforeAndAfterAll
with Matchers
with Inspectors {
// port is defined in the docker/docker-compose.yml file for dynamodb
val dynamoIntegrationConfig: DynamoDBDataStoreSettings = getDynamoConfig(19000)
val logger: Logger = LoggerFactory.getLogger("DynamoDBIntegrationSpec")
// only used for teardown
lazy val testDynamoDBHelper: DynamoDBHelper =
new DynamoDBHelper(DynamoDBClient(dynamoIntegrationConfig), logger)
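// Builds connection settings for the local DynamoDB endpoint used in integration tests; the secret is a placeholder since DynamoDB Local does not require real credentials.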
def getDynamoConfig(port: Int): DynamoDBDataStoreSettings =
DynamoDBDataStoreSettings(
"vinyldnsTest",
"notNeededForDynamoDbLocal",
s"http://localhost:$port",
"us-east-1"
)
override protected def beforeAll(): Unit =
setup()
override protected def afterAll(): Unit =
tearDown()
/* Allows a spec to initialize the database */
def setup(): Unit
/* Allows a spec to clean up */
def tearDown(): Unit
/* Generates a random string useful to avoid data collision */
def genString: String = UUID.randomUUID().toString
}

View File

@@ -1,166 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import scala.concurrent.duration._
class DynamoDBMembershipRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val membershipTable = "membership-live"
private val tableConfig = DynamoDBRepositorySettings(s"$membershipTable", 30, 30)
private var repo: DynamoDBMembershipRepository = _
private val testUserIds = for (i <- 0 to 5) yield s"test-user-$i"
private val testGroupIds = for (i <- 0 to 5) yield s"test-group-$i"
def setup(): Unit = {
repo = DynamoDBMembershipRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
// Create all the items
val results =
testGroupIds.map(repo.saveMembers(_, testUserIds.toSet, isAdmin = false)).toList.parSequence
// Wait until all of the data is stored
results.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val results = testGroupIds.map(repo.removeMembers(_, testUserIds.toSet)).toList.parSequence
results.unsafeRunSync()
}
"DynamoDBMembershipRepository" should {
val groupId = genString
val user1 = genString
val user2 = genString
"add members successfully" in {
repo
.saveMembers(groupId, Set(user1, user2), isAdmin = false)
.unsafeRunSync() should contain theSameElementsAs Set(user1, user2)
}
"add members with no member ids invokes no change" in {
val user1 = genString
repo.saveMembers(groupId, Set(user1), isAdmin = false).unsafeRunSync()
val originalResult = repo.getGroupsForUser(user1).unsafeRunSync()
repo.saveMembers(groupId, Set(), isAdmin = false).unsafeRunSync()
repo.getGroupsForUser(user1).unsafeRunSync() should contain theSameElementsAs originalResult
}
"add a group to an existing user" in {
val group1 = genString
val group2 = genString
val user1 = genString
val f =
for {
_ <- repo.saveMembers(group1, Set(user1), isAdmin = false)
_ <- repo.saveMembers(group2, Set(user1), isAdmin = false)
userGroups <- repo.getGroupsForUser(user1)
} yield userGroups
f.unsafeRunSync() should contain theSameElementsAs Set(group1, group2)
}
"return an empty set when getting groups for a user that does not exist" in {
repo.getGroupsForUser("notHere").unsafeRunSync() shouldBe empty
}
"remove members successfully" in {
val group1 = genString
val group2 = genString
val user1 = genString
val f =
for {
_ <- repo.saveMembers(group1, Set(user1), isAdmin = false)
_ <- repo.saveMembers(group2, Set(user1), isAdmin = false)
_ <- repo.removeMembers(group1, Set(user1))
userGroups <- repo.getGroupsForUser(user1)
} yield userGroups
f.unsafeRunSync() should contain theSameElementsAs Set(group2)
}
"remove members not in group" in {
val group1 = genString
val user1 = genString
val user2 = genString
val f =
for {
_ <- repo.saveMembers(group1, Set(user1), isAdmin = false)
_ <- repo.removeMembers(group1, Set(user2))
userGroups <- repo.getGroupsForUser(user2)
} yield userGroups
f.unsafeRunSync() shouldBe empty
}
"remove members with no member ids invokes no change" in {
val user1 = genString
repo.saveMembers(groupId, Set(user1), isAdmin = false).unsafeRunSync()
val originalResult = repo.getGroupsForUser(user1).unsafeRunSync()
repo.removeMembers(groupId, Set()).unsafeRunSync()
repo.getGroupsForUser(user1).unsafeRunSync() should contain theSameElementsAs originalResult
}
"remove all groups for user" in {
val group1 = genString
val group2 = genString
val group3 = genString
val user1 = genString
val f =
for {
_ <- repo.saveMembers(group1, Set(user1), isAdmin = false)
_ <- repo.saveMembers(group2, Set(user1), isAdmin = false)
_ <- repo.saveMembers(group3, Set(user1), isAdmin = false)
_ <- repo.removeMembers(group1, Set(user1))
_ <- repo.removeMembers(group2, Set(user1))
_ <- repo.removeMembers(group3, Set(user1))
userGroups <- repo.getGroupsForUser(user1)
} yield userGroups
f.unsafeRunSync() shouldBe empty
}
"retrieve all of the groups for a user" in {
val f = repo.getGroupsForUser(testUserIds.head)
val retrieved = f.unsafeRunSync()
testGroupIds.foreach(groupId => retrieved should contain(groupId))
}
"remove members from a group" in {
val membersToRemove = testUserIds.toList.sorted.take(2).toSet
val groupsRemoved = testGroupIds.toList.sorted.take(2)
groupsRemoved.map(repo.removeMembers(_, membersToRemove)).parSequence.unsafeRunSync()
val groupsRetrieved = repo.getGroupsForUser(membersToRemove.head).unsafeRunSync()
forAll(groupsRetrieved) { groupId =>
groupsRemoved should not contain groupId
}
}
}
}

View File

@@ -1,275 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.UUID
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import vinyldns.core.domain.record.{ChangeSet, ChangeSetStatus, RecordSetChange}
import vinyldns.core.domain.zone.{Zone, ZoneStatus}
import vinyldns.core.TestMembershipData.abcAuth
import vinyldns.core.TestZoneData.testConnection
import vinyldns.core.TestRecordSetData._
import scala.concurrent.duration._
class DynamoDBRecordChangeRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private val recordChangeTable = "record-change-live"
private val tableConfig = DynamoDBRepositorySettings(s"$recordChangeTable", 30, 30)
private var repo: DynamoDBRecordChangeRepository = _
private val user = abcAuth.signedInUser.userName
private val auth = abcAuth
private val zoneA = Zone(
s"live-test-$user.zone-small.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection
)
private val zoneB = Zone(
s"live-test-$user.zone-large.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection
)
private val recordSetA =
for {
rsTemplate <- Seq(rsOk, aaaa, cname)
} yield rsTemplate.copy(
zoneId = zoneA.id,
name = s"${rsTemplate.typ.toString}-${zoneA.account}.",
ttl = 100,
created = DateTime.now(),
id = UUID.randomUUID().toString
)
private val recordSetB =
for {
i <- 1 to 3
} yield rsOk.copy(
zoneId = zoneB.id,
name = s"${rsOk.typ.toString}-${zoneB.account}-$i.",
ttl = 100,
created = DateTime.now(),
id = UUID.randomUUID().toString
)
private val updateRecordSetA =
for {
rsTemplate <- Seq(rsOk, aaaa, cname)
} yield rsTemplate.copy(
zoneId = zoneA.id,
name = s"${rsTemplate.typ.toString}-${zoneA.account}.",
ttl = 1000,
created = DateTime.now(),
id = UUID.randomUUID().toString
)
private val recordSetChangesA = {
for {
rs <- recordSetA
} yield makeTestAddChange(rs, zoneA, auth.userId)
}.sortBy(_.id)
private val recordSetChangesB = {
for {
rs <- recordSetB
} yield makeTestAddChange(rs, zoneB, auth.userId)
}.sortBy(_.id)
private val recordSetChangesC = {
for {
rs <- recordSetA
} yield makePendingTestDeleteChange(rs, zoneA, auth.userId)
}.sortBy(_.id)
private val recordSetChangesD = {
for {
rs <- recordSetA
updateRs <- updateRecordSetA
} yield makePendingTestUpdateChange(rs, updateRs, zoneA, auth.userId)
}.sortBy(_.id)
private val changeSetA = ChangeSet(recordSetChangesA)
private val changeSetB = ChangeSet(recordSetChangesB)
private val changeSetC =
ChangeSet(recordSetChangesC).copy(status = ChangeSetStatus.Applied)
private val changeSetD = ChangeSet(recordSetChangesD)
.copy(createdTimestamp = changeSetA.createdTimestamp + 1000) // make sure D is created AFTER A
private val changeSets = List(changeSetA, changeSetB, changeSetC, changeSetD)
// This zone is used to test that record changes are listed in the correct order
private val zoneC = Zone(
s"live-test-$user.record-changes.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection
)
private val baseTime = DateTime.now()
private val timeOrder = List(
baseTime.minusSeconds(8000),
baseTime.minusSeconds(7000),
baseTime.minusSeconds(6000),
baseTime.minusSeconds(5000),
baseTime.minusSeconds(4000),
baseTime.minusSeconds(3000),
baseTime.minusSeconds(2000),
baseTime.minusSeconds(1000),
baseTime
)
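// Timestamps spaced 1000 seconds apart give the create, update, and delete changes a deterministic order.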
private val recordSetsC =
for {
rsTemplate <- Seq(rsOk, aaaa, cname)
} yield rsTemplate.copy(
zoneId = zoneC.id,
name = s"${rsTemplate.typ.toString}-${zoneC.account}.",
ttl = 100,
id = UUID.randomUUID().toString
)
private val updateRecordSetsC =
for {
rsTemplate <- Seq(rsOk, aaaa, cname)
} yield rsTemplate.copy(
zoneId = zoneC.id,
name = s"${rsTemplate.typ.toString}-${zoneC.account}.",
ttl = 1000,
id = UUID.randomUUID().toString
)
private val recordSetChangesCreateC = {
for {
(rs, index) <- recordSetsC.zipWithIndex
} yield makeTestAddChange(rs, zoneC, auth.userId).copy(created = timeOrder(index))
}
private val recordSetChangesUpdateC = {
for {
(rs, index) <- recordSetsC.zipWithIndex
} yield makePendingTestUpdateChange(rs, updateRecordSetsC(index), zoneC, auth.userId)
.copy(created = timeOrder(index + 3))
}
private val recordSetChangesDeleteC = {
for {
(rs, index) <- recordSetsC.zipWithIndex
} yield makePendingTestDeleteChange(rs, zoneC, auth.userId).copy(created = timeOrder(index + 6))
}
private val changeSetCreateC = ChangeSet(recordSetChangesCreateC)
private val changeSetUpdateC = ChangeSet(recordSetChangesUpdateC)
private val changeSetDeleteC = ChangeSet(recordSetChangesDeleteC)
private val changeSetsC = List(changeSetCreateC, changeSetUpdateC, changeSetDeleteC)
private val recordSetChanges: List[RecordSetChange] =
(recordSetChangesCreateC ++ recordSetChangesUpdateC ++ recordSetChangesDeleteC)
.sortBy(_.created.getMillis)
.toList
.reverse // Changes are retrieved by timestamp in descending order
def setup(): Unit = {
repo = DynamoDBRecordChangeRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
changeSets.foreach { changeSet =>
// Save the change set
val savedChangeSet = repo.save(changeSet)
// Wait until all of the change sets are saved
savedChangeSet.unsafeRunTimed(5.minutes).getOrElse(fail("error in change set load"))
}
changeSetsC.foreach { changeSet =>
// Save the change set
val savedChangeSet = repo.save(changeSet)
// Wait until all of the change sets are saved
savedChangeSet.unsafeRunTimed(5.minutes).getOrElse(fail("error in change set load"))
}
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(recordChangeTable)
repo.dynamoDBHelper.deleteTable(request).unsafeRunSync()
}
"DynamoDBRepository" should {
"get a record change set by id" in {
val testRecordSetChange = pendingCreateAAAA.copy(id = genString)
val f =
for {
saved <- repo.save(ChangeSet(Seq(testRecordSetChange)))
retrieved <- repo.getRecordSetChange(saved.zoneId, testRecordSetChange.id)
} yield retrieved
f.unsafeRunSync() shouldBe Some(testRecordSetChange)
}
"list all record set changes in zone C" in {
val testFuture = repo.listRecordSetChanges(zoneC.id)
testFuture.unsafeRunSync().items shouldBe recordSetChanges
}
"list record set changes with a page size of one" in {
val testFuture = repo.listRecordSetChanges(zoneC.id, maxItems = 1)
testFuture.unsafeRunSync().items shouldBe recordSetChanges.take(1)
}
"list record set changes with page size of one and reuse key to get another page with size of two" in {
val testFuture = for {
listOne <- repo.listRecordSetChanges(zoneC.id, maxItems = 1)
listTwo <- repo.listRecordSetChanges(zoneC.id, startFrom = listOne.nextId, maxItems = 2)
} yield listTwo
val result = testFuture.unsafeRunSync()
val page2 = result.items
page2 shouldBe recordSetChanges.slice(1, 3)
}
"return an empty list and nextId of None when passing last record as start" in {
val testFuture = for {
listOne <- repo.listRecordSetChanges(zoneC.id, maxItems = 9)
listTwo <- repo.listRecordSetChanges(zoneC.id, startFrom = listOne.nextId, maxItems = 2)
} yield listTwo
val result = testFuture.unsafeRunSync()
result.nextId shouldBe None
result.items shouldBe List()
}
"have nextId of None when exhausting record changes" in {
val testFuture = repo.listRecordSetChanges(zoneC.id, maxItems = 10)
testFuture.unsafeRunSync().nextId shouldBe None
}
"return empty list with startFrom of zero" in {
val testFuture = repo.listRecordSetChanges(zoneC.id, startFrom = Some("0"))
val result = testFuture.unsafeRunSync()
result.nextId shouldBe None
result.items shouldBe List()
}
}
}

View File

@@ -1,634 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.UUID
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import vinyldns.core.domain.membership.User
import vinyldns.core.domain.zone.{Zone, ZoneStatus}
import vinyldns.core.TestZoneData.{okZone, testConnection}
import vinyldns.core.TestRecordSetData._
import vinyldns.core.domain.record._
import scala.concurrent.duration._
class DynamoDBRecordSetRepositoryIntegrationSpec
extends DynamoDBIntegrationSpec
with DynamoDBRecordSetConversions {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val recordSetTable = "record-sets-live"
private[repository] val recordSetTableName: String = recordSetTable
private val tableConfig = DynamoDBRepositorySettings(s"$recordSetTable", 30, 30)
private var repo: DynamoDBRecordSetRepository = _
private val users =
for (i <- 1 to 3)
yield User(s"live-test-acct$i", "key", "secret")
private val zones =
for {
acct <- users
i <- 1 to 3
} yield Zone(
s"live-test-${acct.userName}.zone$i.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection
)
private val rsTemplates = Seq(rsOk, aaaa, cname)
private val rsQualifiedStatus = Seq("-dotless", "-dotted.")
private val recordSets =
for {
zone <- zones
rsTemplate <- rsTemplates
rsQualifiedStatus <- rsQualifiedStatus
} yield rsTemplate.copy(
zoneId = zone.id,
name = s"${rsTemplate.typ.toString}-${zone.account}$rsQualifiedStatus",
ttl = 100,
created = DateTime.now(),
id = UUID.randomUUID().toString
)
def setup(): Unit = {
repo = DynamoDBRecordSetRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
// Create all the record sets
val results = recordSets.map(repo.putRecordSet(_)).toList.parSequence
// Wait until all of the data is stored
results.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(recordSetTable)
repo.dynamoDBHelper.deleteTable(request).unsafeRunSync()
}
"DynamoDBRecordSetRepository" should {
"fail to return records with listRecordSets if zoneId not given" in {
val testFuture = repo.listRecordSets(
zoneId = None,
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
assertThrows[UnsupportedDynamoDBRepoFunction](testFuture.unsafeRunSync())
}
"get a record set by id" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
testFuture.unsafeRunSync().recordSets should contain(testRecordSet)
}
"get a record set count" in {
val testRecordSet = recordSets.head
val expected = 6
val testFuture = repo.getRecordSetCount(testRecordSet.zoneId)
testFuture.unsafeRunSync() shouldBe expected
}
"get a record set by record set id and zone id" in {
val testRecordSet = recordSets.head
val testFuture = repo.getRecordSet(testRecordSet.id)
testFuture.unsafeRunSync() shouldBe Some(testRecordSet)
}
"get a record set by zone id, name, type" in {
val testRecordSet = recordSets.head
val testFuture =
repo.getRecordSets(testRecordSet.zoneId, testRecordSet.name, testRecordSet.typ)
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a record set by zone id, case-insensitive name, type" in {
val testRecordSet = recordSets.head
val testFuture = repo.getRecordSets(
testRecordSet.zoneId,
testRecordSet.name.toUpperCase(),
testRecordSet.typ
)
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a fully qualified record set by zone id, trailing dot-insensitive name, type" in {
val testRecordSet = recordSets.find(_.name.endsWith(".")).get
val testFuture =
repo.getRecordSets(testRecordSet.zoneId, testRecordSet.name.dropRight(1), testRecordSet.typ)
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a relative record set by zone id, trailing dot-insensitive name, type" in {
val testRecordSet = recordSets.find(_.name.endsWith("dotless")).get
val testFuture =
repo.getRecordSets(testRecordSet.zoneId, testRecordSet.name.concat("."), testRecordSet.typ)
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a record set by zone id, name" in {
val testRecordSet = recordSets.head
val testFuture = repo.getRecordSetsByName(testRecordSet.zoneId, testRecordSet.name)
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a record set by zone id, case-insensitive name" in {
val testRecordSet = recordSets.head
val testFuture =
repo.getRecordSetsByName(testRecordSet.zoneId, testRecordSet.name.toUpperCase())
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a fully qualified record set by zone id, trailing dot-insensitive name" in {
val testRecordSet = recordSets.find(_.name.endsWith(".")).get
val testFuture =
repo.getRecordSetsByName(testRecordSet.zoneId, testRecordSet.name.dropRight(1))
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"get a relative record set by zone id, trailing dot-insensitive name" in {
val testRecordSet = recordSets.find(_.name.endsWith("dotless")).get
val testFuture =
repo.getRecordSetsByName(testRecordSet.zoneId, testRecordSet.name.concat("."))
testFuture.unsafeRunSync() shouldBe List(testRecordSet)
}
"list record sets with page size of 1 returns recordSets[0] only" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = Some(1),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFuture.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets shouldNot contain(recordSets(1))
foundRecordSet.nextId.get.split('~')(2) shouldBe recordSets(0).id
}
"list record sets with page size of 1 reusing key with page size of 1 returns recordSets[0] and recordSets[1]" in {
val testRecordSet = recordSets.head
val testFutureOne = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = Some(1),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFutureOne.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets shouldNot contain(recordSets(1))
val key = foundRecordSet.nextId
val testFutureTwo = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = key,
maxItems = Some(1),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSetTwo = testFutureTwo.unsafeRunSync()
foundRecordSetTwo.recordSets shouldNot contain(recordSets(0))
foundRecordSetTwo.recordSets should contain(recordSets(1))
foundRecordSetTwo.recordSets shouldNot contain(recordSets(2))
foundRecordSetTwo.nextId.get.split('~')(2) shouldBe recordSets(1).id
}
"list record sets page size of 1 then reusing key with page size of 2 returns recordSets[0], recordSets[1,2]" in {
val testRecordSet = recordSets.head
val testFutureOne = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = Some(1),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFutureOne.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets shouldNot contain(recordSets(1))
val key = foundRecordSet.nextId
val testFutureTwo = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = key,
maxItems = Some(2),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSetTwo = testFutureTwo.unsafeRunSync()
foundRecordSetTwo.recordSets shouldNot contain(recordSets(0))
foundRecordSetTwo.recordSets should contain(recordSets(1))
foundRecordSetTwo.recordSets should contain(recordSets(2))
foundRecordSetTwo.nextId.get.split('~')(2) shouldBe recordSets(2).id
}
"return an empty list and nextId of None when passing last record as start" in {
val testRecordSet = recordSets.head
val testFutureOne = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = Some(6),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFutureOne.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets should contain(recordSets(1))
foundRecordSet.recordSets should contain(recordSets(2))
foundRecordSet.recordSets should contain(recordSets(3))
foundRecordSet.recordSets should contain(recordSets(4))
foundRecordSet.recordSets should contain(recordSets(5))
val key = foundRecordSet.nextId
val testFutureTwo = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = key,
maxItems = Some(6),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSetTwo = testFutureTwo.unsafeRunSync()
foundRecordSetTwo.recordSets shouldBe List()
foundRecordSetTwo.nextId shouldBe None
}
"have nextId of None when exhausting recordSets" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = Some(7),
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFuture.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets should contain(recordSets(1))
foundRecordSet.recordSets should contain(recordSets(2))
foundRecordSet.recordSets should contain(recordSets(3))
foundRecordSet.recordSets should contain(recordSets(4))
foundRecordSet.recordSets should contain(recordSets(5))
foundRecordSet.nextId shouldBe None
}
"only retrieve recordSet with name containing 'AAAA'" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = Some("AAAA"),
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFuture.unsafeRunSync()
foundRecordSet.recordSets shouldNot contain(recordSets(0))
foundRecordSet.recordSets shouldNot contain(recordSets(1))
foundRecordSet.recordSets should contain(recordSets(2))
foundRecordSet.recordSets should contain(recordSets(3))
}
"retrieve all recordSets with names containing 'A'" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = Some("A"),
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
val foundRecordSet = testFuture.unsafeRunSync()
foundRecordSet.recordSets should contain(recordSets(0))
foundRecordSet.recordSets should contain(recordSets(1))
foundRecordSet.recordSets should contain(recordSets(2))
foundRecordSet.recordSets should contain(recordSets(3))
foundRecordSet.recordSets should contain(recordSets(4))
foundRecordSet.recordSets should contain(recordSets(5))
}
"return an empty list if recordName filter had no match" in {
val testRecordSet = recordSets.head
val testFuture = repo.listRecordSets(
zoneId = Some(testRecordSet.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = Some("Dummy"),
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
testFuture.unsafeRunSync().recordSets shouldBe List()
}
"apply a change set" in {
val newRecordSets =
for {
i <- 1 to 1000
} yield aaaa.copy(
zoneId = "big-apply-zone",
name = s"$i.apply.test.",
id = UUID.randomUUID().toString
)
val pendingChanges = newRecordSets.map(makeTestAddChange(_, zones.head))
val bigPendingChangeSet = ChangeSet(pendingChanges)
try {
val f = repo.apply(bigPendingChangeSet)
val apply = f.unsafeRunTimed(1500.seconds)
if (apply.isEmpty) {
throw new RuntimeException("change set apply timed out")
}
// let's fail half of them
val split = pendingChanges.grouped(pendingChanges.length / 2).toSeq
val halfSuccess = split.head.map(_.successful)
val halfFailed = split(1).map(_.failed())
val halfFailedChangeSet = ChangeSet(halfSuccess ++ halfFailed)
val nextUp = repo.apply(halfFailedChangeSet)
val nextUpApply = nextUp.unsafeRunTimed(1500.seconds)
if (nextUpApply.isEmpty) {
throw new RuntimeException("nextUp change set apply timed out")
}
// let's run our query and see how long until we succeed (which will determine
// how long it takes DynamoDB to update its index)
var querySuccessful = false
var retries = 1
var recordSetsResult: List[RecordSet] = Nil
while (!querySuccessful && retries <= 10) {
// if we query now, we should only see the half that succeeded (records for failed creates are deleted)
val rsQuery = repo.listRecordSets(
zoneId = Some("big-apply-zone"),
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
)
recordSetsResult = rsQuery.unsafeRunTimed(30.seconds) match {
case Some(result) => result.recordSets
case None => throw new RuntimeException("Query timed out")
}
querySuccessful = recordSetsResult.length == halfSuccess.length
retries += 1
Thread.sleep(100)
}
querySuccessful shouldBe true
// the result of the query should be the same as those pending that succeeded
val expected = halfSuccess.map(_.recordSet)
recordSetsResult should contain theSameElementsAs expected
} catch {
case e: Throwable =>
e.printStackTrace()
fail("encountered error running apply test")
}
}
"apply successful and pending creates, and delete failed creates" in {
val zone = okZone
val recordForSuccess = RecordSet(
"test-create-converter",
"createSuccess",
RecordType.A,
123,
RecordSetStatus.Active,
DateTime.now
)
val recordForPending = RecordSet(
"test-create-converter",
"createPending",
RecordType.A,
123,
RecordSetStatus.Pending,
DateTime.now
)
val recordForFailed = RecordSet(
"test-create-converter",
"failed",
RecordType.A,
123,
RecordSetStatus.Inactive,
DateTime.now
)
val successfulChange =
RecordSetChange(
zone,
recordForSuccess,
"abc",
RecordSetChangeType.Create,
RecordSetChangeStatus.Complete
)
val pendingChange =
successfulChange.copy(recordSet = recordForPending, status = RecordSetChangeStatus.Pending)
val failedChange =
successfulChange.copy(recordSet = recordForFailed, status = RecordSetChangeStatus.Failed)
// to be deleted - assume this was already saved as pending
val existingPending = failedChange.copy(
recordSet = recordForFailed.copy(status = RecordSetStatus.Pending),
status = RecordSetChangeStatus.Pending
)
repo.apply(ChangeSet(existingPending)).unsafeRunSync()
repo.getRecordSet(failedChange.recordSet.id).unsafeRunSync() shouldBe
Some(existingPending.recordSet)
repo.apply(ChangeSet(Seq(successfulChange, pendingChange, failedChange))).unsafeRunSync()
// success and pending changes have records saved
repo
.getRecordSet(successfulChange.recordSet.id)
.unsafeRunSync() shouldBe
Some(successfulChange.recordSet)
repo
.getRecordSet(pendingChange.recordSet.id)
.unsafeRunSync() shouldBe
Some(pendingChange.recordSet)
// check that the pending record was deleted because of failed record change
repo
.getRecordSet(failedChange.recordSet.id)
.unsafeRunSync() shouldBe None
}
"apply successful updates and revert records for failed updates" in {
val oldSuccess = aaaa.copy(zoneId = "test-update-converter", ttl = 100, id = "success")
val updateSuccess = oldSuccess.copy(ttl = 200)
val oldPending = aaaa.copy(zoneId = "test-update-converter", ttl = 100, id = "pending")
val updatePending = oldPending.copy(ttl = 200, status = RecordSetStatus.PendingUpdate)
val oldFailure = aaaa.copy(zoneId = "test-update-converter", ttl = 100, id = "failed")
val updateFailure = oldFailure.copy(ttl = 200, status = RecordSetStatus.Inactive)
val successfulUpdate = makeCompleteTestUpdateChange(oldSuccess, updateSuccess)
val pendingUpdate = makePendingTestUpdateChange(oldPending, updatePending)
val failedUpdate = pendingUpdate.copy(
recordSet = updateFailure,
updates = Some(oldFailure),
status = RecordSetChangeStatus.Failed
)
val updateChanges = Seq(successfulUpdate, pendingUpdate, failedUpdate)
val updateChangeSet = ChangeSet(updateChanges)
// save old recordsets
val oldAddChanges = updateChanges
.map(
_.copy(changeType = RecordSetChangeType.Create, status = RecordSetChangeStatus.Complete)
)
val oldChangeSet = ChangeSet(oldAddChanges)
repo.apply(oldChangeSet).unsafeRunSync() shouldBe oldChangeSet
// apply updates
repo.apply(updateChangeSet).unsafeRunSync() shouldBe updateChangeSet
// ensure that success and pending updates store the new recordsets
repo
.getRecordSet(successfulUpdate.recordSet.id)
.unsafeRunSync() shouldBe
Some(successfulUpdate.recordSet)
repo
.getRecordSet(pendingUpdate.recordSet.id)
.unsafeRunSync() shouldBe
Some(pendingUpdate.recordSet)
// ensure that the failed update stores the old recordset
repo
.getRecordSet(failedUpdate.recordSet.id)
.unsafeRunSync() shouldBe
failedUpdate.updates
repo
.getRecordSet(failedUpdate.recordSet.id)
.unsafeRunSync() shouldNot
be(Some(failedUpdate.recordSet))
}
"apply successful deletes, save pending deletes, and revert failed deletes" in {
val oldSuccess = aaaa.copy(zoneId = "test-update-converter", id = "success")
val oldPending = aaaa.copy(zoneId = "test-update-converter", id = "pending")
val oldFailure = aaaa.copy(zoneId = "test-update-converter", id = "failed")
val successfulDelete =
makePendingTestDeleteChange(oldSuccess).copy(status = RecordSetChangeStatus.Complete)
val pendingDelete =
makePendingTestDeleteChange(oldPending).copy(status = RecordSetChangeStatus.Pending)
val failedDelete =
makePendingTestDeleteChange(oldFailure).copy(status = RecordSetChangeStatus.Failed)
val deleteChanges = Seq(successfulDelete, pendingDelete, failedDelete)
val deleteChangeSet = ChangeSet(deleteChanges)
// save old recordsets
val oldAddChanges = deleteChanges
.map(
_.copy(changeType = RecordSetChangeType.Create, status = RecordSetChangeStatus.Complete)
)
val oldChangeSet = ChangeSet(oldAddChanges)
repo.apply(oldChangeSet).unsafeRunSync() shouldBe oldChangeSet
// apply deletes
repo.apply(deleteChangeSet).unsafeRunSync() shouldBe deleteChangeSet
// ensure that successful change deletes the recordset
repo
.getRecordSet(successfulDelete.recordSet.id)
.unsafeRunSync() shouldBe None
// ensure that pending change saves the recordset
repo
.getRecordSet(pendingDelete.recordSet.id)
.unsafeRunSync() shouldBe
Some(pendingDelete.recordSet)
// ensure that failed delete keeps the recordset
repo
.getRecordSet(failedDelete.recordSet.id)
.unsafeRunSync() shouldBe
failedDelete.updates
}
}
}

View File

@@ -1,84 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest
import com.typesafe.config.ConfigFactory
import org.joda.time.DateTime
import vinyldns.core.crypto.NoOpCrypto
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.membership.{User, UserChange}
class DynamoDBUserChangeRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private val USER_CHANGE_TABLE = "user-changes"
private val tableConfig = DynamoDBRepositorySettings(s"$USER_CHANGE_TABLE", 30, 30)
private val testUser = User(
id = "test-user",
userName = "testUser",
firstName = Some("Test"),
lastName = Some("User"),
email = Some("test@user.com"),
created = DateTime.now,
isSuper = false,
accessKey = "test",
secretKey = "user"
)
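// The repository is built once for the whole spec; setup() below is a no-op because these tests need no seed data.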
private val repo: DynamoDBUserChangeRepository =
DynamoDBUserChangeRepository(
tableConfig,
dynamoIntegrationConfig,
new NoOpCrypto(ConfigFactory.load())
).unsafeRunSync()
def setup(): Unit = ()
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(USER_CHANGE_TABLE)
val deleteTables = repo.dynamoDBHelper.deleteTable(request)
deleteTables.unsafeRunSync()
}
"DynamoDBUserChangeRepository" should {
"save a user change" in {
val auth = AuthPrincipal(testUser, Seq.empty)
val c = UserChange.CreateUser(testUser, auth.userId, DateTime.now, "foo")
val t = for {
_ <- repo.save(c)
retrieved <- repo.get(c.id)
} yield retrieved
t.unsafeRunSync() shouldBe Some(c)
}
"save a change for a modified user" in {
val auth = AuthPrincipal(testUser, Seq.empty)
val updated = testUser.copy(userName = testUser.userName + "-updated")
val c = UserChange.UpdateUser(updated, auth.userId, DateTime.now, testUser, "foo")
val t = for {
_ <- repo.save(c)
retrieved <- repo.get(c.id)
} yield retrieved
t.unsafeRunSync() shouldBe Some(c)
}
}
}

View File

@@ -1,202 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest
import com.typesafe.config.ConfigFactory
import vinyldns.core.crypto.NoOpCrypto
import vinyldns.core.domain.membership.{LockStatus, User}
import scala.concurrent.duration._
class DynamoDBUserRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val userTable = "users-live"
private val tableConfig = DynamoDBRepositorySettings(s"$userTable", 30, 30)
private var repo: DynamoDBUserRepository = _
private val testUserIds = (for { i <- 0 to 100 } yield s"test-user-$i").toList.sorted
private val users = testUserIds.map { id =>
User(id = id, userName = "name" + id, accessKey = s"abc$id", secretKey = "123")
}
def setup(): Unit = {
repo = DynamoDBUserRepository(
tableConfig,
dynamoIntegrationConfig,
new NoOpCrypto(ConfigFactory.load())
).unsafeRunSync()
// Create all the users
val results = users.map(repo.save(_)).parSequence
// Wait until all of the data is stored
results.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(userTable)
repo.dynamoDBHelper.deleteTable(request).unsafeRunSync()
}
"DynamoDBUserRepository" should {
"retrieve a user" in {
val f = repo.getUser(testUserIds.head)
f.unsafeRunSync() shouldBe Some(users.head)
}
"returns None when the user does not exist" in {
val f = repo.getUser("does not exists")
f.unsafeRunSync() shouldBe None
}
"getUsers omits all non existing users" in {
val getUsers = repo.getUsers(Set("notFound", testUserIds.head), None, Some(100))
val result = getUsers.unsafeRunSync()
result.users.map(_.id) should contain theSameElementsAs Set(testUserIds.head)
result.users.map(_.id) should not contain "notFound"
}
"returns all the users" in {
val f = repo.getUsers(testUserIds.toSet, None, None)
val retrieved = f.unsafeRunSync()
retrieved.users should contain theSameElementsAs users
retrieved.lastEvaluatedId shouldBe None
}
"only return requested users" in {
val evenUsers = users.filter(_.id.takeRight(1).toInt % 2 == 0)
val f = repo.getUsers(evenUsers.map(_.id).toSet, None, None)
val retrieved = f.unsafeRunSync()
retrieved.users should contain theSameElementsAs evenUsers
retrieved.lastEvaluatedId shouldBe None
}
"start at the exclusive start key" in {
val f = repo.getUsers(testUserIds.toSet, Some(testUserIds(5)), None)
val retrieved = f.unsafeRunSync()
retrieved.users should not contain users(5) //start key is exclusive
retrieved.users should contain theSameElementsAs users.slice(6, users.length)
retrieved.lastEvaluatedId shouldBe None
}
"only return the number of items equal to the limit" in {
val f = repo.getUsers(testUserIds.toSet, None, Some(5))
val retrieved = f.unsafeRunSync()
retrieved.users.size shouldBe 5
retrieved.users should contain theSameElementsAs users.take(5)
}
"returns the correct lastEvaluatedKey" in {
val f = repo.getUsers(testUserIds.toSet, None, Some(5))
val retrieved = f.unsafeRunSync()
retrieved.lastEvaluatedId shouldBe Some(users(4).id) // zero-based index, so this is the fifth user
retrieved.users should contain theSameElementsAs users.take(5)
}
"return the user if the matching access key" in {
val f = repo.getUserByAccessKey(users.head.accessKey)
f.unsafeRunSync() shouldBe Some(users.head)
}
"returns None not user has a matching access key" in {
val f = repo.getUserByAccessKey("does not exists")
f.unsafeRunSync() shouldBe None
}
"returns the super user flag when true" in {
val testUser = User(
userName = "testSuper",
accessKey = "testSuper",
secretKey = "testUser",
isSuper = true
)
val saved = repo.save(testUser).unsafeRunSync()
val result = repo.getUser(saved.id).unsafeRunSync()
result shouldBe Some(testUser)
result.get.isSuper shouldBe true
}
"returns the super user flag when false" in {
val testUser = User(userName = "testSuper", accessKey = "testSuper", secretKey = "testUser")
val saved = repo.save(testUser).unsafeRunSync()
val result = repo.getUser(saved.id).unsafeRunSync()
result shouldBe Some(testUser)
result.get.isSuper shouldBe false
}
"returns the locked flag when true" in {
val testUser = User(
userName = "testSuper",
accessKey = "testSuper",
secretKey = "testUser",
lockStatus = LockStatus.Locked
)
val saved = repo.save(testUser).unsafeRunSync()
val result = repo.getUser(saved.id).unsafeRunSync()
result shouldBe Some(testUser)
result.get.lockStatus shouldBe LockStatus.Locked
}
"returns the locked flag when false" in {
val f = repo.getUserByAccessKey(users.head.accessKey).unsafeRunSync()
f shouldBe Some(users.head)
f.get.lockStatus shouldBe LockStatus.Unlocked
}
"returns the support flag when true" in {
val testUser = User(
userName = "testSuper",
accessKey = "testSuper",
secretKey = "testUser",
isSupport = true
)
val saved = repo.save(testUser).unsafeRunSync()
val result = repo.getUser(saved.id).unsafeRunSync()
result shouldBe Some(testUser)
result.get.isSupport shouldBe true
}
"returns the support flag when false" in {
val f = repo.getUserByAccessKey(users.head.accessKey).unsafeRunSync()
f shouldBe Some(users.head)
f.get.isSupport shouldBe false
}
"returns the test flag when true" in {
val testUser = User(userName = "test", accessKey = "test", secretKey = "test", isTest = true)
val saved = repo.save(testUser).unsafeRunSync()
val result = repo.getUser(saved.id).unsafeRunSync()
result shouldBe Some(testUser)
result.get.isTest shouldBe true
}
"returns the test flag when false (default)" in {
val f = repo.getUserByAccessKey(users.head.accessKey).unsafeRunSync()
f shouldBe Some(users.head)
f.get.isTest shouldBe false
}
}
}

View File

@@ -1,140 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import vinyldns.core.domain.membership.User
import vinyldns.core.domain.zone._
import vinyldns.core.TestZoneData._
import vinyldns.core.TestMembershipData.now
import scala.concurrent.duration._
import scala.util.Random
class DynamoDBZoneChangeRepositoryIntegrationSpec extends DynamoDBIntegrationSpec {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val zoneChangeTable = "zone-changes-live"
private val tableConfig = DynamoDBRepositorySettings(s"$zoneChangeTable", 30, 30)
private var repo: DynamoDBZoneChangeRepository = _
private val goodUser = User(s"live-test-acct", "key", "secret")
private val okZones = for { i <- 1 to 3 } yield Zone(
s"${goodUser.userName}.zone$i.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection
)
private val zones = okZones
private val statuses = {
import vinyldns.core.domain.zone.ZoneChangeStatus._
Pending :: Complete :: Failed :: Synced :: Nil
}
private val changes = for { zone <- zones; status <- statuses } yield ZoneChange(
zone,
zone.account,
ZoneChangeType.Update,
status,
created = now.minusSeconds(Random.nextInt(1000))
)
def setup(): Unit = {
repo = DynamoDBZoneChangeRepository(tableConfig, dynamoIntegrationConfig).unsafeRunSync()
// Create all the zone changes
val results = changes.map(repo.save(_)).toList.parSequence
results.unsafeRunTimed(5.minutes).getOrElse(fail("timeout waiting for data load"))
}
def tearDown(): Unit = {
val request = new DeleteTableRequest().withTableName(zoneChangeTable)
repo.dynamoDBHelper.deleteTable(request).unsafeRunSync()
}
"DynamoDBRepository" should {
implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_.isAfter(_))
"get all changes for a zone" in {
val retrieved = repo.listZoneChanges(okZones(1).id).unsafeRunSync()
val expectedChanges = changes.filter(_.zoneId == okZones(1).id).sortBy(_.created)
retrieved.items should equal(expectedChanges)
}
"get zone changes with a page size of one" in {
val testFuture = repo.listZoneChanges(zoneId = okZones(1).id, startFrom = None, maxItems = 1)
val retrieved = testFuture.unsafeRunSync()
val result = retrieved.items
val expectedChanges = changes.filter(_.zoneId == okZones(1).id)
result.size shouldBe 1
expectedChanges should contain(result.head)
}
"get zone changes with page size of one and reuse key to get another page with size of two" in {
val testFuture = repo.listZoneChanges(zoneId = okZones(1).id, startFrom = None, maxItems = 1)
val retrieved = testFuture.unsafeRunSync()
val result1 = retrieved.items.map(_.id).toSet
val key = retrieved.nextId
val testFuture2 =
repo.listZoneChanges(zoneId = okZones(1).id, startFrom = key, maxItems = 2)
val result2 = testFuture2.unsafeRunSync().items
val expectedChanges =
changes.filter(_.zoneId == okZones(1).id).sortBy(_.created).slice(1, 3)
result2.size shouldBe 2
result2 should equal(expectedChanges)
result2 shouldNot contain(result1.head)
}
"return an empty list and nextId of None when passing last record as start" in {
val listZones = for {
testFuture <- repo.listZoneChanges(zoneId = okZones(1).id, startFrom = None, maxItems = 4)
testFuture2 <- repo.listZoneChanges(zoneId = okZones(1).id, startFrom = testFuture.nextId)
} yield testFuture2
val retrieved = listZones.unsafeRunSync()
val result = retrieved.items
result shouldBe List()
retrieved.nextId shouldBe None
}
"have nextId of None when exhausting record changes" in {
val testFuture = repo.listZoneChanges(zoneId = okZones(1).id, startFrom = None, maxItems = 10)
val retrieved = testFuture.unsafeRunSync()
val result = retrieved.items
val expectedChanges = changes.filter(_.zoneId == okZones(1).id).sortBy(_.created)
result.size shouldBe 4
result should equal(expectedChanges)
retrieved.nextId shouldBe None
}
}
}

View File

@@ -1,41 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDBClient, AmazonDynamoDBClientBuilder}
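// Builds an AmazonDynamoDBClient from the configured key, secret, endpoint, and region (for example, a DynamoDB Local endpoint).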
object DynamoDBClient {
def apply(dynamoDBDataStoreSettings: DynamoDBDataStoreSettings): AmazonDynamoDBClient = {
val dynamoAKID = dynamoDBDataStoreSettings.key
val dynamoSecret = dynamoDBDataStoreSettings.secret
val dynamoEndpoint = dynamoDBDataStoreSettings.endpoint
val dynamoRegion = dynamoDBDataStoreSettings.region
System.getProperties.setProperty("aws.accessKeyId", dynamoAKID)
System.getProperties.setProperty("aws.secretKey", dynamoSecret)
val credentials = new BasicAWSCredentials(dynamoAKID, dynamoSecret)
AmazonDynamoDBClientBuilder
.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withEndpointConfiguration(new EndpointConfiguration(dynamoEndpoint, dynamoRegion))
.build()
.asInstanceOf[AmazonDynamoDBClient]
}
}

View File

@@ -1,151 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import cats.implicits._
import cats.effect.{ContextShift, IO}
import org.slf4j.LoggerFactory
import vinyldns.core.repository._
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.batch.BatchChangeRepository
import vinyldns.core.domain.membership._
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetRepository}
import vinyldns.core.domain.zone.{ZoneChangeRepository, ZoneRepository}
import vinyldns.core.repository.RepositoryName._
import vinyldns.core.health.HealthCheck._
import vinyldns.core.task.TaskRepository
import pureconfig.ConfigSource
import cats.effect.Blocker
class DynamoDBDataStoreProvider extends DataStoreProvider {
private val logger = LoggerFactory.getLogger(classOf[DynamoDBDataStoreProvider])
private val implementedRepositories =
Set(group, membership, groupChange, recordChange, zoneChange, userChange)
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
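// Loads the DynamoDB connection settings, validates that only implemented repositories are configured, and initializes each configured repository.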
def load(config: DataStoreConfig, crypto: CryptoAlgebra): IO[LoadedDataStore] =
for {
settingsConfig <- Blocker[IO].use(
ConfigSource.fromConfig(config.settings).loadF[IO, DynamoDBDataStoreSettings](_)
)
_ <- validateRepos(config.repositories)
repoConfigs <- loadRepoConfigs(config.repositories)
dataStore <- initializeRepos(settingsConfig, repoConfigs, crypto)
} yield new LoadedDataStore(dataStore, IO.unit, checkHealth(settingsConfig))
def validateRepos(reposConfig: RepositoriesConfig): IO[Unit] = {
val invalid = reposConfig.keys.diff(implementedRepositories)
if (invalid.isEmpty) {
IO.unit
} else {
val error = s"Invalid config provided to dynamodb; unimplemented repos included: $invalid"
IO.raiseError(DataStoreStartupError(error))
}
}
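// Reads the per-repository settings (table name and provisioned read/write throughput) for every repository named in the config.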
def loadRepoConfigs(
config: RepositoriesConfig
): IO[Map[RepositoryName, DynamoDBRepositorySettings]] = {
def loadConfigIfDefined(
repositoryName: RepositoryName
): Option[IO[(RepositoryName, DynamoDBRepositorySettings)]] =
config.get(repositoryName).map { repoConf =>
Blocker[IO].use(
ConfigSource
.fromConfig(repoConf)
.loadF[IO, DynamoDBRepositorySettings](_)
.map(repositoryName -> _)
)
}
val activeRepoSettings = RepositoryName.values.toList.flatMap(loadConfigIfDefined).parSequence
activeRepoSettings.map(_.toMap)
}
def initializeRepos(
dynamoConfig: DynamoDBDataStoreSettings,
repoSettings: Map[RepositoryName, DynamoDBRepositorySettings],
crypto: CryptoAlgebra
): IO[DataStore] = {
def initializeSingleRepo[T <: Repository](
repoName: RepositoryName,
fn: DynamoDBRepositorySettings => IO[T]
): IO[Option[T]] =
repoSettings
.get(repoName)
.map { configuredOn =>
for {
_ <- IO(logger.error(s"Loading dynamodb repo for type: $repoName"))
repo <- fn(configuredOn)
_ <- IO(logger.error(s"Completed dynamodb load for type: $repoName"))
} yield repo
}
.sequence
(
initializeSingleRepo[UserRepository](
user,
DynamoDBUserRepository.apply(_, dynamoConfig, crypto)
),
initializeSingleRepo[GroupRepository](group, DynamoDBGroupRepository.apply(_, dynamoConfig)),
initializeSingleRepo[MembershipRepository](
membership,
DynamoDBMembershipRepository.apply(_, dynamoConfig)
),
initializeSingleRepo[GroupChangeRepository](
groupChange,
DynamoDBGroupChangeRepository.apply(_, dynamoConfig)
),
initializeSingleRepo[RecordSetRepository](
recordSet,
DynamoDBRecordSetRepository.apply(_, dynamoConfig)
),
initializeSingleRepo[RecordChangeRepository](
recordChange,
DynamoDBRecordChangeRepository.apply(_, dynamoConfig)
),
initializeSingleRepo[ZoneChangeRepository](
zoneChange,
DynamoDBZoneChangeRepository.apply(_, dynamoConfig)
),
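// Zone, batch change, and task repositories are not implemented for DynamoDB, so they are always None here.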
IO.pure[Option[ZoneRepository]](None),
IO.pure[Option[BatchChangeRepository]](None),
initializeSingleRepo[UserChangeRepository](
userChange,
DynamoDBUserChangeRepository.apply(_, dynamoConfig, crypto)
),
IO.pure[Option[TaskRepository]](None)
).parMapN {
DataStore.apply
}
}
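// The health check simply lists a single table; any error marks the DynamoDB data store as unhealthy.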
private def checkHealth(dynamoConfig: DynamoDBDataStoreSettings): HealthCheck =
IO {
val client = DynamoDBClient(dynamoConfig)
client.listTables(1)
}.attempt.asHealthCheck(classOf[DynamoDBDataStoreProvider])
}

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
final case class DynamoDBDataStoreSettings(
key: String,
secret: String,
endpoint: String,
region: String
)
final case class DynamoDBRepositorySettings(
tableName: String,
provisionedReads: Long,
provisionedWrites: Long
)

View File

@@ -1,179 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.nio.ByteBuffer
import java.util.HashMap
import cats.effect._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.domain.membership.{GroupChange, GroupChangeRepository, ListGroupChangesResults}
import vinyldns.core.protobuf.GroupProtobufConversions
import vinyldns.core.route.Monitored
import vinyldns.proto.VinylDNSProto
import scala.collection.JavaConverters._
object DynamoDBGroupChangeRepository {
private[repository] val GROUP_CHANGE_ID = "group_change_id"
private[repository] val GROUP_ID = "group_id"
private[repository] val CREATED = "created"
private[repository] val GROUP_CHANGE_ATTR = "group_change_blob"
private val GROUP_ID_AND_CREATED_INDEX = "GROUP_ID_AND_CREATED_INDEX"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBGroupChangeRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger(classOf[DynamoDBGroupChangeRepository])
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(GROUP_ID, "S"),
new AttributeDefinition(CREATED, "N"),
new AttributeDefinition(GROUP_CHANGE_ID, "S")
)
val secondaryIndexes = Seq(
new GlobalSecondaryIndex()
.withIndexName(GROUP_ID_AND_CREATED_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(GROUP_ID, KeyType.HASH),
new KeySchemaElement(CREATED, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(new KeySchemaElement(GROUP_CHANGE_ID, KeyType.HASH))
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBGroupChangeRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBGroupChangeRepository private[repository] (
groupChangeTableName: String,
val dynamoDBHelper: DynamoDBHelper
) extends GroupChangeRepository
with Monitored
with GroupProtobufConversions {
import DynamoDBGroupChangeRepository._
val log: Logger = LoggerFactory.getLogger(classOf[DynamoDBGroupChangeRepository])
def save(groupChange: GroupChange): IO[GroupChange] =
monitor("repo.GroupChange.save") {
log.info(s"Saving groupChange ${groupChange.id}.")
val item = toItem(groupChange)
val request = new PutItemRequest().withTableName(groupChangeTableName).withItem(item)
dynamoDBHelper.putItem(request).map(_ => groupChange)
}
def getGroupChange(groupChangeId: String): IO[Option[GroupChange]] =
monitor("repo.GroupChange.getGroupChange") {
log.info(s"Getting groupChange $groupChangeId.")
val key = new HashMap[String, AttributeValue]()
key.put(GROUP_CHANGE_ID, new AttributeValue(groupChangeId))
val request = new GetItemRequest().withTableName(groupChangeTableName).withKey(key)
dynamoDBHelper.getItem(request).map { result =>
Option(result.getItem).map(fromItem)
}
}
def getGroupChanges(
groupId: String,
startFrom: Option[String],
maxItems: Int
): IO[ListGroupChangesResults] =
monitor("repo.GroupChange.getGroupChanges") {
log.info("Getting groupChanges")
// millisecond string
val startTime = startFrom.getOrElse(DateTime.now.getMillis.toString)
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":group_id", new AttributeValue(groupId))
expressionAttributeValues.put(":created", new AttributeValue().withN(startTime))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#group_id_attribute", GROUP_ID)
expressionAttributeNames.put("#created_attribute", CREATED)
val keyConditionExpression: String =
"#group_id_attribute = :group_id AND #created_attribute < :created"
val queryRequest = new QueryRequest()
.withTableName(groupChangeTableName)
.withIndexName(GROUP_ID_AND_CREATED_INDEX)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
.withScanIndexForward(false) // return in descending order by sort key
.withLimit(maxItems)
dynamoDBHelper.query(queryRequest).map { queryResult =>
val items = queryResult.getItems().asScala.map(fromItem).toList
val lastEvaluatedId = Option(queryResult.getLastEvaluatedKey)
.flatMap(key => key.asScala.get(CREATED).map(_.getN))
ListGroupChangesResults(items, lastEvaluatedId)
}
}
private[repository] def toItem(groupChange: GroupChange) = {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(GROUP_CHANGE_ID, new AttributeValue(groupChange.id))
item.put(GROUP_ID, new AttributeValue(groupChange.newGroup.id))
item.put(CREATED, new AttributeValue().withN(groupChange.created.getMillis.toString)) // # of millis from epoch
val groupChangeBlob = toPB(groupChange).toByteArray
val bb = ByteBuffer.allocate(groupChangeBlob.length) //convert byte array to byte buffer
bb.put(groupChangeBlob)
bb.position(0)
item.put(GROUP_CHANGE_ATTR, new AttributeValue().withB(bb))
item
}
private[repository] def fromItem(item: java.util.Map[String, AttributeValue]) =
try {
val groupChangeBlob = item.get(GROUP_CHANGE_ATTR)
fromPB(VinylDNSProto.GroupChange.parseFrom(groupChangeBlob.getB.array()))
} catch {
case ex: Throwable =>
log.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
}

View File

@@ -1,281 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import java.util.HashMap
import cats.effect._
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model.{CreateTableRequest, Projection, _}
import org.joda.time.DateTime
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.domain.membership.GroupStatus.GroupStatus
import vinyldns.core.domain.membership.{Group, GroupRepository, GroupStatus}
import vinyldns.core.route.Monitored
import scala.collection.JavaConverters._
object DynamoDBGroupRepository {
private[repository] val GROUP_ID = "group_id"
private val NAME = "name"
private val EMAIL = "email"
private val DESCRIPTION = "desc"
private val CREATED = "created"
private val STATUS = "status"
private val MEMBER_IDS = "member_ids"
private val ADMIN_IDS = "admin_ids"
private val GROUP_NAME_INDEX = "group_name_index"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBGroupRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger(classOf[DynamoDBGroupRepository])
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(GROUP_ID, "S"),
new AttributeDefinition(NAME, "S")
)
val secondaryIndexes = Seq(
new GlobalSecondaryIndex()
.withIndexName(GROUP_NAME_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(new KeySchemaElement(NAME, KeyType.HASH))
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(new KeySchemaElement(GROUP_ID, KeyType.HASH))
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBGroupRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBGroupRepository private[repository] (
groupTableName: String,
val dynamoDBHelper: DynamoDBHelper
) extends GroupRepository
with Monitored {
import DynamoDBGroupRepository._
val log: Logger = LoggerFactory.getLogger(classOf[DynamoDBGroupRepository])
def save(group: Group): IO[Group] =
monitor("repo.Group.save") {
log.info(s"Saving group ${group.id} ${group.name}.")
val item = toItem(group)
val request = new PutItemRequest().withTableName(groupTableName).withItem(item)
dynamoDBHelper.putItem(request).map(_ => group)
}
def delete(group: Group): IO[Group] =
monitor("repo.Group.delete") {
log.info(s"Deleting group ${group.id} ${group.name}.")
val key = new HashMap[String, AttributeValue]()
key.put(GROUP_ID, new AttributeValue(group.id))
val request = new DeleteItemRequest().withTableName(groupTableName).withKey(key)
dynamoDBHelper.deleteItem(request).map(_ => group)
}
/*Looks up a group. If the group is not found, or if the group's status is Deleted, will return None */
def getGroup(groupId: String): IO[Option[Group]] =
monitor("repo.Group.getGroup") {
log.info(s"Getting group $groupId.")
val key = new HashMap[String, AttributeValue]()
key.put(GROUP_ID, new AttributeValue(groupId))
val request = new GetItemRequest().withTableName(groupTableName).withKey(key)
dynamoDBHelper
.getItem(request)
.map { result =>
Option(result.getItem)
.map(fromItem)
.filter(_.status != GroupStatus.Deleted)
}
}
def getGroups(groupIds: Set[String]): IO[Set[Group]] = {
def toBatchGetItemRequest(groupIds: Set[String]): BatchGetItemRequest = {
val allKeys = new util.ArrayList[util.Map[String, AttributeValue]]()
for {
groupId <- groupIds
} {
val key = new util.HashMap[String, AttributeValue]()
key.put(GROUP_ID, new AttributeValue(groupId))
allKeys.add(key)
}
val keysAndAttributes = new KeysAndAttributes().withKeys(allKeys)
val request = new util.HashMap[String, KeysAndAttributes]()
request.put(groupTableName, keysAndAttributes)
new BatchGetItemRequest().withRequestItems(request)
}
def parseGroups(result: BatchGetItemResult): Set[Group] = {
val groupAttributes = result.getResponses.asScala.get(groupTableName)
groupAttributes match {
case None =>
Set()
case Some(items) =>
items.asScala.toSet.map(fromItem).filter(_.status != GroupStatus.Deleted)
}
}
monitor("repo.Group.getGroups") {
log.info(s"Getting groups by id $groupIds")
// Group the group ids into batches of 100, that is the max size of the BatchGetItemRequest
val batches = groupIds.grouped(100).toSet
val batchGets = batches.map(toBatchGetItemRequest)
// run the batch gets in sequence, one group after another
val batchGetIo = batchGets.map(dynamoDBHelper.batchGetItem)
val allBatches: IO[List[BatchGetItemResult]] = batchGetIo.toList.sequence
val allGroups = allBatches.map { batchGetItemResults =>
batchGetItemResults.flatMap(parseGroups)
}
allGroups.map(_.toSet)
}
}
def getAllGroups(): IO[Set[Group]] =
monitor("repo.Group.getAllGroups") {
log.info(s"getting all group IDs")
// filtering NOT Deleted because there is no case insensitive filter. we later filter
// the response in case anything got through
val scanRequest = new ScanRequest()
.withTableName(groupTableName)
.withFilterExpression(s"NOT (#filtername = :del)")
.withExpressionAttributeNames(Map("#filtername" -> STATUS).asJava)
.withExpressionAttributeValues(Map(":del" -> new AttributeValue("Deleted")).asJava)
val scan = for {
start <- IO(System.currentTimeMillis())
groupsScan <- dynamoDBHelper.scanAll(scanRequest)
end <- IO(System.currentTimeMillis())
_ <- IO(log.debug(s"getAllGroups groups scan time: ${end - start} millis"))
} yield groupsScan
scan.map { results =>
val startTime = System.currentTimeMillis()
val groups = results
.flatMap(_.getItems.asScala.map(fromItem))
.filter(_.status == GroupStatus.Active)
.toSet
val duration = System.currentTimeMillis() - startTime
log.debug(s"getAllGroups fromItem duration = $duration millis")
groups
}
}
def getGroupByName(groupName: String): IO[Option[Group]] =
monitor("repo.Group.getGroupByName") {
log.info(s"Getting group by name $groupName")
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":name", new AttributeValue(groupName))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#name_attribute", NAME)
val keyConditionExpression: String = "#name_attribute = :name"
val queryRequest = new QueryRequest()
.withTableName(groupTableName)
.withIndexName(GROUP_NAME_INDEX)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
dynamoDBHelper.query(queryRequest).map(firstAvailableGroup)
}
/* Filters the results from the query so we don't return Deleted groups */
private def toAvailableGroups(queryResult: QueryResult): List[Group] =
queryResult.getItems.asScala.map(fromItem).filter(_.status != GroupStatus.Deleted).toList
/* Filters the results from the query so we don't return Deleted groups */
private def firstAvailableGroup(queryResult: QueryResult): Option[Group] =
toAvailableGroups(queryResult).headOption
private[repository] def toItem(group: Group) = {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(GROUP_ID, new AttributeValue(group.id))
item.put(NAME, new AttributeValue(group.name))
item.put(EMAIL, new AttributeValue(group.email))
item.put(CREATED, new AttributeValue().withN(group.created.getMillis.toString))
val descAttr =
group.description.map(new AttributeValue(_)).getOrElse(new AttributeValue().withNULL(true))
item.put(DESCRIPTION, descAttr)
item.put(STATUS, new AttributeValue(group.status.toString))
item.put(MEMBER_IDS, new AttributeValue().withSS(group.memberIds.asJavaCollection))
item.put(ADMIN_IDS, new AttributeValue().withSS(group.adminUserIds.asJavaCollection))
item
}
private[repository] def fromItem(item: java.util.Map[String, AttributeValue]) = {
val ActiveStatus = "active"
def groupStatus(str: String): GroupStatus =
if (str.toLowerCase == ActiveStatus) GroupStatus.Active else GroupStatus.Deleted
try {
Group(
item.get(NAME).getS,
item.get(EMAIL).getS,
if (item.get(DESCRIPTION) == null) None else Option(item.get(DESCRIPTION).getS),
item.get(GROUP_ID).getS,
new DateTime(item.get(CREATED).getN.toLong),
groupStatus(item.get(STATUS).getS),
item.get(MEMBER_IDS).getSS.asScala.toSet,
item.get(ADMIN_IDS).getSS.asScala.toSet
)
} catch {
case ex: Throwable =>
log.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
}
}

View File

@@ -1,325 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.Collections
import cats.effect._
import cats.syntax.all._
import com.amazonaws.AmazonWebServiceRequest
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.amazonaws.services.dynamodbv2.model._
import com.amazonaws.services.dynamodbv2.util.TableUtils
import org.slf4j.Logger
import vinyldns.core.VinylDNSMetrics
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
private class RetryStateHolder(var retries: Int = 10, var backoff: FiniteDuration = 1.millis)
case class DynamoDBRetriesExhaustedException(msg: String) extends Throwable(msg)
case class UnsupportedDynamoDBRepoFunction(msg: String) extends Throwable(msg)
class UnexpectedDynamoResponseException(message: String, cause: Throwable)
extends Exception(message: String, cause: Throwable)
trait DynamoUtils {
def createTableIfNotExists(dynamoDB: AmazonDynamoDBClient, req: CreateTableRequest): IO[Boolean]
def waitUntilActive(dynamoDB: AmazonDynamoDBClient, tableName: String): IO[Unit]
}
/* Used to provide an exponential backoff in the event of a Provisioned Throughput Exception */
class DynamoDBHelper(dynamoDB: AmazonDynamoDBClient, log: Logger) {
private[repository] val retryCount: Int = 10
private val retryBackoff: FiniteDuration = 1.millis
private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
private[repository] val provisionedThroughputMeter =
VinylDNSMetrics.metricsRegistry.meter("dynamo.provisionedThroughput")
private[repository] val retriesExceededMeter =
VinylDNSMetrics.metricsRegistry.meter("dynamo.retriesExceeded")
private[repository] val dynamoUnexpectedFailuresMeter =
VinylDNSMetrics.metricsRegistry.meter("dynamo.unexpectedFailure")
private[repository] val callRateMeter = VinylDNSMetrics.metricsRegistry.meter("dynamo.callRate")
private[repository] val dynamoUtils = new DynamoUtils {
def waitUntilActive(dynamoDB: AmazonDynamoDBClient, tableName: String): IO[Unit] =
IO(TableUtils.waitUntilActive(dynamoDB, tableName))
def createTableIfNotExists(
dynamoDB: AmazonDynamoDBClient,
req: CreateTableRequest
): IO[Boolean] =
IO(TableUtils.createTableIfNotExists(dynamoDB, req))
}
def shutdown(): Unit = dynamoDB.shutdown()
private[repository] def send[In <: AmazonWebServiceRequest, Out](aws: In, func: In => Out)(
implicit d: Describe[_ >: In]
): IO[Out] = {
def name = d.desc(aws)
def sendSingle(retryState: RetryStateHolder): IO[Out] =
IO {
callRateMeter.mark()
func(aws)
}.handleErrorWith {
case _: ProvisionedThroughputExceededException if retryState.retries > 0 =>
provisionedThroughputMeter.mark()
val backoff = retryState.backoff
retryState.retries -= 1
retryState.backoff *= 2
log.warn(s"provisioned throughput exceeded for aws request $name")
IO.sleep(backoff) *> sendSingle(retryState)
case _: ProvisionedThroughputExceededException if retryState.retries == 0 =>
retriesExceededMeter.mark()
log.error(s"exhausted retries for aws request $name")
IO.raiseError(DynamoDBRetriesExhaustedException(s"Exhausted retries for $name"))
case other =>
dynamoUnexpectedFailuresMeter.mark()
val n = name
log.error(s"failure while executing $n", other)
IO.raiseError(other)
}
val state = new RetryStateHolder(retryCount, retryBackoff)
sendSingle(state)
}
/* Describe is used to stringify a web service request, very useful for logging */
trait Describe[T] {
def desc(t: T): String
}
object Describe {
implicit object GenericDescribe extends Describe[AmazonWebServiceRequest] {
def desc(aws: AmazonWebServiceRequest): String = aws.getClass.getSimpleName
}
}
implicit object DescribeDescribe extends Describe[DescribeTableRequest] {
def desc(aws: DescribeTableRequest): String = s"DescribeTableRequest(${aws.getTableName})"
}
implicit object QueryDescribe extends Describe[QueryRequest] {
def desc(aws: QueryRequest): String =
s"QueryRequest(${aws.getTableName},${aws.getExpressionAttributeValues})"
}
implicit object PutItemDescribe extends Describe[PutItemRequest] {
def desc(aws: PutItemRequest): String = s"PutItemRequest(${aws.getTableName})"
}
implicit object DeleteDescribe extends Describe[DeleteItemRequest] {
def desc(aws: DeleteItemRequest): String = s"DeleteItemRequest(${aws.getTableName})"
}
implicit object BatchGetItemDescribe extends Describe[BatchGetItemRequest] {
def desc(aws: BatchGetItemRequest): String = {
val table = aws.getRequestItems.asScala.headOption.getOrElse("unknown table")
s"BatchGetItemRequest($table, ${aws.getRequestItems.size})"
}
}
implicit object BatchWriteItemDescribe extends Describe[BatchWriteItemRequest] {
def desc(aws: BatchWriteItemRequest): String = {
val table = aws.getRequestItems.asScala.headOption.getOrElse("unknown table")
s"BatchWriteItemRequest($table, ${aws.getRequestItems.size})"
}
}
def setupTable(createTableRequest: CreateTableRequest): IO[Unit] =
for {
tableCreated <- dynamoUtils.createTableIfNotExists(dynamoDB, createTableRequest)
_ = if (!tableCreated) {
log.info(s"Table ${createTableRequest.getTableName} already exists")
}
_ <- dynamoUtils.waitUntilActive(dynamoDB, createTableRequest.getTableName)
} yield ()
def listTables(aws: ListTablesRequest): IO[ListTablesResult] =
send[ListTablesRequest, ListTablesResult](aws, dynamoDB.listTables)
def describeTable(aws: DescribeTableRequest): IO[DescribeTableResult] =
send[DescribeTableRequest, DescribeTableResult](aws, dynamoDB.describeTable)
def createTable(aws: CreateTableRequest): IO[CreateTableResult] =
send[CreateTableRequest, CreateTableResult](aws, dynamoDB.createTable)
def updateTable(aws: UpdateTableRequest): IO[UpdateTableResult] =
send[UpdateTableRequest, UpdateTableResult](aws, dynamoDB.updateTable)
def deleteTable(aws: DeleteTableRequest): IO[DeleteTableResult] =
send[DeleteTableRequest, DeleteTableResult](aws, dynamoDB.deleteTable)
def query(aws: QueryRequest): IO[QueryResult] =
send[QueryRequest, QueryResult](aws, dynamoDB.query)
def scan(aws: ScanRequest): IO[ScanResult] =
send[ScanRequest, ScanResult](aws, dynamoDB.scan)
def putItem(aws: PutItemRequest): IO[PutItemResult] =
send[PutItemRequest, PutItemResult](aws, dynamoDB.putItem)
def getItem(aws: GetItemRequest): IO[GetItemResult] =
send[GetItemRequest, GetItemResult](aws, dynamoDB.getItem)
def updateItem(aws: UpdateItemRequest): IO[UpdateItemResult] =
send[UpdateItemRequest, UpdateItemResult](aws, dynamoDB.updateItem)
def deleteItem(aws: DeleteItemRequest): IO[DeleteItemResult] =
send[DeleteItemRequest, DeleteItemResult](aws, dynamoDB.deleteItem)
def scanAll(aws: ScanRequest): IO[List[ScanResult]] =
scan(aws).flatMap(result => continueScanning(aws, result, (List(result), 1))).map {
case (lst, scanNum) =>
log.debug(s"Completed $scanNum scans in scanAll on table: [${aws.getTableName}]")
lst
}
private def continueScanning(
request: ScanRequest,
result: ScanResult,
acc: (List[ScanResult], Int)
): IO[(List[ScanResult], Int)] =
result.getLastEvaluatedKey match {
case lastEvaluatedKey if lastEvaluatedKey == null || lastEvaluatedKey.isEmpty =>
// there is no last evaluated key, that means we are done querying
IO.pure(acc)
case lastEvaluatedKey =>
// set the exclusive start key to the last evaluated key
val continuedQuery = request
continuedQuery.setExclusiveStartKey(lastEvaluatedKey)
// re-run the query, continue querying if need be, be sure to accumulate the result
scan(continuedQuery)
.flatMap { continuedResult =>
val accumulated = acc match {
case (lst, num) => (lst :+ continuedResult, num + 1)
}
continueScanning(continuedQuery, continuedResult, accumulated)
}
}
def queryAll(aws: QueryRequest): IO[List[QueryResult]] =
query(aws).flatMap(result => continueQuerying(aws, result, List(result)))
/* Supports query all by continuing to query until there is no last evaluated key */
private def continueQuerying(
request: QueryRequest,
result: QueryResult,
acc: List[QueryResult]
): IO[List[QueryResult]] = {
val lastCount = result.getCount
val limit =
if (request.getLimit == null || request.getLimit == 0) None else Some(request.getLimit)
result.getLastEvaluatedKey match {
case lastEvaluatedKey if lastEvaluatedKey == null || lastEvaluatedKey.isEmpty =>
// there is no last evaluated key, that means we are done querying
IO.pure(acc)
case _ if limit.exists(_ <= lastCount) =>
//maxItems from limit has been achieved
IO.pure(acc)
case lastEvaluatedKey =>
// set the exclusive start key to the last evaluated key
val continuedQuery = request
continuedQuery.setExclusiveStartKey(lastEvaluatedKey)
//adjust limit
limit.foreach(old => continuedQuery.setLimit(old - lastCount))
// re-run the query, continue querying if need be, be sure to accumulate the result
query(continuedQuery)
.flatMap(
continuedResult =>
continueQuerying(continuedQuery, continuedResult, acc :+ continuedResult)
)
}
}
/**
* Does a batch write item, but will attempt to continue processing unwritten items a number of times with backoff
*/
def batchWriteItem(
table: String,
aws: BatchWriteItemRequest,
retries: Int = 10,
backoff: FiniteDuration = 1.millis
): IO[BatchWriteItemResult] =
send[BatchWriteItemRequest, BatchWriteItemResult](aws, dynamoDB.batchWriteItem)
.flatMap(r => sendUnprocessedBatchWriteItems(table, r, retries, backoff))
def toBatchWriteItemRequest(writes: Seq[WriteRequest], tableName: String): BatchWriteItemRequest =
toBatchWriteItemRequest(Collections.singletonMap(tableName, writes.asJava))
def toBatchWriteItemRequest(
writes: java.util.Map[String, java.util.List[WriteRequest]]
): BatchWriteItemRequest =
new BatchWriteItemRequest()
.withRequestItems(writes)
.withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
def batchGetItem(aws: BatchGetItemRequest): IO[BatchGetItemResult] =
send[BatchGetItemRequest, BatchGetItemResult](aws, dynamoDB.batchGetItem)
/* sends unprocessed items back to dynamo in a retry loop with backoff */
private def sendUnprocessedBatchWriteItems(
tableName: String,
result: BatchWriteItemResult,
retriesRemaining: Int,
backoff: FiniteDuration
): IO[BatchWriteItemResult] = {
// calculate how many items were not processed yet, we need to re-submit those
val unprocessed: Int = result.getUnprocessedItems.get(tableName) match {
case null => 0
case items => items.size
}
if (unprocessed == 0) {
// if there are no items left to process, let's indicate that we are good!
IO.pure(result)
} else if (retriesRemaining == 0) {
// there are unprocessed items still remaining, but we have exhausted our retries, consider this FAILED
log.error("Exhausted retries while sending batch write")
throw DynamoDBRetriesExhaustedException(
s"Unable to batch write for table $tableName after retries"
)
} else {
// there are unprocessed items and we have retries left, let's retry those items we haven't yet processed
log.warn(
s"Unable to process all items in batch for table $tableName, resubmitting new batch with $unprocessed " +
s"items remaining"
)
val nextBatch = toBatchWriteItemRequest(result.getUnprocessedItems)
IO.sleep(backoff) *> batchWriteItem(tableName, nextBatch, retriesRemaining - 1, backoff * 2)
}
}
}
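
The send loop above retries only on ProvisionedThroughputExceededException, doubling its backoff on each attempt and tracking the remaining attempts in a mutable RetryStateHolder. The same retry-with-exponential-backoff shape, stripped of the AWS and metrics plumbing, looks roughly like this (the names here are hypothetical, not part of the helper):

import cats.effect.{IO, Timer}
import cats.syntax.all._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object BackoffSketch {
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  // Retry `action` while the error is retryable, sleeping and doubling the delay each time
  def retryWithBackoff[A](
      action: IO[A],
      retries: Int = 10,
      backoff: FiniteDuration = 1.millis
  )(isRetryable: Throwable => Boolean): IO[A] =
    action.handleErrorWith {
      case err if isRetryable(err) && retries > 0 =>
        IO.sleep(backoff) *> retryWithBackoff(action, retries - 1, backoff * 2)(isRetryable)
      case err =>
        IO.raiseError(err) // out of retries or not retryable: surface the failure
    }
}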

View File

@@ -1,173 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.{Collections, HashMap}
import cats.effect._
import com.amazonaws.services.dynamodbv2.model._
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.domain.membership.MembershipRepository
import vinyldns.core.route.Monitored
import scala.collection.JavaConverters._
object DynamoDBMembershipRepository {
private[repository] val USER_ID = "user_id"
private[repository] val GROUP_ID = "group_id"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBMembershipRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBMembershipRepository")
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(USER_ID, "S"),
new AttributeDefinition(GROUP_ID, "S")
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(
new KeySchemaElement(USER_ID, KeyType.HASH),
new KeySchemaElement(GROUP_ID, KeyType.RANGE)
)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBMembershipRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBMembershipRepository private[repository] (
membershipTable: String,
dynamoDBHelper: DynamoDBHelper
) extends MembershipRepository
with Monitored {
import DynamoDBMembershipRepository._
val log: Logger = LoggerFactory.getLogger("DynamoDBMembershipRepository")
def getGroupsForUser(userId: String): IO[Set[String]] =
monitor("repo.Membership.getGroupsForUser") {
log.info(s"Getting groups by user id $userId")
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":userId", new AttributeValue(userId))
val keyConditionExpression: String = "#user_id_attribute = :userId"
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#user_id_attribute", USER_ID)
val queryRequest = new QueryRequest()
.withTableName(membershipTable)
.withKeyConditionExpression(keyConditionExpression)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
dynamoDBHelper.query(queryRequest).map(result => result.getItems.asScala.map(fromItem).toSet)
}
def saveMembers(groupId: String, memberUserIds: Set[String], isAdmin: Boolean): IO[Set[String]] =
monitor("repo.Membership.addMembers") {
log.info(s"Saving members for group $groupId")
log.info(
s"Passed in isAdmin value $isAdmin is not supported and therefore ignored for DynamoDB"
)
val items = memberUserIds.toList
.map(toItem(_, groupId))
val result = executeBatch(items) { item =>
new WriteRequest().withPutRequest(new PutRequest().withItem(item))
}
// Assuming we succeeded, then return user ids
result.map(_ => memberUserIds)
}
def removeMembers(groupId: String, memberUserIds: Set[String]): IO[Set[String]] =
monitor("repo.Membership.removeMembers") {
log.info(s"Removing members for group $groupId")
val items = memberUserIds.toList
.map(toItem(_, groupId))
val result = executeBatch(items) { item =>
new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(item))
}
// Assuming we succeeded, then return user ids
result.map(_ => memberUserIds)
}
private def executeBatch(
items: Iterable[java.util.Map[String, AttributeValue]]
)(f: java.util.Map[String, AttributeValue] => WriteRequest): IO[List[BatchWriteItemResult]] = {
val MaxDynamoBatchWriteSize = 25
val batchWrites =
items.toList
.map(item => f(item))
.grouped(MaxDynamoBatchWriteSize)
.map(
itemGroup =>
new BatchWriteItemRequest()
.withRequestItems(Collections.singletonMap(membershipTable, itemGroup.asJava))
.withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL)
)
// Fold left will attempt each batch sequentially, and fail fast on error
batchWrites.foldLeft(IO.pure(List.empty[BatchWriteItemResult])) {
case (acc, batch) =>
acc.flatMap { lst =>
dynamoDBHelper.batchWriteItem(membershipTable, batch).map(result => result :: lst)
}
}
}
private[repository] def toItem(
userId: String,
groupId: String
): java.util.Map[String, AttributeValue] = {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue(userId))
item.put(GROUP_ID, new AttributeValue(groupId))
item
}
private[repository] def fromItem(item: java.util.Map[String, AttributeValue]): String =
try {
item.get(GROUP_ID).getS
} catch {
case ex: Throwable =>
log.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
}
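
executeBatch above works within DynamoDB's 25-request BatchWriteItem limit by grouping the writes and then folding over the groups, so batches run one after another and the first failure stops the rest. Stripped of the AWS specifics, the grouping/fold looks like this (hypothetical names, no real writes):

import cats.effect.IO

object SequentialBatchSketch {
  val MaxBatchSize = 25 // BatchWriteItem accepts at most 25 write requests per call

  // Run writeBatch over each group of items in order, accumulating results and failing fast
  def writeAll[A, R](items: List[A])(writeBatch: List[A] => IO[R]): IO[List[R]] =
    items
      .grouped(MaxBatchSize)
      .foldLeft(IO.pure(List.empty[R])) { (acc, batch) =>
        acc.flatMap(done => writeBatch(batch).map(r => done :+ r))
      }
}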

View File

@@ -1,246 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.nio.ByteBuffer
import java.util.HashMap
import cats.effect._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.domain.record._
import vinyldns.core.domain.record.RecordChangeRepository
import vinyldns.core.protobuf.ProtobufConversions
import vinyldns.core.route.Monitored
import vinyldns.proto.VinylDNSProto
import scala.collection.JavaConverters._
import scala.util.Try
object DynamoDBRecordChangeRepository {
private val CHANGE_SET_ID = "change_set_id"
private[repository] val RECORD_SET_CHANGE_ID = "record_set_change_id"
private val CHANGE_SET_STATUS = "change_set_status"
private val ZONE_ID = "zone_id"
private val CREATED_TIMESTAMP = "created_timestamp"
private val RECORD_SET_CHANGE_CREATED_TIMESTAMP = "record_set_change_created_timestamp"
private val PROCESSING_TIMESTAMP = "processing_timestamp"
private val RECORD_SET_CHANGE_BLOB = "record_set_change_blob"
private val ZONE_ID_RECORD_SET_CHANGE_ID_INDEX = "zone_id_record_set_change_id_index"
private val ZONE_ID_CREATED_INDEX = "zone_id_created_index"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBRecordChangeRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBRecordChangeRepository")
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes =
Seq(
new AttributeDefinition(RECORD_SET_CHANGE_ID, "S"),
new AttributeDefinition(ZONE_ID, "S"),
new AttributeDefinition(RECORD_SET_CHANGE_CREATED_TIMESTAMP, "N")
)
val secondaryIndexes =
Seq(
new GlobalSecondaryIndex()
.withIndexName(ZONE_ID_RECORD_SET_CHANGE_ID_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(RECORD_SET_CHANGE_ID, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL")),
new GlobalSecondaryIndex()
.withIndexName(ZONE_ID_CREATED_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(RECORD_SET_CHANGE_CREATED_TIMESTAMP, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(new KeySchemaElement(RECORD_SET_CHANGE_ID, KeyType.HASH))
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBRecordChangeRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBRecordChangeRepository private[repository] (
recordChangeTable: String,
val dynamoDBHelper: DynamoDBHelper
) extends RecordChangeRepository
with ProtobufConversions
with Monitored {
import DynamoDBRecordChangeRepository._
val log: Logger = LoggerFactory.getLogger("DynamoDBRecordChangeRepository")
def toWriteRequest(changeSet: ChangeSet, change: RecordSetChange): WriteRequest =
new WriteRequest().withPutRequest(new PutRequest().withItem(toItem(changeSet, change)))
def save(changeSet: ChangeSet): IO[ChangeSet] =
monitor("repo.RecordChange.save") {
log.info(s"Saving change set ${changeSet.id} with size ${changeSet.changes.size}")
val MaxBatchWriteGroup = 25
val writeItems = changeSet.changes.map(change => toWriteRequest(changeSet, change))
val batchWrites = writeItems
.grouped(MaxBatchWriteGroup)
.map { group =>
dynamoDBHelper.toBatchWriteItemRequest(group, recordChangeTable)
}
.toList
// Fold left will attempt each batch sequentially, and fail fast on error
val result = batchWrites.foldLeft(IO.pure(List.empty[BatchWriteItemResult])) {
case (acc, req) =>
acc.flatMap { lst =>
dynamoDBHelper
.batchWriteItem(recordChangeTable, req)
.map(result => result :: lst)
}
}
result.map(_ => changeSet)
}
def listRecordSetChanges(
zoneId: String,
startFrom: Option[String] = None,
maxItems: Int = 100
): IO[ListRecordSetChangesResults] =
monitor("repo.RecordChange.getRecordSetChanges") {
log.info(s"Getting record set changes for zone $zoneId")
// millisecond string
val startTime = startFrom.getOrElse(DateTime.now.getMillis.toString)
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":zone_id", new AttributeValue(zoneId))
expressionAttributeValues.put(":created", new AttributeValue().withN(startTime))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#zone_id_attribute", ZONE_ID)
expressionAttributeNames.put("#created_attribute", RECORD_SET_CHANGE_CREATED_TIMESTAMP)
val keyConditionExpression: String =
"#zone_id_attribute = :zone_id AND #created_attribute < :created"
val queryRequest = new QueryRequest()
.withTableName(recordChangeTable)
.withIndexName(ZONE_ID_CREATED_INDEX)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
.withScanIndexForward(false) // return in descending order by sort key
.withLimit(maxItems)
dynamoDBHelper.queryAll(queryRequest).map { resultList =>
val items = resultList.flatMap { result =>
result.getItems.asScala.map(toRecordSetChange)
}
val nextId = Try(
resultList.last.getLastEvaluatedKey
.get("record_set_change_created_timestamp")
.getN
).toOption
ListRecordSetChangesResults(items, nextId, startFrom, maxItems)
}
}
def getRecordSetChange(zoneId: String, changeId: String): IO[Option[RecordSetChange]] =
monitor("repo.RecordChange.getRecordSetChange") {
log.info(s"Getting record set change for zone $zoneId and changeId $changeId")
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":record_set_change_id", new AttributeValue(changeId))
expressionAttributeValues.put(":zone_id", new AttributeValue(zoneId))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#record_set_change_id_attribute", RECORD_SET_CHANGE_ID)
expressionAttributeNames.put("#zone_id_attribute", ZONE_ID)
val keyConditionExpression: String =
"#record_set_change_id_attribute = :record_set_change_id and #zone_id_attribute = :zone_id"
val queryRequest = new QueryRequest()
.withTableName(recordChangeTable)
.withIndexName(ZONE_ID_RECORD_SET_CHANGE_ID_INDEX)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
dynamoDBHelper
.query(queryRequest)
.map(_.getItems.asScala.toList.headOption.map(toRecordSetChange))
}
def toRecordSetChange(item: java.util.Map[String, AttributeValue]): RecordSetChange =
try {
val recordSetChangeBlob = item.get(RECORD_SET_CHANGE_BLOB)
fromPB(VinylDNSProto.RecordSetChange.parseFrom(recordSetChangeBlob.getB.array()))
} catch {
case ex: Throwable =>
log.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
def toItem(
changeSet: ChangeSet,
change: RecordSetChange
): java.util.HashMap[String, AttributeValue] = {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(CHANGE_SET_ID, new AttributeValue(changeSet.id))
item.put(ZONE_ID, new AttributeValue(changeSet.zoneId))
item.put(CHANGE_SET_STATUS, new AttributeValue().withN(changeSet.status.intValue.toString))
item.put(CREATED_TIMESTAMP, new AttributeValue(changeSet.createdTimestamp.toString))
item.put(
RECORD_SET_CHANGE_CREATED_TIMESTAMP,
new AttributeValue().withN(change.created.getMillis.toString)
)
item.put(PROCESSING_TIMESTAMP, new AttributeValue(changeSet.processingTimestamp.toString))
val recordSetChangeBlob = toPB(change).toByteArray
val recordSetChangeBB = ByteBuffer.allocate(recordSetChangeBlob.length)
recordSetChangeBB.put(recordSetChangeBlob)
recordSetChangeBB.position(0)
item.put(RECORD_SET_CHANGE_ID, new AttributeValue(change.id))
item.put(RECORD_SET_CHANGE_BLOB, new AttributeValue().withB(recordSetChangeBB))
item
}
}

View File

@@ -1,123 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.nio.ByteBuffer
import com.amazonaws.services.dynamodbv2.model._
import org.slf4j.LoggerFactory
import vinyldns.core.domain.DomainHelpers.omitTrailingDot
import vinyldns.core.domain.record._
import vinyldns.core.protobuf.ProtobufConversions
import vinyldns.proto.VinylDNSProto
trait DynamoDBRecordSetConversions extends ProtobufConversions {
import DynamoDBRecordSetRepository._
private[repository] val recordSetTableName: String
private val logger = LoggerFactory.getLogger(classOf[DynamoDBRecordSetConversions])
def toWriteRequest(recordSetChange: RecordSetChange): WriteRequest = recordSetChange match {
case failed if recordSetChange.status == RecordSetChangeStatus.Failed =>
unsuccessful(failed)
case complete if recordSetChange.status == RecordSetChangeStatus.Complete =>
successful(complete)
case notComplete => saveRecordSet(notComplete)
}
def toWriteRequest(recordSet: RecordSet): WriteRequest = saveRecordSet(recordSet)
def toWriteRequests(changeSet: ChangeSet): Seq[WriteRequest] =
changeSet.changes.map(toWriteRequest)
def toWriteRequests(recordSets: List[RecordSet]): Seq[WriteRequest] =
recordSets.map(toWriteRequest)
private[repository] def deleteRecordSetFromTable(recordSetId: String): WriteRequest =
new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(recordSetIdKey(recordSetId)))
/* simply writes the record set in the table, if it already exists it will be overridden (which is ok) */
private def saveRecordSet(change: RecordSetChange): WriteRequest =
putRecordSetInTable(toItem(change.recordSet))
private def saveRecordSet(recordSet: RecordSet): WriteRequest =
putRecordSetInTable(toItem(recordSet))
private def successful(change: RecordSetChange): WriteRequest = change.changeType match {
case RecordSetChangeType.Delete => applySuccessfulDelete(change)
case _ => applySuccessfulUpdateOrCreate(change)
}
private def unsuccessful(change: RecordSetChange): WriteRequest = change.changeType match {
case RecordSetChangeType.Create => revertCreate(change)
case _ => revertUpdateOrDelete(change)
}
/* reverts a failed change by restoring the record set change's "updates" attribute */
private def revertUpdateOrDelete(failedChange: RecordSetChange): WriteRequest =
putRecordSetInTable(failedChange.updates.map(toItem).get)
/* reverts a failed create by deleting it from the table */
private def revertCreate(failedChange: RecordSetChange): WriteRequest =
deleteRecordSetFromTable(failedChange.recordSet.id)
/* applies a successful change by putting the record set itself */
private def applySuccessfulUpdateOrCreate(successfulChange: RecordSetChange): WriteRequest =
putRecordSetInTable(toItem(successfulChange.recordSet))
/* successful deletes get removed from the record set table via a delete request */
private def applySuccessfulDelete(delete: RecordSetChange): WriteRequest =
deleteRecordSetFromTable(delete.recordSet.id)
private def putRecordSetInTable(item: java.util.HashMap[String, AttributeValue]): WriteRequest =
new WriteRequest().withPutRequest(new PutRequest().withItem(item))
private def recordSetIdKey(recordSetId: String): java.util.HashMap[String, AttributeValue] = {
val key = new java.util.HashMap[String, AttributeValue]()
key.put(RECORD_SET_ID, new AttributeValue(recordSetId))
key
}
def toItem(recordSet: RecordSet): java.util.HashMap[String, AttributeValue] = {
val recordSetBlob = toPB(recordSet).toByteArray
val bb = ByteBuffer.allocate(recordSetBlob.length) //convert byte array to byte buffer
bb.put(recordSetBlob)
bb.position(0)
val item = new java.util.HashMap[String, AttributeValue]()
item.put(ZONE_ID, new AttributeValue(recordSet.zoneId))
item.put(RECORD_SET_TYPE, new AttributeValue(recordSet.typ.toString))
item.put(RECORD_SET_NAME, new AttributeValue(recordSet.name))
item.put(RECORD_SET_ID, new AttributeValue(recordSet.id))
item.put(RECORD_SET_BLOB, new AttributeValue().withB(bb))
item.put(RECORD_SET_SORT, new AttributeValue(omitTrailingDot(recordSet.name.toLowerCase)))
item
}
def fromItem(item: java.util.Map[String, AttributeValue]): RecordSet =
try {
val recordSetBlob = item.get(RECORD_SET_BLOB)
fromPB(VinylDNSProto.RecordSet.parseFrom(recordSetBlob.getB.array()))
} catch {
case ex: Throwable =>
logger.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
}
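
Both toItem and fromItem above move the protobuf blob in and out of a binary attribute by way of a ByteBuffer. The same round trip, shown here with a plain byte array instead of a protobuf message so the sketch stays self-contained:

import java.nio.ByteBuffer
import com.amazonaws.services.dynamodbv2.model.AttributeValue

object BlobRoundTripSketch {
  // Wrap raw bytes in a binary AttributeValue, as toItem does with the record set blob
  def toBinaryAttribute(bytes: Array[Byte]): AttributeValue = {
    val bb = ByteBuffer.allocate(bytes.length)
    bb.put(bytes)
    bb.position(0) // rewind so readers start at the beginning of the buffer
    new AttributeValue().withB(bb)
  }

  // Read the bytes back out, as fromItem does before parsing the protobuf
  def fromBinaryAttribute(attr: AttributeValue): Array[Byte] =
    attr.getB.array()
}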

View File

@@ -1,301 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util.HashMap
import cats.effect._
import com.amazonaws.services.dynamodbv2.model._
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.domain.DomainHelpers.omitTrailingDot
import vinyldns.core.domain.record.NameSort.NameSort
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.{ChangeSet, ListRecordSetResults, RecordSet, RecordSetRepository}
import vinyldns.core.protobuf.ProtobufConversions
import vinyldns.core.route.Monitored
object DynamoDBRecordSetRepository extends ProtobufConversions {
private[repository] val ZONE_ID = "zone_id"
private[repository] val RECORD_SET_ID = "record_set_id"
private[repository] val RECORD_SET_TYPE = "record_set_type"
private[repository] val RECORD_SET_NAME = "record_set_name"
private[repository] val RECORD_SET_SORT = "record_set_sort"
private[repository] val RECORD_SET_BLOB = "record_set_blob"
private val ZONE_ID_RECORD_SET_NAME_INDEX = "zone_id_record_set_name_index"
private val ZONE_ID_RECORD_SET_SORT_INDEX = "zone_id_record_set_sort_index"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBRecordSetRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBRecordSetRepository")
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(ZONE_ID, "S"),
new AttributeDefinition(RECORD_SET_NAME, "S"),
new AttributeDefinition(RECORD_SET_ID, "S"),
new AttributeDefinition(RECORD_SET_SORT, "S")
)
val secondaryIndexes = Seq(
new GlobalSecondaryIndex()
.withIndexName(ZONE_ID_RECORD_SET_NAME_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(RECORD_SET_NAME, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL")),
new GlobalSecondaryIndex()
.withIndexName(ZONE_ID_RECORD_SET_SORT_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(RECORD_SET_SORT, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(new KeySchemaElement(RECORD_SET_ID, KeyType.HASH))
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBRecordSetRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBRecordSetRepository private[repository] (
val recordSetTableName: String,
val dynamoDBHelper: DynamoDBHelper
) extends RecordSetRepository
with DynamoDBRecordSetConversions
with Monitored
with QueryHelper {
import DynamoDBRecordSetRepository._
val log: Logger = LoggerFactory.getLogger("DynamoDBRecordSetRepository")
def apply(changeSet: ChangeSet): IO[ChangeSet] =
monitor("repo.RecordSet.apply") {
log.info(
s"Applying change set for zone ${changeSet.zoneId} with size ${changeSet.changes.size}"
)
// The BatchWriteItem max size is 25, so we need to group by that number
val MaxBatchWriteGroup = 25
val writeItems = changeSet.changes.map(toWriteRequest)
val batchWrites = writeItems
.grouped(MaxBatchWriteGroup)
.map(group => dynamoDBHelper.toBatchWriteItemRequest(group, recordSetTableName))
// Fold left will attempt each batch sequentially, and fail fast on error
val result = batchWrites.foldLeft(IO.pure(List.empty[BatchWriteItemResult])) {
case (acc, req) =>
acc.flatMap { lst =>
dynamoDBHelper.batchWriteItem(recordSetTableName, req).map(result => result :: lst)
}
}
// Assuming we succeeded, then return the change set with a status of applied
result.map(_ => changeSet)
}
def putRecordSet(recordSet: RecordSet): IO[RecordSet] = { //TODO remove me
val item = toItem(recordSet)
val request = new PutItemRequest().withTableName(recordSetTableName).withItem(item)
dynamoDBHelper.putItem(request).map(_ => recordSet)
}
def listRecordSets(
zoneId: Option[String],
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: Option[String],
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort
): IO[ListRecordSetResults] =
monitor("repo.RecordSet.listRecordSets") {
zoneId match {
case None =>
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
"listRecordSets without zoneId is not supported by VinylDNS DynamoDB RecordSetRepository"
)
)
case Some(id) =>
log.info(s"Getting recordSets for zone $zoneId")
val keyConditions = Map[String, String](ZONE_ID -> id)
val filterExpression = recordNameFilter.map(
filter => ContainsFilter(RECORD_SET_SORT, omitTrailingDot(filter.toLowerCase))
)
val startKey = startFrom.map { inputString =>
val attributes = inputString.split('~')
Map(
ZONE_ID -> attributes(0),
RECORD_SET_NAME -> attributes(1),
RECORD_SET_ID -> attributes(2)
)
}
val responseFuture = doQuery(
recordSetTableName,
ZONE_ID_RECORD_SET_NAME_INDEX,
keyConditions,
filterExpression,
startKey,
maxItems
)(dynamoDBHelper)
for {
resp <- responseFuture
queryResp = resp.asInstanceOf[QueryResponseItems]
rs = queryResp.items.map(fromItem)
nextId = queryResp.lastEvaluatedKey.map { keyMap =>
List(
keyMap.get(ZONE_ID).getS,
keyMap.get(RECORD_SET_NAME).getS,
keyMap.get(RECORD_SET_ID).getS
).mkString("~")
}
} yield ListRecordSetResults(
rs,
nextId,
startFrom,
maxItems,
recordNameFilter,
recordTypeFilter,
recordOwnerGroupFilter,
nameSort
)
}
}
def getRecordSetsByName(zoneId: String, name: String): IO[List[RecordSet]] =
monitor("repo.RecordSet.getRecordSetByName") {
log.info(s"Getting recordSet $name from zone $zoneId")
val keyConditions = Map[String, String](
ZONE_ID -> zoneId,
RECORD_SET_SORT -> omitTrailingDot(name.toLowerCase())
)
val responseFuture =
doQuery(recordSetTableName, ZONE_ID_RECORD_SET_SORT_INDEX, keyConditions)(dynamoDBHelper)
for {
resp <- responseFuture
rs = resp.asInstanceOf[QueryResponseItems].items.map(fromItem)
} yield rs
}
def getRecordSets(zoneId: String, name: String, typ: RecordType): IO[List[RecordSet]] =
monitor("repo.RecordSet.getRecordSetsByNameAndType") {
log.info(s"Getting recordSet $name, zone $zoneId, type $typ")
val keyConditions = Map[String, String](
ZONE_ID -> zoneId,
RECORD_SET_SORT -> omitTrailingDot(name.toLowerCase())
)
val filterExpression = Some(EqualsFilter(RECORD_SET_TYPE, typ.toString))
val responseFuture =
doQuery(recordSetTableName, ZONE_ID_RECORD_SET_SORT_INDEX, keyConditions, filterExpression)(
dynamoDBHelper
)
for {
resp <- responseFuture
rs = resp.asInstanceOf[QueryResponseItems].items.map(fromItem)
} yield rs
}
def getRecordSet(recordSetId: String): IO[Option[RecordSet]] =
monitor("repo.RecordSet.getRecordSetById") {
//Do not need ZoneId, recordSetId is unique
log.info(s"Getting recordSet $recordSetId")
val key = new HashMap[String, AttributeValue]()
key.put(RECORD_SET_ID, new AttributeValue(recordSetId))
val request = new GetItemRequest().withTableName(recordSetTableName).withKey(key)
dynamoDBHelper.getItem(request).map { result =>
if (result != null && result.getItem != null && !result.getItem.isEmpty)
Some(fromItem(result.getItem))
else
None
}
}
def getRecordSetCount(zoneId: String): IO[Int] =
monitor("repo.RecordSet.getRecordSetCount") {
log.info(s"Getting record set count zone $zoneId")
val keyConditions = Map[String, String](ZONE_ID -> zoneId)
// set isCountQuery to true to ignore items
val responseFuture = doQuery(
recordSetTableName,
ZONE_ID_RECORD_SET_NAME_INDEX,
keyConditions,
isCountQuery = true
)(dynamoDBHelper)
responseFuture.map(resp => resp.asInstanceOf[QueryResponseCount].count)
}
def getRecordSetsByFQDNs(names: Set[String]): IO[List[RecordSet]] =
monitor("repo.RecordSet.getRecordSetsByFQDNs") {
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
"getRecordSetsByFQDNs is not supported by VinylDNS DynamoDB RecordSetRepository"
)
)
}
def getFirstOwnedRecordByGroup(ownerGroupId: String): IO[Option[String]] =
monitor("repo.RecordSet.getFirstOwnedRecordByGroup") {
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
s"getFirstOwnedRecordByGroup is not supported by VinylDNS DynamoDB RecordSetRepository id=$ownerGroupId"
)
)
}
def deleteRecordSetsInZone(zoneId: String, zoneName: String): IO[Unit] =
monitor("repo.RecordSet.deleteRecordSetsInZone") {
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
s"""deleteRecordSetsInZone(zoneid=$zoneId, zoneName=$zoneName)
|is not supported by VinylDNS DynamoDB RecordSetRepository""".stripMargin
.replaceAll("\n", " ")
)
)
}
}
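
listRecordSets above encodes its paging position as a single '~'-delimited string built from the last evaluated zone id, record set name, and record set id, and splits it back apart on the next call. A tiny sketch of that token format (helper names are made up; it assumes none of the components contain '~'):

object PagingTokenSketch {
  // Join the three key attributes into the startFrom string handed back to callers
  def encode(zoneId: String, recordSetName: String, recordSetId: String): String =
    List(zoneId, recordSetName, recordSetId).mkString("~")

  // Split a startFrom string back into the exclusive start key attributes
  def decode(startFrom: String): (String, String, String) = {
    val parts = startFrom.split('~')
    (parts(0), parts(1), parts(2))
  }
}

// e.g. encode("zone-1", "www", "abc-123") == "zone-1~www~abc-123"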

View File

@@ -1,147 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import java.util.HashMap
import cats.data.OptionT
import cats.effect.IO
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.membership._
import vinyldns.core.route.Monitored
object DynamoDBUserChangeRepository {
val USER_CHANGE_ID: String = "change_id"
val USER_ID: String = "user_id"
val MADE_BY_ID: String = "made_by_id"
val CREATED: String = "created"
val USER_NAME: String = "username"
val CHANGE_TYPE: String = "change_type"
val NEW_USER: String = "new_user"
val OLD_USER: String = "old_user"
val TABLE_ATTRIBUTES: Seq[AttributeDefinition] = Seq(
new AttributeDefinition(USER_CHANGE_ID, "S")
)
// Note: This should be an Either; however pulling everything into an Either is a big refactoring
def fromItem(item: java.util.Map[String, AttributeValue]): IO[UserChange] =
for {
c <- IO(item.get(CHANGE_TYPE).getS)
changeType <- IO.fromEither(UserChangeType.fromString(c))
newUser <- IO(item.get(NEW_USER).getM).flatMap(m => DynamoDBUserRepository.fromItem(m))
oldUser <- OptionT(IO(Option(item.get(OLD_USER))))
.subflatMap(av => Option(av.getM))
.semiflatMap(DynamoDBUserRepository.fromItem)
.value
madeByUserId <- IO(item.get(MADE_BY_ID).getS)
id <- IO(item.get(USER_CHANGE_ID).getS)
created <- IO(new DateTime(item.get(CREATED).getN.toLong))
change <- IO.fromEither(UserChange(id, newUser, madeByUserId, created, oldUser, changeType))
} yield change
def toItem(crypto: CryptoAlgebra, change: UserChange): java.util.Map[String, AttributeValue] = {
val item = new util.HashMap[String, AttributeValue]()
item.put(USER_CHANGE_ID, new AttributeValue(change.id))
item.put(USER_ID, new AttributeValue(change.newUser.id))
item.put(USER_NAME, new AttributeValue(change.newUser.userName))
item.put(MADE_BY_ID, new AttributeValue(change.madeByUserId))
item.put(CHANGE_TYPE, new AttributeValue(UserChangeType.fromChange(change).value))
item.put(CREATED, new AttributeValue().withN(change.created.getMillis.toString))
item.put(
NEW_USER,
new AttributeValue().withM(DynamoDBUserRepository.toItem(crypto, change.newUser))
)
change match {
case UserChange.UpdateUser(_, _, _, oldUser, _) =>
item.put(
OLD_USER,
new AttributeValue().withM(DynamoDBUserRepository.toItem(crypto, oldUser))
)
case _ => ()
}
item
}
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings,
crypto: CryptoAlgebra
): IO[DynamoDBUserChangeRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBUserChangeRepository")
)
val setup =
dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(config.tableName)
.withAttributeDefinitions(TABLE_ATTRIBUTES: _*)
.withKeySchema(new KeySchemaElement(USER_CHANGE_ID, KeyType.HASH))
.withProvisionedThroughput(
new ProvisionedThroughput(config.provisionedReads, config.provisionedWrites)
)
)
val serialize: UserChange => java.util.Map[String, AttributeValue] = toItem(crypto, _)
val deserialize: java.util.Map[String, AttributeValue] => IO[UserChange] = fromItem
setup.as(
new DynamoDBUserChangeRepository(config.tableName, dynamoDBHelper, serialize, deserialize)
)
}
}
class DynamoDBUserChangeRepository private[repository] (
tableName: String,
val dynamoDBHelper: DynamoDBHelper,
serialize: UserChange => java.util.Map[String, AttributeValue],
deserialize: java.util.Map[String, AttributeValue] => IO[UserChange]
) extends UserChangeRepository
with Monitored {
import DynamoDBUserChangeRepository._
private val logger = LoggerFactory.getLogger(classOf[DynamoDBUserChangeRepository])
def save(change: UserChange): IO[UserChange] =
monitor("repo.UserChange.save") {
logger.info(s"Saving user change ${change.id}")
val item = serialize(change)
val request = new PutItemRequest().withTableName(tableName).withItem(item)
dynamoDBHelper.putItem(request).as(change)
}
def get(changeId: String): IO[Option[UserChange]] =
monitor("repo.UserChange.get") {
val key = new HashMap[String, AttributeValue]()
key.put(USER_CHANGE_ID, new AttributeValue(changeId))
val request = new GetItemRequest().withTableName(tableName).withKey(key)
// OptionT is a convenience wrapper around IO[Option[A]]
OptionT
.liftF(dynamoDBHelper.getItem(request))
.subflatMap(r => Option(r.getItem))
.semiflatMap(item => deserialize(item))
.value
}
}

View File

@@ -1,323 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import java.util.HashMap
import cats.data.OptionT
import cats.effect._
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.membership.LockStatus.LockStatus
import vinyldns.core.domain.membership.{ListUsersResults, LockStatus, User, UserRepository}
import vinyldns.core.route.Monitored
import scala.collection.JavaConverters._
import scala.util.Try
object DynamoDBUserRepository {
private[repository] val USER_ID = "userid"
private[repository] val USER_NAME = "username"
private[repository] val FIRST_NAME = "firstname"
private[repository] val LAST_NAME = "lastname"
private[repository] val EMAIL = "email"
private[repository] val CREATED = "created"
private[repository] val ACCESS_KEY = "accesskey"
private[repository] val SECRET_KEY = "secretkey"
private[repository] val IS_SUPER = "super"
private[repository] val LOCK_STATUS = "lockstatus"
private[repository] val IS_SUPPORT = "support"
private[repository] val IS_TEST_USER = "istest"
private[repository] val USER_NAME_INDEX_NAME = "username_index"
private[repository] val ACCESS_KEY_INDEX_NAME = "access_key_index"
private val log: Logger = LoggerFactory.getLogger(classOf[DynamoDBUserRepository])
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings,
crypto: CryptoAlgebra
): IO[DynamoDBUserRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBUserRepository")
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(USER_ID, "S"),
new AttributeDefinition(USER_NAME, "S"),
new AttributeDefinition(ACCESS_KEY, "S")
)
val secondaryIndexes = Seq(
new GlobalSecondaryIndex()
.withIndexName(USER_NAME_INDEX_NAME)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(new KeySchemaElement(USER_NAME, KeyType.HASH))
.withProjection(new Projection().withProjectionType("ALL")),
new GlobalSecondaryIndex()
.withIndexName(ACCESS_KEY_INDEX_NAME)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(new KeySchemaElement(ACCESS_KEY, KeyType.HASH))
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(new KeySchemaElement(USER_ID, KeyType.HASH))
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
)
setup.as(new DynamoDBUserRepository(tableName, dynamoDBHelper, toItem(crypto, _), fromItem))
}
def toItem(crypto: CryptoAlgebra, user: User): java.util.Map[String, AttributeValue] = {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue(user.id))
item.put(USER_NAME, new AttributeValue(user.userName))
item.put(CREATED, new AttributeValue().withN(user.created.getMillis.toString))
item.put(ACCESS_KEY, new AttributeValue(user.accessKey))
item.put(SECRET_KEY, new AttributeValue(crypto.encrypt(user.secretKey)))
item.put(IS_SUPER, new AttributeValue().withBOOL(user.isSuper))
item.put(IS_TEST_USER, new AttributeValue().withBOOL(user.isTest))
item.put(LOCK_STATUS, new AttributeValue(user.lockStatus.toString))
item.put(IS_SUPPORT, new AttributeValue().withBOOL(user.isSupport))
val firstName =
user.firstName.map(new AttributeValue(_)).getOrElse(new AttributeValue().withNULL(true))
item.put(FIRST_NAME, firstName)
val lastName =
user.lastName.map(new AttributeValue(_)).getOrElse(new AttributeValue().withNULL(true))
item.put(LAST_NAME, lastName)
val email = user.email.map(new AttributeValue(_)).getOrElse(new AttributeValue().withNULL(true))
item.put(EMAIL, email)
item
}
def fromItem(item: java.util.Map[String, AttributeValue]): IO[User] = IO {
def userStatus(str: String): LockStatus = Try(LockStatus.withName(str)).getOrElse {
log.error(s"Invalid locked status value '$str'; defaulting to unlocked")
LockStatus.Unlocked
}
User(
id = item.get(USER_ID).getS,
userName = item.get(USER_NAME).getS,
created = new DateTime(item.get(CREATED).getN.toLong),
accessKey = item.get(ACCESS_KEY).getS,
secretKey = item.get(SECRET_KEY).getS,
firstName = Option(item.get(FIRST_NAME)).flatMap(fn => Option(fn.getS)),
lastName = Option(item.get(LAST_NAME)).flatMap(ln => Option(ln.getS)),
email = Option(item.get(EMAIL)).flatMap(e => Option(e.getS)),
isSuper = if (item.get(IS_SUPER) == null) false else item.get(IS_SUPER).getBOOL,
lockStatus =
if (item.get(LOCK_STATUS) == null) LockStatus.Unlocked
else userStatus(item.get(LOCK_STATUS).getS),
isSupport = if (item.get(IS_SUPPORT) == null) false else item.get(IS_SUPPORT).getBOOL,
isTest = if (item.get(IS_TEST_USER) == null) false else item.get(IS_TEST_USER).getBOOL
)
}
}
class DynamoDBUserRepository private[repository] (
userTableName: String,
val dynamoDBHelper: DynamoDBHelper,
serialize: User => java.util.Map[String, AttributeValue],
deserialize: java.util.Map[String, AttributeValue] => IO[User]
) extends UserRepository
with Monitored {
import DynamoDBUserRepository._
val log: Logger = LoggerFactory.getLogger(classOf[DynamoDBUserRepository])
def getUser(userId: String): IO[Option[User]] =
monitor("repo.User.getUser") {
log.info(s"Getting user by id $userId")
val key = new HashMap[String, AttributeValue]()
key.put(USER_ID, new AttributeValue(userId))
val request = new GetItemRequest().withTableName(userTableName).withKey(key)
OptionT
.liftF(dynamoDBHelper.getItem(request))
.subflatMap(r => Option(r.getItem))
.semiflatMap(item => deserialize(item))
.value
}
def getUserByName(username: String): IO[Option[User]] = {
val attributeNames = new util.HashMap[String, String]()
attributeNames.put("#uname", USER_NAME)
val attributeValues = new util.HashMap[String, AttributeValue]()
attributeValues.put(":uname", new AttributeValue().withS(username))
val request = new QueryRequest()
.withTableName(userTableName)
.withKeyConditionExpression("#uname = :uname")
.withExpressionAttributeNames(attributeNames)
.withExpressionAttributeValues(attributeValues)
.withIndexName(USER_NAME_INDEX_NAME)
  // Duplicate usernames should not exist; as in the portal, log loudly and return the first match
dynamoDBHelper.query(request).flatMap { result =>
result.getItems.asScala.toList match {
case x :: Nil => fromItem(x).map(Some(_))
case Nil => IO.pure(None)
case x :: _ =>
log.error(s"Inconsistent data, multiple user records found for user name '$username'")
fromItem(x).map(Some(_))
}
}
}
def getUsers(
userIds: Set[String],
startFrom: Option[String],
maxItems: Option[Int]
): IO[ListUsersResults] = {
def toBatchGetItemRequest(userIds: List[String]): BatchGetItemRequest = {
val allKeys = new util.ArrayList[util.Map[String, AttributeValue]]()
for { userId <- userIds } {
val key = new util.HashMap[String, AttributeValue]()
key.put(USER_ID, new AttributeValue(userId))
allKeys.add(key)
}
val keysAndAttributes = new KeysAndAttributes().withKeys(allKeys)
val request = new util.HashMap[String, KeysAndAttributes]()
request.put(userTableName, keysAndAttributes)
new BatchGetItemRequest().withRequestItems(request)
}
def parseUsers(result: BatchGetItemResult): IO[List[User]] = {
val userAttributes = result.getResponses.asScala.get(userTableName)
userAttributes match {
case None =>
IO.pure(List())
case Some(items) =>
items.asScala.toList.map(fromItem).sequence
}
}
monitor("repo.User.getUsers") {
log.info(s"Getting users by id $userIds")
val sortedUserIds = userIds.toList.sorted
val filtered = startFrom match {
case None => sortedUserIds
case Some(startId) => sortedUserIds.filter(startId < _)
}
val page = maxItems match {
case None => filtered
case Some(size) => filtered.take(size)
}
      // Group the user ids into batches of 100, the maximum number of keys allowed in a single BatchGetItemRequest
val batches = page.grouped(100).toList
val batchGets = batches.map(toBatchGetItemRequest)
val batchGetIo = batchGets.map(dynamoDBHelper.batchGetItem)
// run the batches in parallel
val allBatches: IO[List[BatchGetItemResult]] = batchGetIo.parSequence
val allUsers = for {
batches <- allBatches
x <- batches.foldLeft(IO(List.empty[User])) { (acc, cur) =>
for {
users <- parseUsers(cur)
accumulated <- acc
} yield users ++ accumulated
}
} yield x
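      // Only report a lastEvaluatedId when more ids remain beyond this page, so callers know to keep paging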
allUsers.map { list =>
val lastEvaluatedId =
if (filtered.size > list.size) list.sortBy(_.id).lastOption.map(_.id) else None
ListUsersResults(list, lastEvaluatedId)
}
}
}
def getAllUsers: IO[List[User]] =
monitor("repo.User.getAllUsers") {
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
"getAllUsers is not supported by VinylDNS DynamoDB UserRepository"
)
)
}
def getUserByAccessKey(accessKey: String): IO[Option[User]] =
monitor("repo.User.getUserByAccessKey") {
log.info(s"Getting user by access key $accessKey")
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":access_key", new AttributeValue(accessKey))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#access_key_attribute", ACCESS_KEY)
val keyConditionExpression: String = "#access_key_attribute = :access_key"
val queryRequest = new QueryRequest()
.withTableName(userTableName)
.withIndexName(ACCESS_KEY_INDEX_NAME)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
dynamoDBHelper.query(queryRequest).flatMap { results =>
results.getItems.asScala.headOption.map(deserialize).sequence
}
}
  def save(user: User): IO[User] = // For testing purposes
monitor("repo.User.save") {
log.info(s"Saving user id: ${user.id} name: ${user.userName}.")
val request = new PutItemRequest().withTableName(userTableName).withItem(serialize(user))
dynamoDBHelper.putItem(request).map(_ => user)
}
def save(users: List[User]): IO[List[User]] =
monitor("repo.User.save") {
IO.raiseError(
UnsupportedDynamoDBRepoFunction(
"batch save is not supported by VinylDNS DynamoDb UserRepository"
)
)
}
}

View File

@@ -1,177 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.nio.ByteBuffer
import java.util.HashMap
import cats.effect._
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import vinyldns.core.domain.zone.{ListZoneChangesResults, ZoneChange, ZoneChangeRepository}
import vinyldns.core.protobuf.ProtobufConversions
import vinyldns.core.route.Monitored
import vinyldns.proto.VinylDNSProto
import scala.util.Try
object DynamoDBZoneChangeRepository extends ProtobufConversions {
private[repository] val ZONE_ID = "zone_id"
private[repository] val CHANGE_ID = "change_id"
private[repository] val BLOB = "blob"
private[repository] val CREATED = "created"
private val ZONE_ID_CREATED_INDEX = "zone_id_created_index"
def apply(
config: DynamoDBRepositorySettings,
dynamoConfig: DynamoDBDataStoreSettings
): IO[DynamoDBZoneChangeRepository] = {
val dynamoDBHelper = new DynamoDBHelper(
DynamoDBClient(dynamoConfig),
LoggerFactory.getLogger("DynamoDBZoneChangeRepository")
)
val dynamoReads = config.provisionedReads
val dynamoWrites = config.provisionedWrites
val tableName = config.tableName
val tableAttributes = Seq(
new AttributeDefinition(CHANGE_ID, "S"),
new AttributeDefinition(ZONE_ID, "S"),
new AttributeDefinition(CREATED, "N")
)
val secondaryIndexes = Seq(
new GlobalSecondaryIndex()
.withIndexName(ZONE_ID_CREATED_INDEX)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(CREATED, KeyType.RANGE)
)
.withProjection(new Projection().withProjectionType("ALL"))
)
val setup = dynamoDBHelper.setupTable(
new CreateTableRequest()
.withTableName(tableName)
.withAttributeDefinitions(tableAttributes: _*)
.withKeySchema(
new KeySchemaElement(ZONE_ID, KeyType.HASH),
new KeySchemaElement(CHANGE_ID, KeyType.RANGE)
)
.withGlobalSecondaryIndexes(secondaryIndexes: _*)
.withProvisionedThroughput(new ProvisionedThroughput(dynamoReads, dynamoWrites))
)
setup.as(new DynamoDBZoneChangeRepository(tableName, dynamoDBHelper))
}
}
class DynamoDBZoneChangeRepository private[repository] (
zoneChangeTable: String,
val dynamoDBHelper: DynamoDBHelper
) extends ZoneChangeRepository
with ProtobufConversions
with Monitored {
import scala.collection.JavaConverters._
import DynamoDBZoneChangeRepository._
implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_.isAfter(_))
val log = LoggerFactory.getLogger(classOf[DynamoDBZoneChangeRepository])
def save(zoneChange: ZoneChange): IO[ZoneChange] =
monitor("repo.ZoneChange.save") {
log.info(s"Saving zone change ${zoneChange.id}")
val item = toItem(zoneChange)
val request = new PutItemRequest().withTableName(zoneChangeTable).withItem(item)
dynamoDBHelper.putItem(request).map(_ => zoneChange)
}
def listZoneChanges(
zoneId: String,
startFrom: Option[String] = None,
maxItems: Int = 100
): IO[ListZoneChangesResults] =
monitor("repo.ZoneChange.getChanges") {
log.info(s"Getting zone changes for zone $zoneId")
      // startFrom is a created timestamp in milliseconds; default to now to start from the most recent change
val startTime = startFrom.getOrElse(DateTime.now.getMillis.toString)
val expressionAttributeValues = new HashMap[String, AttributeValue]
expressionAttributeValues.put(":zone_id", new AttributeValue(zoneId))
expressionAttributeValues.put(":created", new AttributeValue().withN(startTime))
val expressionAttributeNames = new HashMap[String, String]
expressionAttributeNames.put("#zone_id_attribute", ZONE_ID)
expressionAttributeNames.put("#created_attribute", CREATED)
val keyConditionExpression: String =
"#zone_id_attribute = :zone_id AND #created_attribute < :created"
val queryRequest = new QueryRequest()
.withTableName(zoneChangeTable)
.withIndexName(ZONE_ID_CREATED_INDEX)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
.withScanIndexForward(false) // return in descending order by sort key
.withLimit(maxItems)
dynamoDBHelper.queryAll(queryRequest).map { resultList =>
val items = resultList.flatMap { result =>
result.getItems.asScala.map(fromItem).distinct
}
val nextId = Try(resultList.last.getLastEvaluatedKey.get("created").getN).toOption
ListZoneChangesResults(items, nextId, startFrom, maxItems)
}
}
def fromItem(item: java.util.Map[String, AttributeValue]): ZoneChange =
try {
val blob = item.get(BLOB)
fromPB(VinylDNSProto.ZoneChange.parseFrom(blob.getB.array()))
} catch {
case ex: Throwable =>
log.error("fromItem", ex)
throw new UnexpectedDynamoResponseException(ex.getMessage, ex)
}
def toItem(zoneChange: ZoneChange): java.util.HashMap[String, AttributeValue] = {
val blob = toPB(zoneChange).toByteArray
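    // copy the protobuf bytes into a ByteBuffer and rewind it so the full blob is written to the BLOB attribute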
val bb = ByteBuffer.allocate(blob.length)
bb.put(blob)
bb.position(0)
val item = new java.util.HashMap[String, AttributeValue]()
item.put(CHANGE_ID, new AttributeValue(zoneChange.id))
item.put(ZONE_ID, new AttributeValue(zoneChange.zoneId))
item.put(BLOB, new AttributeValue().withB(bb))
item.put(CREATED, new AttributeValue().withN(zoneChange.created.getMillis.toString))
item
}
}

View File

@@ -1,219 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import java.util.HashMap
import cats.effect._
import com.amazonaws.services.dynamodbv2.model.{AttributeValue, QueryRequest, QueryResult, Select}
import scala.collection.JavaConverters._
trait ResponseItems {
def addResult(newResult: QueryResult): ResponseItems
def isComplete(limit: Option[Int]): Boolean
def trimTo(limit: Option[Int]): ResponseItems
}
case class QueryResponseItems(
items: List[java.util.Map[String, AttributeValue]] = List(),
lastEvaluatedKey: Option[java.util.Map[String, AttributeValue]] = None
) extends ResponseItems {
override def addResult(newResult: QueryResult): QueryResponseItems =
QueryResponseItems(items ++ newResult.getItems.asScala, Option(newResult.getLastEvaluatedKey))
override def isComplete(limit: Option[Int]): Boolean =
(limit, lastEvaluatedKey) match {
case (_, None) => true
case (Some(lim), _) if lim <= items.length => true
case _ => false
}
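  // When more items were accumulated than requested, keep only the first `limit` and use the last kept item as the resume key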
override def trimTo(limit: Option[Int]): QueryResponseItems =
limit match {
case Some(lim) if items.length > lim =>
val trimmedItems = items.take(lim)
val last = trimmedItems.last
QueryResponseItems(trimmedItems, Some(last))
case _ => this
}
}
case class QueryResponseCount(
count: Int = 0,
lastEvaluatedKey: Option[java.util.Map[String, AttributeValue]] = None
) extends ResponseItems {
override def addResult(newResult: QueryResult): QueryResponseCount =
QueryResponseCount(count + newResult.getCount, Option(newResult.getLastEvaluatedKey))
override def isComplete(limit: Option[Int]): Boolean =
lastEvaluatedKey match {
case None => true
case _ => false
}
override def trimTo(limit: Option[Int]): QueryResponseCount = this
}
trait FilterType {
val attributeName: String
val attributeValue: String
def getFilterString(name: String, value: String): String
}
case class ContainsFilter(attributeName: String, attributeValue: String) extends FilterType {
override def getFilterString(name: String, value: String) = s"contains ($name, $value)"
}
case class EqualsFilter(attributeName: String, attributeValue: String) extends FilterType {
override def getFilterString(name: String, value: String) = s"$name = $value"
}
object QueryManager {
def apply(
tableName: String,
index: String,
keyConditions: Map[String, String],
filter: Option[FilterType],
initialStartKey: Option[Map[String, String]],
maxItems: Option[Int],
isCountQuery: Boolean
): QueryManager = {
val expressionAttributeValues = new HashMap[String, AttributeValue]
val expressionAttributeNames = new HashMap[String, String]
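    // build one equality condition per key, registering placeholder names and values for the expression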
val expression = keyConditions.zipWithIndex.map { item =>
val ((attrName, attrValue), count) = item
expressionAttributeValues.put(s":attrVal$count", new AttributeValue(attrValue))
expressionAttributeNames.put(s"#attr_name$count", attrName)
s"#attr_name$count = :attrVal$count"
}
val keyConditionExpression = expression.reduce(_ + " AND " + _)
// set filter expression if applicable
val filterExpression = filter.map { f =>
expressionAttributeNames.put("#filter_name", f.attributeName)
expressionAttributeValues.put(":filterVal", new AttributeValue(f.attributeValue))
f.getFilterString("#filter_name", ":filterVal")
}
val start: Option[util.Map[String, AttributeValue]] = initialStartKey.map {
_.map {
case (key, value) =>
(key, new AttributeValue(value))
}.asJava
}
QueryManager(
tableName,
index,
expressionAttributeNames,
expressionAttributeValues,
keyConditionExpression,
start,
filterExpression,
maxItems,
isCountQuery
)
}
}
case class QueryManager(
tableName: String,
index: String,
expressionAttributeNames: util.HashMap[String, String],
expressionAttributeValues: util.HashMap[String, AttributeValue],
keyConditionExpression: String,
startKey: Option[util.Map[String, AttributeValue]],
filterExpression: Option[String],
maxItems: Option[Int],
isCountQuery: Boolean
) {
def build(): QueryRequest = {
val request = new QueryRequest()
.withTableName(tableName)
.withIndexName(index)
.withExpressionAttributeNames(expressionAttributeNames)
.withExpressionAttributeValues(expressionAttributeValues)
.withKeyConditionExpression(keyConditionExpression)
filterExpression.foreach(request.withFilterExpression(_))
maxItems.foreach(request.withLimit(_))
startKey.foreach(request.withExclusiveStartKey(_))
if (isCountQuery) request.withSelect(Select.COUNT)
request
}
}
trait QueryHelper {
def doQuery(
tableName: String,
index: String,
keyConditions: Map[String, String],
nameFilter: Option[FilterType] = None,
startKey: Option[Map[String, String]] = None,
maxItems: Option[Int] = None,
isCountQuery: Boolean = false
): DynamoDBHelper => IO[ResponseItems] = dynamoDbHelper => {
    // do not apply a limit when a filter is present: DynamoDB evaluates filters after the limit, so limiting first could drop matching items
val itemsToRetrieve = nameFilter match {
case Some(_) => None
case None => maxItems
}
val response =
if (isCountQuery) QueryResponseCount()
else QueryResponseItems()
val queryManager =
QueryManager(
tableName,
index,
keyConditions,
nameFilter,
startKey,
itemsToRetrieve,
isCountQuery
)
completeQuery(dynamoDbHelper, queryManager, response, maxItems)
}
private def completeQuery(
dynamoDbHelper: DynamoDBHelper,
dynamoQuery: QueryManager,
acc: ResponseItems,
limit: Option[Int]
): IO[ResponseItems] =
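    // keep querying from the last evaluated key until the limit is satisfied or no pages remain, then trim to the limit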
dynamoDbHelper.query(dynamoQuery.build()).flatMap { queryResult =>
val accumulatedResults = acc.addResult(queryResult)
if (accumulatedResults.isComplete(limit))
IO(accumulatedResults.trimTo(limit))
else
completeQuery(
dynamoDbHelper,
dynamoQuery.copy(startKey = Some(queryResult.getLastEvaluatedKey)),
accumulatedResults,
limit
)
}
}

View File

@@ -1,40 +0,0 @@
akka.loglevel = "OFF"
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDbDataStoreProvider"
settings {
key = "vinyldnsTest"
secret = "notNeededForDynamoDbLocal"
endpoint = "http://127.0.0.1:19000"
region = "us-east-1"
}
repositories {
record-change {
table-name = "recordChangeTest"
provisioned-reads = 30
provisioned-writes = 30
}
zone-change {
table-name = "zoneChanges"
provisioned-reads = 30
provisioned-writes = 30
}
group {
table-name = "groups"
provisioned-reads = 30
provisioned-writes = 30
}
group-change {
table-name = "groupChanges"
provisioned-reads = 30
provisioned-writes = 30
}
membership {
table-name = "membership"
provisioned-reads = 30
provisioned-writes = 30
}
}
}

View File

@@ -1,61 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb
import com.typesafe.config.{Config, ConfigFactory}
import vinyldns.core.repository.{DataStoreConfig, RepositoriesConfig}
import vinyldns.dynamodb.repository.DynamoDBRepositorySettings
import pureconfig._
import pureconfig.generic.auto._
object DynamoTestConfig {
lazy val config: Config = ConfigFactory.load()
lazy val dynamoDBConfig: DataStoreConfig =
ConfigSource.fromConfig(config).at("dynamodb").loadOrThrow[DataStoreConfig]
lazy val baseReposConfigs: RepositoriesConfig = dynamoDBConfig.repositories
lazy val zoneChangeStoreConfig: DynamoDBRepositorySettings =
ConfigSource.fromConfig(baseReposConfigs.zoneChange.get).loadOrThrow[DynamoDBRepositorySettings]
lazy val recordChangeStoreConfig: DynamoDBRepositorySettings =
ConfigSource
.fromConfig(baseReposConfigs.recordChange.get)
.loadOrThrow[DynamoDBRepositorySettings]
  // Needed for testing DynamoDBUserRepository, but can't be included in the config directly because it is not implemented
lazy val usertableConfig: Config = ConfigFactory.parseString("""
| table-name = "users"
| provisioned-reads = 30
| provisioned-writes = 30
""".stripMargin)
lazy val usersStoreConfig: DynamoDBRepositorySettings =
ConfigSource.fromConfig(usertableConfig).loadOrThrow[DynamoDBRepositorySettings]
lazy val groupsStoreConfig: DynamoDBRepositorySettings =
ConfigSource.fromConfig(baseReposConfigs.group.get).loadOrThrow[DynamoDBRepositorySettings]
lazy val groupChangesStoreConfig: DynamoDBRepositorySettings =
ConfigSource
.fromConfig(baseReposConfigs.groupChange.get)
.loadOrThrow[DynamoDBRepositorySettings]
lazy val membershipStoreConfig: DynamoDBRepositorySettings =
ConfigSource.fromConfig(baseReposConfigs.membership.get).loadOrThrow[DynamoDBRepositorySettings]
}

View File

@@ -1,142 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.crypto.NoOpCrypto
import vinyldns.core.repository.{
DataStoreConfig,
DataStoreStartupError,
RepositoriesConfig,
RepositoryName
}
import vinyldns.dynamodb.DynamoTestConfig
import pureconfig._
import pureconfig.generic.auto._
class DynamoDBDataStoreProviderSpec extends AnyWordSpec with Matchers {
private val underTest = new DynamoDBDataStoreProvider()
private val crypto = new NoOpCrypto()
"load" should {
    // Note: a successful load would actually start up the repos, so unit tests only exercise the failure path
"Fail if a required setting is not included" in {
val badConfig = ConfigFactory.parseString(
"""
| class-name = "vinyldns.dynamodb.repository.DynamoDbDataStoreProvider"
|
| settings {
| key = "vinyldnsTest"
| secret = "notNeededForDynamoDbLocal"
| }
|
| repositories {
| record-change {
| table-name = "test"
| provisioned-reads = 30
| provisioned-writes = 30
| }
| }
| """.stripMargin
)
val badSettings = ConfigSource.fromConfig(badConfig).loadOrThrow[DataStoreConfig]
a[pureconfig.error.ConfigReaderException[DynamoDBDataStoreSettings]] should be thrownBy underTest
.load(badSettings, crypto)
.unsafeRunSync()
}
}
"validateRepos" should {
"Return successfully if all configured repos are implemented" in {
noException should be thrownBy underTest
.validateRepos(DynamoTestConfig.dynamoDBConfig.repositories)
.unsafeRunSync()
}
"Fail if an unimplemented repo is enabled" in {
val placeHolder = ConfigFactory.parseString("test=test")
val badRepos = DynamoTestConfig.dynamoDBConfig.repositories.copy(zone = Some(placeHolder))
val thrown = the[DataStoreStartupError] thrownBy underTest
.validateRepos(badRepos)
.unsafeRunSync()
thrown.msg shouldBe "Invalid config provided to dynamodb; unimplemented repos included: Set(zone)"
}
}
"loadRepoConfigs" should {
"Return a map of configured repos are properly configured" in {
val enabledRepoConf: Config =
ConfigFactory.parseString("""
|{
| table-name = "someName"
| provisioned-reads = 20
| provisioned-writes = 30
| }
""".stripMargin)
val repoSettings =
RepositoriesConfig(
Some(enabledRepoConf),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None
)
val response = underTest
.loadRepoConfigs(repoSettings)
.unsafeRunSync()
response shouldBe Map(RepositoryName.user -> DynamoDBRepositorySettings("someName", 20, 30))
}
"Return an error if a repo isnt configured correctly" in {
val badRepoConf: Config =
ConfigFactory.parseString("""
|{
| provisioned-reads = 20
| provisioned-writes = 30
| }
""".stripMargin)
val repoSettings =
RepositoriesConfig(
Some(badRepoConf),
Some(badRepoConf),
None,
None,
None,
None,
None,
None,
None,
None,
None
)
a[pureconfig.error.ConfigReaderException[DynamoDBRepositorySettings]] should be thrownBy underTest
.loadRepoConfigs(repoSettings)
.unsafeRunSync()
}
}
}

View File

@@ -1,225 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model.{GetItemRequest, ResourceNotFoundException, _}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.TestMembershipData._
import scala.collection.JavaConverters._
import cats.effect._
import vinyldns.dynamodb.DynamoTestConfig
class DynamoDBGroupChangeRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
private val groupChangeStoreConfig = DynamoTestConfig.groupChangesStoreConfig
private val groupChangeTable = groupChangeStoreConfig.tableName
class TestDynamoDBGroupChangeRepository
extends DynamoDBGroupChangeRepository(groupChangeTable, dynamoDBHelper)
private val underTest = new DynamoDBGroupChangeRepository(groupChangeTable, dynamoDBHelper)
override def beforeEach(): Unit =
reset(dynamoDBHelper)
"DynamoDBGroupChangeRepository.toItem and fromItem" should {
"work with all values set" in {
val roundRobin = underTest.fromItem(underTest.toItem(okGroupChangeUpdate))
roundRobin shouldBe okGroupChangeUpdate
}
"work with oldGroup = None" in {
val roundRobin = underTest.fromItem(underTest.toItem(okGroupChange))
roundRobin shouldBe okGroupChange
}
}
"DynamoDBGroupChangeRepository.save" should {
"return the group change when saved" in {
val mockPutItemResult = mock[PutItemResult]
doReturn(IO.pure(mockPutItemResult))
.when(dynamoDBHelper)
.putItem(any[PutItemRequest])
val response = underTest.save(okGroupChange).unsafeRunSync()
response shouldBe okGroupChange
}
"throw exception when save returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.putItem(any[PutItemRequest])
val result = underTest.save(okGroupChange)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
}
"DynamoDBGroupChangeRepository.getGroupChange" should {
"return the group change if the id is found" in {
val dynamoResponse = mock[GetItemResult]
val expected = underTest.toItem(okGroupChange)
doReturn(expected).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getGroupChange(okGroupChange.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe Some(okGroupChange)
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.getItem(any[GetItemRequest])
val result = underTest.getGroupChange(okGroupChange.id)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
"return None if not found" in {
val dynamoResponse = mock[GetItemResult]
doReturn(null).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getGroupChange(okGroupChange.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe None
}
}
"DynamoDBGroupChangeRepository.getGroupChanges" should {
"returns all matching GroupChanges and the correct nextId" in {
val dynamoResponse = mock[QueryResult]
val expected = listOfDummyGroupChanges.slice(0, 100).map(underTest.toItem).asJava
doReturn(expected).when(dynamoResponse).getItems()
val lastEvaluatedKey = underTest.toItem(listOfDummyGroupChanges(99))
doReturn(lastEvaluatedKey).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupChanges(oneUserDummyGroup.id, None, 100).unsafeRunSync()
response.changes should contain theSameElementsAs listOfDummyGroupChanges.take(100)
response.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(99).created.getMillis.toString
)
}
"returns an empty list when no matching changes are found" in {
val dynamoResponse = mock[QueryResult]
val expected = List().asJava
doReturn(expected).when(dynamoResponse).getItems()
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupChanges(oneUserDummyGroup.id, None, 100).unsafeRunSync()
response.changes shouldBe Seq()
response.lastEvaluatedTimeStamp shouldBe None
}
"starts from the correct change" in {
val dynamoGetResponse = mock[GetItemResult]
doReturn(underTest.toItem(listOfDummyGroupChanges(50))).when(dynamoGetResponse).getItem
doReturn(IO.pure(dynamoGetResponse))
.when(dynamoDBHelper)
.getItem(any[GetItemRequest])
val dynamoQueryResponse = mock[QueryResult]
val expected = listOfDummyGroupChanges.slice(51, 151).map(underTest.toItem).asJava
doReturn(expected).when(dynamoQueryResponse).getItems()
val lastEvaluatedKey = underTest.toItem(listOfDummyGroupChanges(150))
doReturn(lastEvaluatedKey).when(dynamoQueryResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoQueryResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest
.getGroupChanges(
oneUserDummyGroup.id,
Some(listOfDummyGroupChanges(50).created.getMillis.toString),
100
)
.unsafeRunSync()
response.changes should contain theSameElementsAs listOfDummyGroupChanges.slice(51, 151)
response.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(150).created.getMillis.toString
)
}
"returns `maxItems` items" in {
val dynamoResponse = mock[QueryResult]
val expected = listOfDummyGroupChanges.slice(0, 50).map(underTest.toItem).asJava
doReturn(expected).when(dynamoResponse).getItems()
val lastEvaluatedKey = underTest.toItem(listOfDummyGroupChanges(49))
doReturn(lastEvaluatedKey).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupChanges(oneUserDummyGroup.id, None, 50).unsafeRunSync()
response.changes should contain theSameElementsAs listOfDummyGroupChanges.take(50)
response.lastEvaluatedTimeStamp shouldBe Some(
listOfDummyGroupChanges(49).created.getMillis.toString
)
}
"returns entire page and nextId = None if there are less than maxItems left" in {
val dynamoGetResponse = mock[GetItemResult]
doReturn(underTest.toItem(listOfDummyGroupChanges(99))).when(dynamoGetResponse).getItem
doReturn(IO.pure(dynamoGetResponse))
.when(dynamoDBHelper)
.getItem(any[GetItemRequest])
val dynamoQueryResponse = mock[QueryResult]
val expected = listOfDummyGroupChanges.slice(100, 200).map(underTest.toItem).asJava
doReturn(expected).when(dynamoQueryResponse).getItems()
doReturn(IO.pure(dynamoQueryResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response =
underTest
.getGroupChanges(oneUserDummyGroup.id, Some(listOfDummyGroupChanges(99).id), 100)
.unsafeRunSync()
response.changes should contain theSameElementsAs (listOfDummyGroupChanges.slice(100, 200))
response.lastEvaluatedTimeStamp shouldBe None
}
}
}

View File

@@ -1,288 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model.{GetItemRequest, ResourceNotFoundException, _}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.TestMembershipData._
import scala.collection.JavaConverters._
import cats.effect._
import vinyldns.dynamodb.DynamoTestConfig
class DynamoDBGroupRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
private val groupsStoreConfig = DynamoTestConfig.groupsStoreConfig
private val groupsTable = groupsStoreConfig.tableName
private val underTest = new DynamoDBGroupRepository(groupsTable, dynamoDBHelper)
override def beforeEach(): Unit =
reset(dynamoDBHelper)
"DynamoDBGroupRepository.toItem" should {
"set all values correctly" in {
val items = underTest.toItem(okGroup)
items.get("group_id").getS shouldBe okGroup.id
items.get("name").getS shouldBe okGroup.name
items.get("email").getS shouldBe okGroup.email
items.get("created").getN shouldBe okGroup.created.getMillis.toString
items.get("status").getS shouldBe okGroup.status.toString
items.get("member_ids").getSS should contain theSameElementsAs okGroup.memberIds
items.get("admin_ids").getSS should contain theSameElementsAs okGroup.adminUserIds
items.get("desc").getS shouldBe okGroup.description.get
}
"set the description to null if it is not present" in {
val emptyDesc = okGroup.copy(description = None)
val items = underTest.toItem(emptyDesc)
items.get("desc").getS shouldBe null
items.get("desc").getNULL shouldBe true
}
}
"DynamoDBGroupRepository.fromItem" should {
"set all the values correctly" in {
val items = underTest.toItem(okGroup)
val group = underTest.fromItem(items)
group shouldBe okGroup
}
"set all the values correctly if description is not present" in {
val emptyDesc = okGroup.copy(description = None)
val items = underTest.toItem(emptyDesc)
val group = underTest.fromItem(items)
group shouldBe emptyDesc
}
}
"DynamoDBGroupRepository.save" should {
"return the group when saved" in {
val mockPutItemResult = mock[PutItemResult]
doReturn(IO.pure(mockPutItemResult))
.when(dynamoDBHelper)
.putItem(any[PutItemRequest])
val response = underTest.save(okGroup).unsafeRunSync()
response shouldBe okGroup
}
}
"DynamoDBGroupRepository.getGroupByName" should {
"return a group if the name is found" in {
val mockQueryResult = mock[QueryResult]
val expected = underTest.toItem(okGroup)
doReturn(List(expected).asJava).when(mockQueryResult).getItems
doReturn(IO.pure(mockQueryResult)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupByName(okGroup.id).unsafeRunSync()
response shouldBe Some(okGroup)
}
"return None if the group is not found" in {
val mockQueryResult = mock[QueryResult]
doReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
.when(mockQueryResult)
.getItems
doReturn(IO.pure(mockQueryResult)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupByName(okGroup.id).unsafeRunSync()
response shouldBe None
}
"return None if the group is deleted" in {
val mockQueryResult = mock[QueryResult]
val expected = underTest.toItem(deletedGroup)
doReturn(List(expected).asJava).when(mockQueryResult).getItems
doReturn(IO.pure(mockQueryResult)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getGroupByName(deletedGroup.id).unsafeRunSync()
response shouldBe None
}
}
"DynamoDBGroupRepository.getGroup" should {
"return the group if the id is found" in {
val dynamoResponse = mock[GetItemResult]
val expected = underTest.toItem(okGroup)
doReturn(expected).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getGroup(okGroup.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe Some(okGroup)
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.getItem(any[GetItemRequest])
val result = underTest.getGroup(okGroup.id)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
"return None if not found" in {
val dynamoResponse = mock[GetItemResult]
doReturn(null).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getGroup(okGroup.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe None
}
"not return a group if it is deleted" in {
val dynamoResponse = mock[GetItemResult]
val expected = underTest.toItem(deletedGroup)
doReturn(expected).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getGroup(deletedGroup.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe None
}
}
"DynamoDBGroupRepository.getGroups" should {
"return the groups if the id is found" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map(
groupsTable -> listOfDummyGroups
.slice(0, 100)
.map(underTest.toItem)
.asJava
).asJava
doReturn(firstPage).when(firstResponse).getResponses
val secondResponse = mock[BatchGetItemResult]
val secondPage = Map(
groupsTable -> listOfDummyGroups
.slice(100, 200)
.map(underTest.toItem)
.asJava
).asJava
doReturn(secondPage).when(secondResponse).getResponses
doReturn(IO.pure(firstResponse))
.doReturn(IO.pure(secondResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getGroups(listOfDummyGroups.map(_.id).toSet).unsafeRunSync()
verify(dynamoDBHelper, times(2)).batchGetItem(any[BatchGetItemRequest])
response should contain theSameElementsAs listOfDummyGroups
}
"not return a group if it is deleted" in {
val dynamoResponse = mock[BatchGetItemResult]
val expected = underTest.toItem(deletedGroup)
val firstPage = Map(groupsTable -> List(expected).asJava).asJava
doReturn(firstPage).when(dynamoResponse).getResponses
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getGroups(Set(deletedGroup.id)).unsafeRunSync()
response shouldBe empty
}
"return None if no groups found" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map(groupsTable -> List().asJava).asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getGroups(Set("notFound")).unsafeRunSync()
verify(dynamoDBHelper).batchGetItem(any[BatchGetItemRequest])
response should contain theSameElementsAs Set()
}
"return None if table is missing" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map().asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getGroups(Set("notFound")).unsafeRunSync()
verify(dynamoDBHelper).batchGetItem(any[BatchGetItemRequest])
response should contain theSameElementsAs Set()
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val result = underTest.getGroups(listOfDummyGroups.map(_.id).toSet)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
}
"DynamoDBGroupRepository.delete" should {
"return a deleted group on delete" in {
val mockDeleteItemRequest = mock[DeleteItemResult]
doReturn(IO.pure(mockDeleteItemRequest))
.when(dynamoDBHelper)
.deleteItem(any[DeleteItemRequest])
val response = underTest.delete(okGroup).unsafeRunSync()
response shouldBe okGroup
}
}
}

View File

@@ -1,240 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model.{BatchWriteItemResult, _}
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.TestMembershipData._
import scala.collection.JavaConverters._
import cats.effect._
import vinyldns.dynamodb.DynamoTestConfig
import scala.concurrent.duration.FiniteDuration
class DynamoDBMembershipRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val membershipStoreConfig = DynamoTestConfig.membershipStoreConfig
private val membershipTable = membershipStoreConfig.tableName
private val dynamoDBHelper = mock[DynamoDBHelper]
class TestDynamoDBMembershipRepository
extends DynamoDBMembershipRepository(membershipTable, dynamoDBHelper) {}
private val underTest = new TestDynamoDBMembershipRepository
override def beforeEach(): Unit = reset(dynamoDBHelper)
"DynamoDBMembershipRepository.addMembers" should {
"add the members in batches and return the members that were added to a group" in {
val members = (for (i <- 1 to 60) yield s"member-${i}").toSet
val batchCaptor = ArgumentCaptor.forClass(classOf[BatchWriteItemRequest])
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.saveMembers(okGroup.id, members, isAdmin = false).unsafeRunSync()
verify(dynamoDBHelper, times(3)).batchWriteItem(
any[String],
batchCaptor.capture(),
any[Int],
any[FiniteDuration]
)
// we should have 3 batches
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).getRequestItems.get(membershipTable).size() shouldBe 25
batchWrites.get(1).getRequestItems.get(membershipTable).size() shouldBe 25
batchWrites.get(2).getRequestItems.get(membershipTable).size() shouldBe 10
response should contain theSameElementsAs members
}
"add the members in a single batch if there are less than 25 members to be added" in {
val members = (for (i <- 1 to 20) yield s"member-${i}").toSet
val batchCaptor = ArgumentCaptor.forClass(classOf[BatchWriteItemRequest])
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.saveMembers(okGroup.id, members, isAdmin = false).unsafeRunSync()
verify(dynamoDBHelper, times(1)).batchWriteItem(
any[String],
batchCaptor.capture(),
any[Int],
any[FiniteDuration]
)
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).getRequestItems.get(membershipTable).size() shouldBe 20
response should contain theSameElementsAs members
}
"throw an exception if thrown by dynamo" in {
val members = (for (i <- 1 to 30) yield s"member-${i}").toSet
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.doThrow(new RuntimeException("failed"))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.saveMembers(okGroup.id, members, isAdmin = false)
a[RuntimeException] shouldBe thrownBy(response.unsafeRunSync())
}
}
"DynamoDBMembershipRepository.removeMembers" should {
"remove the members in batches and return the members that were removed from the group" in {
val members = (for (i <- 1 to 60) yield s"member-${i}").toSet
val batchCaptor = ArgumentCaptor.forClass(classOf[BatchWriteItemRequest])
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.removeMembers(okGroup.id, members).unsafeRunSync()
verify(dynamoDBHelper, times(3)).batchWriteItem(
any[String],
batchCaptor.capture(),
any[Int],
any[FiniteDuration]
)
// we should have 3 batches
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).getRequestItems.get(membershipTable).size() shouldBe 25
batchWrites.get(1).getRequestItems.get(membershipTable).size() shouldBe 25
batchWrites.get(2).getRequestItems.get(membershipTable).size() shouldBe 10
response should contain theSameElementsAs members
}
"remove the members in a single batch if there are less than 25 members to be removed" in {
val members = (for (i <- 1 to 20) yield s"member-${i}").toSet
val batchCaptor = ArgumentCaptor.forClass(classOf[BatchWriteItemRequest])
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.removeMembers(okGroup.id, members).unsafeRunSync()
verify(dynamoDBHelper, times(1)).batchWriteItem(
any[String],
batchCaptor.capture(),
any[Int],
any[FiniteDuration]
)
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).getRequestItems.get(membershipTable).size() shouldBe 20
response should contain theSameElementsAs members
}
"throw an exception if thrown by dynamo" in {
val members = (for (i <- 1 to 30) yield s"member-${i}").toSet
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoDBMembershipRepository
doReturn(IO.pure(dynamoResponse))
.doThrow(new RuntimeException("failed"))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.removeMembers(okGroup.id, members)
a[RuntimeException] shouldBe thrownBy(response.unsafeRunSync())
}
}
"DynamoDBMembershipRepository.getGroupsForUser" should {
"returns empty if no groups exist" in {
val dynamoResponse = mock[QueryResult]
when(dynamoResponse.getItems)
.thenReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
when(dynamoDBHelper.query(any[QueryRequest])).thenReturn(IO.pure(dynamoResponse))
val store = new TestDynamoDBMembershipRepository
val response = store.getGroupsForUser(okUser.id).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe empty
}
"returns groups found for user" in {
val dynamoResponse = mock[QueryResult]
val expected = for (i <- 1 to 30) yield s"group-$i"
val resultList = expected.map(underTest.toItem(okUser.id, _)).asJava
when(dynamoResponse.getItems).thenReturn(resultList)
when(dynamoDBHelper.query(any[QueryRequest])).thenReturn(IO.pure(dynamoResponse))
val store = new TestDynamoDBMembershipRepository
val response = store.getGroupsForUser(okUser.id).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response should contain theSameElementsAs expected
}
"throw exception when query returns an unexpected response" in {
val store = new TestDynamoDBMembershipRepository
when(dynamoDBHelper.query(any[QueryRequest])).thenThrow(new ResourceNotFoundException("foo"))
a[RuntimeException] should be thrownBy store.getGroupsForUser(okUser.id)
}
}
}

View File

@@ -1,193 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model._
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.domain.record.ChangeSet
import vinyldns.core.TestRecordSetData._
import vinyldns.core.TestZoneData._
import cats.effect._
import org.scalatest.BeforeAndAfterEach
import vinyldns.dynamodb.DynamoTestConfig
import scala.concurrent.duration.FiniteDuration
class DynamoDBRecordChangeRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
private val recordSetConfig = DynamoTestConfig.recordChangeStoreConfig
private val recordChangeTable = recordSetConfig.tableName
class TestRepo extends DynamoDBRecordChangeRepository(recordChangeTable, dynamoDBHelper)
override def beforeEach(): Unit =
reset(dynamoDBHelper)
"DynamoDBRecordChangeRepository.save" should {
"group change sets into batch writes with 25 in each" in {
val changes = for (_ <- 1 to 52) yield pendingCreateAAAA
val changeSet = ChangeSet(changes)
val batchCaptor = ArgumentCaptor.forClass(classOf[Seq[WriteRequest]])
val dynamoResponse = mock[BatchWriteItemResult]
val dynamoRequest = mock[BatchWriteItemRequest]
doReturn(dynamoRequest)
.when(dynamoDBHelper)
.toBatchWriteItemRequest(any[Seq[WriteRequest]], anyString)
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val store = new TestRepo
val response = store.save(changeSet).unsafeRunSync()
verify(dynamoDBHelper, times(3))
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
verify(dynamoDBHelper, times(3)).toBatchWriteItemRequest(batchCaptor.capture(), anyString)
response shouldBe changeSet
// we should have 3 batches
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).size shouldBe 25
batchWrites.get(1).size shouldBe 25
batchWrites.get(2).size shouldBe 2
}
"returns a future failure if the first batch fails" in {
val changes = for (_ <- 0 to 52) yield pendingCreateAAAA
val changeSet = ChangeSet(changes)
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestRepo
doThrow(new RuntimeException("failed")) //fail on the first batch
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val result = store.save(changeSet)
a[RuntimeException] shouldBe thrownBy(result.unsafeRunSync())
}
"returns a future failure if any batch fails" in {
val changes = for (_ <- 0 to 52) yield pendingCreateAAAA
val changeSet = ChangeSet(changes)
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestRepo
when(
dynamoDBHelper
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
).thenReturn(IO.pure(dynamoResponse))
.thenThrow(new RuntimeException("failed")) //fail on the second batch
val result = store.save(changeSet)
a[RuntimeException] shouldBe thrownBy(result.unsafeRunSync())
}
}
"DynamoDBRecordChangeRepository.getRecordSetChange(zoneId, recordSetChangeId)" should {
"call AmazonDynamoDBClient.get when retrieving an record set using an id" in {
val dynamoResponse = mock[QueryResult]
val store = new TestRepo
val expected = new java.util.ArrayList[java.util.Map[String, AttributeValue]]()
expected.add(store.toItem(pendingChangeSet, pendingCreateAAAA))
when(dynamoResponse.getItems).thenReturn(expected)
when(dynamoDBHelper.query(any[QueryRequest])).thenReturn(IO.pure(dynamoResponse))
val response = store.getRecordSetChange(zoneActive.id, pendingCreateAAAA.id).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe Some(pendingCreateAAAA)
}
"throw exception when get returns an unexpected response" in {
when(dynamoDBHelper.query(any[QueryRequest]))
.thenThrow(new ResourceNotFoundException("bar does not exist"))
val store = new TestRepo
a[ResourceNotFoundException] should be thrownBy store.getRecordSetChange(
zoneActive.id,
pendingCreateAAAA.id
)
}
"return None if not found" in {
val dynamoResponse = mock[QueryResult]
when(dynamoResponse.getItems)
.thenReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
when(dynamoDBHelper.query(any[QueryRequest])).thenReturn(IO.pure(dynamoResponse))
val store = new DynamoDBRecordChangeRepository(recordChangeTable, dynamoDBHelper)
val response = store.getRecordSetChange(zoneActive.id, pendingCreateAAAA.id).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe None
}
}
"DynamoDBRecordChangeRepository.toRecordSetChange" should {
"be able to decode the output of toItem" in {
val store = new TestRepo
val blob = store.toItem(pendingChangeSet, pendingCreateAAAA)
val result = store.toRecordSetChange(blob)
result shouldBe pendingCreateAAAA
}
"throw an error when given bad input" in {
val store = new TestRepo
val blob = new java.util.HashMap[String, AttributeValue]()
intercept[UnexpectedDynamoResponseException] {
store.toRecordSetChange(blob)
}
}
}
"DynamoDBRecordChangeRepository.toItem" should {
"be able to encode an item" in {
val store = new TestRepo
val result = store.toItem(pendingChangeSet, pendingCreateAAAA)
store.toRecordSetChange(result) shouldBe pendingCreateAAAA
}
}
}

View File

@@ -1,208 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.nio.ByteBuffer
import com.amazonaws.services.dynamodbv2.model.AttributeValue
import org.joda.time.DateTime
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.domain.record.{
RecordSet,
RecordSetChange,
RecordSetChangeType,
RecordSetStatus
}
import vinyldns.core.protobuf.ProtobufConversions
import vinyldns.core.TestMembershipData.okUser
import vinyldns.core.TestRecordSetData._
import vinyldns.core.TestZoneData._
import vinyldns.proto.VinylDNSProto
import scala.collection.JavaConverters._
class DynamoDBRecordSetConversionsSpec
extends AnyWordSpec
with Matchers
with MockitoSugar
with ProtobufConversions {
import DynamoDBRecordSetRepository._
private val underTest = new DynamoDBRecordSetConversions {
private[repository] val recordSetTableName: String = "testTable"
}
private def theRecordSetIn(item: java.util.Map[String, AttributeValue]): RecordSet =
fromPB(VinylDNSProto.RecordSet.parseFrom(item.get(RECORD_SET_BLOB).getB.array()))
"DynamoDBRecordSetConversions" should {
"convert from and to item" in {
val rs = aaaa.copy(name = "MixedCase.")
val item = underTest.toItem(rs).asScala
item(RECORD_SET_ID).getS shouldBe rs.id
item(RECORD_SET_TYPE).getS shouldBe rs.typ.toString
item(ZONE_ID).getS shouldBe rs.zoneId
item(RECORD_SET_NAME).getS shouldBe rs.name
item(RECORD_SET_SORT).getS shouldBe "mixedcase"
underTest.fromItem(item.asJava) shouldBe rs
}
"throw an error if fromItem cannot parse" in {
intercept[UnexpectedDynamoResponseException] {
val item = underTest.toItem(aaaa)
val shouldFail = "HELLO".getBytes
val bb = ByteBuffer.allocate(shouldFail.length) //convert byte array to byte buffer
bb.put(shouldFail)
bb.position(0)
item.put(RECORD_SET_BLOB, new AttributeValue().withB(bb))
underTest.fromItem(item)
}
}
"toWriteRequests" should {
"convert a ChangeSet to Write Requests" in {
val result = underTest.toWriteRequests(pendingChangeSet)
result.size shouldBe pendingChangeSet.changes.size
val put1 = result.head.getPutRequest
val change1 = pendingChangeSet.changes.head
put1.getItem.get(RECORD_SET_ID).getS shouldBe change1.recordSet.id
theRecordSetIn(put1.getItem) shouldBe change1.recordSet
val put2 = result(1).getPutRequest
val change2 = pendingChangeSet.changes(1)
put2.getItem.get(RECORD_SET_ID).getS shouldBe change2.recordSet.id
theRecordSetIn(put2.getItem) shouldBe change2.recordSet
}
}
"toWriteRequest" should {
val pendingDeleteAAAA = RecordSetChange(
zone = zoneActive,
recordSet = aaaa.copy(
status = RecordSetStatus.PendingDelete,
updated = Some(DateTime.now)
),
userId = okUser.id,
changeType = RecordSetChangeType.Delete,
updates = Some(aaaa)
)
"convert a failed Add Record Set change" in {
val failedAdd = pendingCreateAAAA.failed()
val result = underTest.toWriteRequest(failedAdd)
Option(result.getPutRequest) shouldBe None
val delete = result.getDeleteRequest
delete.getKey.get(RECORD_SET_ID).getS shouldBe failedAdd.recordSet.id
}
"convert a failed Update Record Set change" in {
val failedUpdate = pendingUpdateAAAA.failed()
val result = underTest.toWriteRequest(failedUpdate)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe pendingUpdateAAAA.recordSet.id
theRecordSetIn(put.getItem) shouldBe pendingUpdateAAAA.updates.get
}
"convert a failed Delete Record Set change" in {
val failedDelete = pendingDeleteAAAA.failed()
val result = underTest.toWriteRequest(failedDelete)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe pendingDeleteAAAA.recordSet.id
theRecordSetIn(put.getItem) shouldBe pendingDeleteAAAA.updates.get
}
"convert a successful Add Record Set change" in {
val successAdd = pendingCreateAAAA.successful
val result = underTest.toWriteRequest(successAdd)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe successAdd.recordSet.id
theRecordSetIn(put.getItem) shouldBe successAdd.recordSet
}
"convert a successful Update Record Set change" in {
val successUpdate = pendingUpdateAAAA.successful
val result = underTest.toWriteRequest(successUpdate)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe successUpdate.recordSet.id
theRecordSetIn(put.getItem) shouldBe successUpdate.recordSet
}
"convert a successful Delete Record Set change" in {
val successDelete = pendingDeleteAAAA.successful
val result = underTest.toWriteRequest(successDelete)
Option(result.getPutRequest) shouldBe None
val delete = result.getDeleteRequest
delete.getKey.get(RECORD_SET_ID).getS shouldBe successDelete.recordSet.id
}
"store a pending Add Record Set change" in {
val result = underTest.toWriteRequest(pendingCreateAAAA)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe pendingCreateAAAA.recordSet.id
theRecordSetIn(put.getItem) shouldBe pendingCreateAAAA.recordSet
}
"store a pending Update Record Set change" in {
val result = underTest.toWriteRequest(pendingUpdateAAAA)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe pendingUpdateAAAA.recordSet.id
theRecordSetIn(put.getItem) shouldBe pendingUpdateAAAA.recordSet
}
"store a pending Delete Record Set change" in {
val result = underTest.toWriteRequest(pendingDeleteAAAA)
Option(result.getDeleteRequest) shouldBe None
val put = result.getPutRequest
put.getItem.get(RECORD_SET_ID).getS shouldBe pendingDeleteAAAA.recordSet.id
theRecordSetIn(put.getItem) shouldBe pendingDeleteAAAA.recordSet
}
}
}
}

View File

@@ -1,455 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import com.amazonaws.services.dynamodbv2.model._
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.domain.record.{ChangeSet, NameSort}
import vinyldns.core.TestRecordSetData._
import cats.effect._
import vinyldns.dynamodb.DynamoTestConfig
import scala.concurrent.duration.FiniteDuration
class DynamoDBRecordSetRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
private val recordChangeConfig = DynamoTestConfig.recordChangeStoreConfig
class TestDynamoRecordSetRepo
extends DynamoDBRecordSetRepository(recordChangeConfig.tableName, dynamoDBHelper)
override def beforeEach(): Unit =
reset(dynamoDBHelper)
"DynamoDBRecordSetRepository.applyChangeSet" should {
"return the ChangeSet" in {
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoRecordSetRepo
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val response = store.apply(pendingChangeSet).unsafeRunSync()
verify(dynamoDBHelper).batchWriteItem(
any[String],
any[BatchWriteItemRequest],
any[Int],
any[FiniteDuration]
)
response shouldBe pendingChangeSet
}
"group change sets into batch writes with 25 in each" in {
val changes = for (_ <- 1 to 52) yield pendingCreateAAAA
val batchCaptor = ArgumentCaptor.forClass(classOf[Seq[WriteRequest]])
val dynamoResponse = mock[BatchWriteItemResult]
val dynamoRequest = mock[BatchWriteItemRequest]
val store = new TestDynamoRecordSetRepo
doReturn(dynamoRequest)
.when(dynamoDBHelper)
.toBatchWriteItemRequest(any[Seq[WriteRequest]], anyString)
doReturn(IO.pure(dynamoResponse))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val changeSet = ChangeSet(changes)
val response = store.apply(changeSet).unsafeRunSync()
verify(dynamoDBHelper, times(3)).batchWriteItem(
any[String],
any[BatchWriteItemRequest],
any[Int],
any[FiniteDuration]
)
verify(dynamoDBHelper, times(3)).toBatchWriteItemRequest(batchCaptor.capture(), anyString)
// we should have 3 batches
val batchWrites = batchCaptor.getAllValues
batchWrites.get(0).size shouldBe 25
batchWrites.get(1).size shouldBe 25
batchWrites.get(2).size shouldBe 2
response shouldBe changeSet
response.status shouldBe changeSet.status
}
"returns a future failure if any batch fails" in {
val changes = for (_ <- 0 to 52) yield pendingCreateAAAA
val dynamoResponse = mock[BatchWriteItemResult]
val unprocessed = mock[java.util.Map[String, AttributeValue]]
doReturn(null).when(unprocessed).get(anyString())
doReturn(unprocessed).when(dynamoResponse).getUnprocessedItems
val store = new TestDynamoRecordSetRepo
doReturn(IO.pure(dynamoResponse))
.doThrow(new RuntimeException("failed"))
.when(dynamoDBHelper)
.batchWriteItem(any[String], any[BatchWriteItemRequest], any[Int], any[FiniteDuration])
val result = store.apply(ChangeSet(changes))
a[RuntimeException] shouldBe thrownBy(result.unsafeRunSync())
}
}
"DynamoDBRecordSetRepository.getRecordSet(zoneId, recordSetId)" should {
"call AmazonDynamoDBClient.get when retrieving an record set using an id" in {
val dynamoResponse = mock[GetItemResult]
val store = new TestDynamoRecordSetRepo
val expected = store.toItem(rsOk)
when(dynamoResponse.getItem).thenReturn(expected)
when(dynamoDBHelper.getItem(any[GetItemRequest]))
.thenReturn(IO.pure(dynamoResponse))
val response = store.getRecordSet(rsOk.zoneId).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe Some(rsOk)
}
"throw exception when get returns an unexpected response" in {
when(dynamoDBHelper.getItem(any[GetItemRequest]))
.thenThrow(new ResourceNotFoundException("bar does not exist"))
val store = new TestDynamoRecordSetRepo
a[ResourceNotFoundException] should be thrownBy store.getRecordSet(rsOk.zoneId)
}
"return None if not found" in {
val dynamoResponse = mock[GetItemResult]
when(dynamoResponse.getItem).thenReturn(null)
when(dynamoDBHelper.getItem(any[GetItemRequest]))
.thenReturn(IO.pure(dynamoResponse))
val store = new DynamoDBRecordSetRepository(recordChangeConfig.tableName, dynamoDBHelper)
val response = store.getRecordSet(rsOk.zoneId).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe None
}
}
"DynamoDBRecordSetRepository.listRecordSets" should {
"returns empty if no record set exist" in {
val store = new DynamoDBRecordSetRepository(recordChangeConfig.tableName, dynamoDBHelper)
val dynamoResponse = mock[QueryResult]
val expectedItems = new util.ArrayList[util.HashMap[String, AttributeValue]]()
doReturn(expectedItems).when(dynamoResponse).getItems
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = store
.listRecordSets(
zoneId = Some(rsOk.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
nameSort = NameSort.ASC,
recordOwnerGroupFilter = None
)
.unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response.recordSets shouldBe empty
}
"returns all record sets returned" in {
val store = new TestDynamoRecordSetRepo
val dynamoResponse = mock[QueryResult]
val expectedItems = new util.ArrayList[util.Map[String, AttributeValue]]()
expectedItems.add(store.toItem(rsOk))
expectedItems.add(store.toItem(aaaa))
expectedItems.add(store.toItem(cname))
doReturn(expectedItems).when(dynamoResponse).getItems
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response =
store
.listRecordSets(
Some(rsOk.zoneId),
None,
Some(3),
None,
None,
None,
NameSort.ASC
)
.unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
(response.recordSets should contain).allOf(rsOk, aaaa, cname)
}
"throw exception when query returns an unexpected response" in {
when(dynamoDBHelper.query(any[QueryRequest]))
.thenThrow(new ResourceNotFoundException("failed"))
val store = new TestDynamoRecordSetRepo
a[ResourceNotFoundException] should be thrownBy store.listRecordSets(
zoneId = Some(rsOk.zoneId),
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
nameSort = NameSort.ASC,
recordOwnerGroupFilter = None
)
}
"return an error if used without a zoneId" in {
val store = new TestDynamoRecordSetRepo
an[UnsupportedDynamoDBRepoFunction] should be thrownBy store
.listRecordSets(
zoneId = None,
startFrom = None,
maxItems = None,
recordNameFilter = None,
recordTypeFilter = None,
nameSort = NameSort.ASC,
recordOwnerGroupFilter = None
)
.unsafeRunSync()
}
}
"DynamoDBRecordSetRepository.getRecordSetsByName(zoneId, name)" should {
"returns empty if no record set exist" in {
val dynamoResponse = mock[QueryResult]
val store = new TestDynamoRecordSetRepo
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
.when(dynamoResponse)
.getItems
val response = store.getRecordSetsByName(rsOk.zoneId, rsOk.name).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe empty
}
"call dynamoClient.query when retrieving an existing record set" in {
val dynamoResponse = mock[QueryResult]
val store = new TestDynamoRecordSetRepo
val resultList = new java.util.ArrayList[java.util.Map[String, AttributeValue]]()
resultList.add(store.toItem(rsOk))
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(resultList).when(dynamoResponse).getItems
val response = store.getRecordSetsByName(rsOk.zoneId, rsOk.name).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe List(rsOk)
}
"throw exception when query returns an unexpected response" in {
when(dynamoDBHelper.query(any[QueryRequest]))
.thenThrow(new ResourceNotFoundException("failed"))
val store = new TestDynamoRecordSetRepo
a[ResourceNotFoundException] should be thrownBy store
.getRecordSetsByName(rsOk.zoneId, rsOk.name)
}
}
"DynamoDBRecordSetRepository.getRecordSets(zoneId, name, type)" should {
"returns empty if no record set exist" in {
val dynamoResponse = mock[QueryResult]
val store = new TestDynamoRecordSetRepo
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
.when(dynamoResponse)
.getItems
val response = store.getRecordSets(rsOk.zoneId, rsOk.name, rsOk.typ).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe empty
}
"call dynamoClient.query when retrieving an existing record set" in {
val dynamoResponse = mock[QueryResult]
val store = new TestDynamoRecordSetRepo
val resultList = new java.util.ArrayList[java.util.Map[String, AttributeValue]]()
resultList.add(store.toItem(rsOk))
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(resultList).when(dynamoResponse).getItems
val response = store.getRecordSets(rsOk.zoneId, rsOk.name, rsOk.typ).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe List(rsOk)
}
"throw exception when query returns an unexpected response" in {
when(dynamoDBHelper.query(any[QueryRequest])).thenThrow(new ResourceNotFoundException("fail"))
val store = new DynamoDBRecordSetRepository(recordChangeConfig.tableName, dynamoDBHelper)
a[ResourceNotFoundException] should be thrownBy store.getRecordSets(
rsOk.zoneId,
rsOk.name,
rsOk.typ
)
}
}
"DynamoDBRecordSetRepository.getRecordSetCount(zoneId)" should {
"returns 0 when there is no matching record set" in {
val dynamoResponse = mock[QueryResult]
val expectedCount = 0
doReturn(expectedCount).when(dynamoResponse).getCount
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val store = new TestDynamoRecordSetRepo
val response = store.getRecordSetCount(rsOk.zoneId).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe 0
}
"returns the count value when available" in {
val dynamoResponse = mock[QueryResult]
val expectedCount = 10
doReturn(expectedCount).when(dynamoResponse).getCount
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val store = new TestDynamoRecordSetRepo
val response = store.getRecordSetCount(rsOk.zoneId).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe 10
}
"returns the aggregated count value if query is multiple pages" in {
val dynamoResponse1 = mock[QueryResult]
val dynamoResponse2 = mock[QueryResult]
val key = new util.HashMap[String, AttributeValue]
key.put("test", new AttributeValue("test"))
doReturn(25).when(dynamoResponse1).getCount
doReturn(25).when(dynamoResponse2).getCount
doReturn(key).when(dynamoResponse1).getLastEvaluatedKey
doReturn(null).when(dynamoResponse2).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse1))
.doReturn(IO.pure(dynamoResponse2))
.when(dynamoDBHelper)
.query(any[QueryRequest])
val store = new TestDynamoRecordSetRepo
val response = store.getRecordSetCount(rsOk.zoneId).unsafeRunSync()
verify(dynamoDBHelper, times(2)).query(any[QueryRequest])
response shouldBe 50
}
"throw exception when query returns an unexpected response" in {
when(dynamoDBHelper.query(any[QueryRequest])).thenThrow(new ResourceNotFoundException("fail"))
val store = new TestDynamoRecordSetRepo
a[ResourceNotFoundException] should be thrownBy store.getRecordSetCount(rsOk.zoneId)
}
}
"DynamoDBRecordSetRepository.fromItem" should {
"be able to decode the output of toItem" in {
val store = new TestDynamoRecordSetRepo
val blob = store.toItem(rsOk)
val result = store.fromItem(blob)
result shouldBe rsOk
}
"throw an error when given bad input" in {
val store = new TestDynamoRecordSetRepo
val blob = new java.util.HashMap[String, AttributeValue]()
intercept[UnexpectedDynamoResponseException] {
store.fromItem(blob)
}
}
}
"DynamoDBRecordSetRepository.toItem" should {
"be able to encode an item" in {
val store = new TestDynamoRecordSetRepo
val result = store.toItem(rsOk)
store.fromItem(result) shouldBe rsOk
}
}
"DynamoDBRecordSetRepository.getRecordSetsByFQDNs" should {
"return an error if used" in {
val store = new TestDynamoRecordSetRepo
an[UnsupportedDynamoDBRepoFunction] should be thrownBy store
.getRecordSetsByFQDNs(Set("test"))
.unsafeRunSync()
}
}
"DynamoDBRecordSetRepository.getRecordSetIdOwnerGroup" should {
"return an error if used" in {
val store = new TestDynamoRecordSetRepo
an[UnsupportedDynamoDBRepoFunction] should be thrownBy store
.getFirstOwnedRecordByGroup("someId")
.unsafeRunSync()
}
}
"DynamoDBRecordSetRepository.deleteRecordSetsInZone" should {
"return an error if used" in {
val store = new TestDynamoRecordSetRepo
an[UnsupportedDynamoDBRepoFunction] should be thrownBy store
.deleteRecordSetsInZone("zoneId", "zoneName")
.unsafeRunSync()
}
}
}

View File

@@ -1,416 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import com.amazonaws.services.dynamodbv2.model._
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.TestMembershipData._
import scala.collection.JavaConverters._
import cats.effect._
import com.typesafe.config.ConfigFactory
import vinyldns.core.crypto.{CryptoAlgebra, NoOpCrypto}
import vinyldns.core.domain.membership.LockStatus
import vinyldns.dynamodb.DynamoTestConfig
class DynamoDBUserRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
private val mockPutItemResult = mock[PutItemResult] // User repo is initialized with dummy users
doReturn(IO.pure(mockPutItemResult)).when(dynamoDBHelper).putItem(any[PutItemRequest])
private val usersStoreConfig = DynamoTestConfig.usersStoreConfig
private val userTable = usersStoreConfig.tableName
private val crypto = new NoOpCrypto(ConfigFactory.load())
private val underTest = new DynamoDBUserRepository(
userTable,
dynamoDBHelper,
DynamoDBUserRepository.toItem(crypto, _),
DynamoDBUserRepository.fromItem
)
override def beforeEach(): Unit =
reset(dynamoDBHelper)
import DynamoDBUserRepository._
"DynamoDBUserRepository.toItem" should {
"set all values correctly" in {
val crypt = new CryptoAlgebra {
def encrypt(value: String): String = "encrypted"
def decrypt(value: String): String = "decrypted"
}
val items = toItem(crypt, okUser)
items.get(USER_ID).getS shouldBe okUser.id
items.get(USER_NAME).getS shouldBe okUser.userName
items.get(ACCESS_KEY).getS shouldBe okUser.accessKey
items.get(SECRET_KEY).getS shouldBe "encrypted"
items.get(FIRST_NAME).getS shouldBe okUser.firstName.get
items.get(LAST_NAME).getS shouldBe okUser.lastName.get
items.get(EMAIL).getS shouldBe okUser.email.get
items.get(CREATED).getN shouldBe okUser.created.getMillis.toString
items.get(LOCK_STATUS).getS shouldBe okUser.lockStatus.toString
}
"set the first name to null if it is not present" in {
val emptyFirstName = okUser.copy(firstName = None)
val items = toItem(crypto, emptyFirstName)
Option(items.get(DynamoDBUserRepository.FIRST_NAME).getS) shouldBe None
items.get(DynamoDBUserRepository.FIRST_NAME).getNULL shouldBe true
}
"set the last name to null if it is not present" in {
val emptyLastName = okUser.copy(lastName = None)
val items = toItem(crypto, emptyLastName)
Option(items.get(LAST_NAME).getS) shouldBe None
items.get(LAST_NAME).getNULL shouldBe true
}
"set the email to null if it is not present" in {
val emptyEmail = okUser.copy(email = None)
val items = toItem(crypto, emptyEmail)
Option(items.get(EMAIL).getS) shouldBe None
items.get(EMAIL).getNULL shouldBe true
}
}
"DynamoDBUserRepository.fromItem" should {
"set all the values correctly" in {
val items = toItem(crypto, okUser)
val user = fromItem(items).unsafeRunSync()
user shouldBe okUser
}
"set all the values correctly if first name is not present" in {
val emptyFirstName = okUser.copy(firstName = None)
val items = toItem(crypto, emptyFirstName)
val user = fromItem(items).unsafeRunSync()
user shouldBe emptyFirstName
}
"set all the values correctly if last name is not present" in {
val emptyLastName = okUser.copy(lastName = None)
val items = toItem(crypto, emptyLastName)
val user = fromItem(items).unsafeRunSync()
user shouldBe emptyLastName
}
"set all the values correctly if email is not present" in {
val emptyEmail = okUser.copy(email = None)
val items = toItem(crypto, emptyEmail)
val user = fromItem(items).unsafeRunSync()
user shouldBe emptyEmail
}
"sets empty values correctly if key is not present in item" in {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue("ok"))
item.put(USER_NAME, new AttributeValue("ok"))
item.put(CREATED, new AttributeValue().withN("0"))
item.put(ACCESS_KEY, new AttributeValue("accessKey"))
item.put(SECRET_KEY, new AttributeValue("secretkey"))
item.put(LOCK_STATUS, new AttributeValue("lockstatus"))
val user = fromItem(item).unsafeRunSync()
user.firstName shouldBe None
user.lastName shouldBe None
user.email shouldBe None
}
"sets the isSuper flag correctly" in {
val superUser = okUser.copy(isSuper = true)
val items = toItem(crypto, superUser)
val user = fromItem(items).unsafeRunSync()
user shouldBe superUser
}
"sets the isSuper flag correctly if the key is not present in the item" in {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue("ok"))
item.put(USER_NAME, new AttributeValue("ok"))
item.put(CREATED, new AttributeValue().withN("0"))
item.put(ACCESS_KEY, new AttributeValue("accesskey"))
item.put(SECRET_KEY, new AttributeValue("secretkey"))
item.put(LOCK_STATUS, new AttributeValue("Locked"))
val user = fromItem(item).unsafeRunSync()
user.isSuper shouldBe false
}
"sets the lockStatus to Unlocked if the given value is invalid" in {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue("ok"))
item.put(USER_NAME, new AttributeValue("ok"))
item.put(CREATED, new AttributeValue().withN("0"))
item.put(ACCESS_KEY, new AttributeValue("accesskey"))
item.put(SECRET_KEY, new AttributeValue("secretkey"))
item.put(LOCK_STATUS, new AttributeValue("lock_status"))
val user = fromItem(item).unsafeRunSync()
user.lockStatus shouldBe LockStatus.Unlocked
}
"sets the lockStatus to Unlocked if the given value is null" in {
val item = new java.util.HashMap[String, AttributeValue]()
item.put(USER_ID, new AttributeValue("ok"))
item.put(USER_NAME, new AttributeValue("ok"))
item.put(CREATED, new AttributeValue().withN("0"))
item.put(ACCESS_KEY, new AttributeValue("accesskey"))
item.put(SECRET_KEY, new AttributeValue("secretkey"))
val user = fromItem(item).unsafeRunSync()
user.lockStatus shouldBe LockStatus.Unlocked
}
}
"DynamoDBUserRepository.getUser" should {
"return the user if the id is found" in {
val dynamoResponse = mock[GetItemResult]
val expected = toItem(crypto, okUser)
doReturn(expected).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getUser(okUser.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe Some(okUser)
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.getItem(any[GetItemRequest])
val result = underTest.getUser(okUser.id)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
"return None if not found" in {
val dynamoResponse = mock[GetItemResult]
doReturn(null).when(dynamoResponse).getItem
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).getItem(any[GetItemRequest])
val response = underTest.getUser(okUser.id).unsafeRunSync()
verify(dynamoDBHelper).getItem(any[GetItemRequest])
response shouldBe None
}
}
"DynamoDBUserRepository.getUsers" should {
"return the users if the id is found" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage =
Map(userTable -> listOfDummyUsers.slice(0, 100).map(toItem(crypto, _)).asJava).asJava
doReturn(firstPage).when(firstResponse).getResponses
val secondResponse = mock[BatchGetItemResult]
val secondPage = Map(
userTable -> listOfDummyUsers
.slice(100, 200)
.map(toItem(crypto, _))
.asJava
).asJava
doReturn(secondPage).when(secondResponse).getResponses
doReturn(IO.pure(firstResponse))
.doReturn(IO.pure(secondResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response =
underTest.getUsers(listOfDummyUsers.map(_.id).toSet, None, None).unsafeRunSync()
verify(dynamoDBHelper, times(2)).batchGetItem(any[BatchGetItemRequest])
response.users should contain theSameElementsAs listOfDummyUsers
response.lastEvaluatedId shouldBe None
}
"return None if no users found" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map(userTable -> List().asJava).asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getUsers(Set("notFound"), None, None).unsafeRunSync()
verify(dynamoDBHelper).batchGetItem(any[BatchGetItemRequest])
response.users should contain theSameElementsAs Set()
response.lastEvaluatedId shouldBe None
}
"return None if table is missing" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map().asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response = underTest.getUsers(Set("notFound"), None, None).unsafeRunSync()
verify(dynamoDBHelper).batchGetItem(any[BatchGetItemRequest])
response.users should contain theSameElementsAs Set()
response.lastEvaluatedId shouldBe None
}
"returns is starting at the exclusiveStartKey" in {
def toBatchGetItemRequest(userIds: List[String]): BatchGetItemRequest = {
val allKeys = new util.ArrayList[util.Map[String, AttributeValue]]()
for { userId <- userIds } {
val key = new util.HashMap[String, AttributeValue]()
key.put(USER_ID, new AttributeValue(userId))
allKeys.add(key)
}
val keysAndAttributes = new KeysAndAttributes().withKeys(allKeys)
val request = new util.HashMap[String, KeysAndAttributes]()
request.put(userTable, keysAndAttributes)
new BatchGetItemRequest().withRequestItems(request)
}
val firstResponse = mock[BatchGetItemResult]
val firstPage = Map(
userTable -> listOfDummyUsers
.slice(151, 200)
.map(toItem(crypto, _))
.asJava
).asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val batchGetCaptor = ArgumentCaptor.forClass(classOf[BatchGetItemRequest])
val response =
underTest.getUsers(listOfDummyUsers.map(_.id).toSet, Some("dummy150"), None).unsafeRunSync()
response.users should contain theSameElementsAs listOfDummyUsers.slice(151, 200)
response.lastEvaluatedId shouldBe None
verify(dynamoDBHelper).batchGetItem(batchGetCaptor.capture())
val batchGet = batchGetCaptor.getValue
val expected = toBatchGetItemRequest(listOfDummyUsers.slice(151, 200).map(_.id))
batchGet shouldBe expected
}
"truncates the response to only return limit items" in {
val firstResponse = mock[BatchGetItemResult]
val firstPage =
Map(userTable -> listOfDummyUsers.slice(0, 50).map(toItem(crypto, _)).asJava).asJava
doReturn(firstPage).when(firstResponse).getResponses
doReturn(IO.pure(firstResponse))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
val response =
underTest.getUsers(listOfDummyUsers.map(_.id).toSet, None, Some(50)).unsafeRunSync()
verify(dynamoDBHelper).batchGetItem(any[BatchGetItemRequest])
response.users should contain theSameElementsAs listOfDummyUsers.take(50)
response.lastEvaluatedId shouldBe Some(listOfDummyUsers(49).id)
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.batchGetItem(any[BatchGetItemRequest])
a[ResourceNotFoundException] shouldBe thrownBy(
underTest.getUsers(listOfDummyUsers.map(_.id).toSet, None, None).unsafeRunSync()
)
}
}
"DynamoDBUserRepository.getAllUsers" should {
"throw an UnsupportedDynamoDBRepoFunction error" in {
assertThrows[UnsupportedDynamoDBRepoFunction](underTest.getAllUsers.unsafeRunSync())
}
}
"DynamoDBUserRepository.getUserByAccessKey" should {
"return the user if the access key is found" in {
val dynamoResponse = mock[QueryResult]
val expected = List(toItem(crypto, okUser)).asJava
doReturn(expected).when(dynamoResponse).getItems
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getUserByAccessKey(okUser.accessKey).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe Some(okUser)
}
"throw exception when get returns an unexpected response" in {
doReturn(IO.raiseError(new ResourceNotFoundException("bar does not exist")))
.when(dynamoDBHelper)
.query(any[QueryRequest])
val result = underTest.getUserByAccessKey(okUser.accessKey)
a[ResourceNotFoundException] shouldBe thrownBy(result.unsafeRunSync())
}
"return None if not found" in {
val dynamoResponse = mock[QueryResult]
doReturn(List().asJava).when(dynamoResponse).getItems
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val response = underTest.getUserByAccessKey(okUser.accessKey).unsafeRunSync()
verify(dynamoDBHelper).query(any[QueryRequest])
response shouldBe None
}
}
"DynamoDBUserRepository.save" should {
"return the user when saved" in {
val mockPutItemResult = mock[PutItemResult]
doReturn(IO.pure(mockPutItemResult))
.when(dynamoDBHelper)
.putItem(any[PutItemRequest])
val response = underTest.save(okUser).unsafeRunSync()
response shouldBe okUser
}
"throw an UnsupportedDynamoDBRepoFunction error when batch save is invoked" in {
assertThrows[UnsupportedDynamoDBRepoFunction](underTest.save(List(okUser)).unsafeRunSync())
}
}
}

View File

@@ -1,130 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.services.dynamodbv2.model._
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.core.domain.zone.{ZoneChange, ZoneChangeStatus, ZoneChangeType}
import cats.effect._
import vinyldns.dynamodb.DynamoTestConfig
import vinyldns.core.TestZoneData._
class DynamoDBZoneChangeRepositorySpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[TestDynamoDBHelper]
private val zoneChangeStoreConfig = DynamoTestConfig.zoneChangeStoreConfig
private val zoneChangeTable = zoneChangeStoreConfig.tableName
class TestDynamoDBZoneChangeRepository
extends DynamoDBZoneChangeRepository(zoneChangeTable, dynamoDBHelper)
private val underTest = new TestDynamoDBZoneChangeRepository
override def beforeEach(): Unit = reset(dynamoDBHelper)
val zoneChangeComplete: ZoneChange =
ZoneChange(okZone, "ok", ZoneChangeType.Update, ZoneChangeStatus.Complete)
val zoneChangeSynced: ZoneChange =
ZoneChange(okZone, "ok", ZoneChangeType.Update, ZoneChangeStatus.Synced)
val zoneChangeFailed: ZoneChange =
ZoneChange(okZone, "ok", ZoneChangeType.Update, ZoneChangeStatus.Failed)
"DynamoDBZoneChangeRepository.save" should {
"call DynamoDBClient.putItem when creating a zone change" in {
val putItemResult = mock[PutItemResult]
when(dynamoDBHelper.putItem(any[PutItemRequest])).thenReturn(IO.pure(putItemResult))
val actual = underTest.save(zoneChangeComplete).unsafeRunSync()
verify(dynamoDBHelper).putItem(any[PutItemRequest])
actual shouldBe zoneChangeComplete
}
"throw an exception when anything goes wrong" in {
when(dynamoDBHelper.putItem(any[PutItemRequest]))
.thenThrow(new InternalServerErrorException("foobar"))
a[RuntimeException] should be thrownBy underTest.save(zoneChangeComplete)
}
}
"DynamoDBZoneChangeRepository.getChanges" should {
"call dynamo client when no changes exist" in {
val dynamoResponse = mock[QueryResult]
val dynamoResponses = List(dynamoResponse)
when(dynamoResponse.getItems)
.thenReturn(new java.util.ArrayList[java.util.Map[String, AttributeValue]]())
doReturn(IO.pure(dynamoResponses)).when(dynamoDBHelper).queryAll(any[QueryRequest])
val response = underTest.listZoneChanges(okZone.id).unsafeRunSync()
verify(dynamoDBHelper).queryAll(any[QueryRequest])
response.items shouldBe empty
}
"call dynamoDBHelper.query when retrieving an existing zone" in {
val dynamoResponse = mock[QueryResult]
val dynamoResponses = List(dynamoResponse)
val resultList = new java.util.ArrayList[java.util.Map[String, AttributeValue]]()
resultList.add(underTest.toItem(zoneChangePending))
resultList.add(underTest.toItem(zoneChangeSynced))
doReturn(IO.pure(dynamoResponses)).when(dynamoDBHelper).queryAll(any[QueryRequest])
when(dynamoResponse.getItems).thenReturn(resultList)
val response = underTest.listZoneChanges(okZone.id).unsafeRunSync()
verify(dynamoDBHelper).queryAll(any[QueryRequest])
response.items should contain theSameElementsAs List(zoneChangePending, zoneChangeSynced)
}
"not return duplicate changes " in {
val dynamoResponse = mock[QueryResult]
val dynamoResponses = List(dynamoResponse)
val resultList = new java.util.ArrayList[java.util.Map[String, AttributeValue]]()
resultList.add(underTest.toItem(zoneChangeComplete))
resultList.add(underTest.toItem(zoneChangeComplete))
doReturn(IO.pure(dynamoResponses)).when(dynamoDBHelper).queryAll(any[QueryRequest])
when(dynamoResponse.getItems).thenReturn(resultList)
val response = underTest.listZoneChanges(okZone.id).unsafeRunSync()
verify(dynamoDBHelper).queryAll(any[QueryRequest])
response.items shouldBe List(zoneChangeComplete)
}
"throw exception when query returns an unexpected response" in {
when(dynamoDBHelper.queryAll(any[QueryRequest]))
.thenThrow(new InternalServerErrorException("foo"))
a[InternalServerErrorException] should be thrownBy underTest.listZoneChanges(okZone.id)
}
}
}

View File

@@ -1,238 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import java.util
import com.amazonaws.services.dynamodbv2.model.{AttributeValue, _}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.collection.JavaConverters._
import cats.effect._
class QueryHelperSpec
extends AnyWordSpec
with MockitoSugar
with Matchers
with ScalaFutures
with BeforeAndAfterEach {
private val dynamoDBHelper = mock[DynamoDBHelper]
class TestQueryHelper extends QueryHelper
private val underTest = new TestQueryHelper
override def beforeEach(): Unit = reset(dynamoDBHelper)
def makeJavaItem(value: String): util.HashMap[String, AttributeValue] = {
val item = new util.HashMap[String, AttributeValue]()
item.put("key", new AttributeValue(value))
item
}
def await[T](f: => IO[_]): T =
f.map(_.asInstanceOf[T]).unsafeRunSync()
"QueryHelper" should {
"run a query with no filter where there is no continuation" in {
val keyConditions = Map[String, String]("key" -> "value")
val dynamoResponse = mock[QueryResult]
val expectedItems = new util.ArrayList[util.HashMap[String, AttributeValue]]()
expectedItems.add(makeJavaItem("item1"))
expectedItems.add(makeJavaItem("item2"))
doReturn(expectedItems).when(dynamoResponse).getItems
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val result = await[QueryResponseItems](
underTest.doQuery("testName", "testIndex", keyConditions, None, None, Some(3))(
dynamoDBHelper
)
)
result.lastEvaluatedKey shouldBe None
result.items shouldBe expectedItems.asScala
}
"run a query with no filter where there is a continuation key" in {
val keyConditions = Map[String, String]("key" -> "value")
val dynamoResponse = mock[QueryResult]
val expectedItems = new util.ArrayList[util.HashMap[String, AttributeValue]]()
expectedItems.add(makeJavaItem("item1"))
expectedItems.add(makeJavaItem("item2"))
expectedItems.add(makeJavaItem("item3"))
doReturn(expectedItems).when(dynamoResponse).getItems
val key = makeJavaItem("item3")
doReturn(key).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val result = await[QueryResponseItems](
underTest.doQuery("testName", "testIndex", keyConditions, None, None, Some(3))(
dynamoDBHelper
)
)
result.lastEvaluatedKey shouldBe Some(key)
result.items shouldBe expectedItems.asScala
}
"run a query with a filter requiring multiple query requests" in {
val keyConditions = Map[String, String]("key" -> "value")
val filterExpression = Some(ContainsFilter("filterkey", "filtervalue"))
val firstQuery =
QueryManager("testName", "testIndex", keyConditions, filterExpression, None, None, false)
.build()
val secondQuery = QueryManager(
"testName",
"testIndex",
keyConditions,
filterExpression,
Some(Map("key" -> "item3")),
None,
false
).build()
val firstResponse = mock[QueryResult]
val items1 = new util.ArrayList[util.HashMap[String, AttributeValue]]()
items1.add(makeJavaItem("item1"))
items1.add(makeJavaItem("item2"))
items1.add(makeJavaItem("item3"))
doReturn(items1).when(firstResponse).getItems
doReturn(makeJavaItem("item3")).when(firstResponse).getLastEvaluatedKey
val secondResponse = mock[QueryResult]
val items2 = new util.ArrayList[util.HashMap[String, AttributeValue]]()
items2.add(makeJavaItem("item4"))
items2.add(makeJavaItem("item5"))
items2.add(makeJavaItem("item6"))
doReturn(items2).when(secondResponse).getItems
doReturn(IO.pure(firstResponse)).when(dynamoDBHelper).query(firstQuery)
doReturn(IO.pure(secondResponse)).when(dynamoDBHelper).query(secondQuery)
val result = await[QueryResponseItems](
underTest.doQuery("testName", "testIndex", keyConditions, filterExpression, None, Some(4))(
dynamoDBHelper
)
)
result.lastEvaluatedKey shouldBe Some(makeJavaItem("item4"))
result.items shouldBe (items1.asScala ++ items2.asScala).take(4)
}
"run a query with a filter requiring multiple query requests where no key at end" in {
val keyConditions = Map[String, String]("key" -> "value")
val filterExpression = Some(ContainsFilter("filterkey", "filtervalue"))
val firstQuery =
QueryManager("testName", "testIndex", keyConditions, filterExpression, None, None, false)
.build()
val secondQuery = QueryManager(
"testName",
"testIndex",
keyConditions,
filterExpression,
Some(Map("key" -> "item3")),
None,
false
).build()
val firstResponse = mock[QueryResult]
val items1 = new util.ArrayList[util.HashMap[String, AttributeValue]]()
items1.add(makeJavaItem("item1"))
items1.add(makeJavaItem("item2"))
items1.add(makeJavaItem("item3"))
doReturn(items1).when(firstResponse).getItems
doReturn(makeJavaItem("item3")).when(firstResponse).getLastEvaluatedKey
val secondResponse = mock[QueryResult]
val items2 = new util.ArrayList[util.HashMap[String, AttributeValue]]()
items2.add(makeJavaItem("item4"))
items2.add(makeJavaItem("item5"))
items2.add(makeJavaItem("item6"))
doReturn(items2).when(secondResponse).getItems
doReturn(null).when(secondResponse).getLastEvaluatedKey
doReturn(IO.pure(firstResponse)).when(dynamoDBHelper).query(firstQuery)
doReturn(IO.pure(secondResponse)).when(dynamoDBHelper).query(secondQuery)
val result = await[QueryResponseItems](
underTest.doQuery("testName", "testIndex", keyConditions, filterExpression, None, Some(6))(
dynamoDBHelper
)
)
result.lastEvaluatedKey shouldBe None
result.items shouldBe items1.asScala ++ items2.asScala
}
"run a query with count returns QueryResponseCount" in {
val keyConditions = Map[String, String]("key" -> "value")
val dynamoResponse = mock[QueryResult]
doReturn(5).when(dynamoResponse).getCount
doReturn(null).when(dynamoResponse).getLastEvaluatedKey
doReturn(IO.pure(dynamoResponse)).when(dynamoDBHelper).query(any[QueryRequest])
val result = await[QueryResponseCount](
underTest
.doQuery("testName", "testIndex", keyConditions, None, None, None, isCountQuery = true)(
dynamoDBHelper
)
)
result.count shouldBe 5
}
"run a query with count works when there are multiple pages" in {
val keyConditions = Map[String, String]("key" -> "value")
val dynamoResponse1 = mock[QueryResult]
val dynamoResponse2 = mock[QueryResult]
doReturn(makeJavaItem("item")).when(dynamoResponse1).getLastEvaluatedKey
doReturn(5).when(dynamoResponse1).getCount
doReturn(null).when(dynamoResponse2).getLastEvaluatedKey
doReturn(2).when(dynamoResponse2).getCount
doReturn(IO.pure(dynamoResponse1))
.doReturn(IO.pure(dynamoResponse2))
.when(dynamoDBHelper)
.query(any[QueryRequest])
val result = await[QueryResponseCount](
underTest
.doQuery("testName", "testIndex", keyConditions, None, None, None, isCountQuery = true)(
dynamoDBHelper
)
)
result.count shouldBe 7
}
}
}

View File

@@ -1,33 +0,0 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.dynamodb.repository
import com.amazonaws.AmazonWebServiceRequest
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import org.slf4j.Logger
import cats.effect._
/* Overrides the send method so that it is synchronous, avoiding goofy future timing issues in unit tests */
class TestDynamoDBHelper(dynamoDB: AmazonDynamoDBClient, log: Logger)
extends DynamoDBHelper(dynamoDB: AmazonDynamoDBClient, log: Logger) {
override private[repository] def send[In <: AmazonWebServiceRequest, Out](
aws: In,
func: (In) => Out
)(implicit d: Describe[_ >: In]): IO[Out] =
IO(func(aws))
}

View File

@@ -25,41 +25,11 @@ at this time.
1. You must have npm; if you don't, follow the instructions at <https://www.npmjs.com/get-npm>.
2. Run `npm install` to install all dependencies, including those needed for testing. If you just want to run the portal, `npm install --production` will suffice.
3. You must have grunt; if you don't, run `npm install -g grunt`. Then run `grunt default` from the root of the portal project.
4. Create a local.conf file in the portal conf folder. Include in it the following information:
```
LDAP {
user = [get this from CAP member]
password = [get this from CAP member]
domain = [get this from CAP member]
searchBase = [get this from CAP member]
context.providerUrl = [get this from CAP member]
}
portal.vinyldns.backend.url = "http://127.0.0.1:9000"
portal.dynamo_delay=0
dynamo {
key = "local"
secret = "local"
endpoint = "http://127.0.0.1:19000"
}
users {
dummy = false
tablename = "users"
provisionedReadThroughput = 100
provisionedWriteThroughput = 100
}
changelog {
dummy = false
tablename = "usersAndGroupChanges"
provisionedReadThroughput = 100
provisionedWriteThroughput = 100
}
```
4. Create a local.conf file in the portal conf folder for your settings if desired; a minimal example is sketched after this list.
5. Follow the instructions in the VinylDNS README for building VinylDNS locally
6. Start VinylDNS with `sbt run`. VinylDNS will start on localhost on port 9000.
7. Run the portal with `sbt -Djavax.net.ssl.trustStore="./private/trustStore.jks" -Dhttp.port=8080 run`
8. In a web browser go to localhost:8080
8. In a web browser go to localhost:9001
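For step 4, a minimal `local.conf` sketch for the post-DynamoDB portal might look like the following. This is only an illustration: the LDAP values are placeholders you must obtain from your own directory, and the backend URL assumes the API from step 6 is running locally on port 9000.
```
# Hypothetical minimal local.conf -- every value below is a placeholder
LDAP {
  user = "ldap-service-account"
  password = "changeme"
  domain = "example.com"
  searchBase = "ou=people,dc=example,dc=com"
  context.providerUrl = "ldaps://ldap.example.com:636"
}

# Point the portal at the locally running VinylDNS API (step 6)
portal.vinyldns.backend.url = "http://127.0.0.1:9000"
```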
# Working locally
Oftentimes as a developer you want to work with the portal locally in a "real" setting against your own LDAP

View File

@@ -1,10 +1,9 @@
play.http.secret.key = "changeme"
play.i18n.langs = [ "en" ]
portal.dynamo_delay = 0
portal.vinyldns.backend.url = "http://not.real.com"
data-stores = ["dynamodb", "mysql"]
data-stores = ["mysql"]
mysql {
class-name = "vinyldns.mysql.repository.MySqlDataStoreProvider"
@@ -23,25 +22,8 @@ mysql {
repositories {
user {}
}
}
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider"
settings {
key = "akid goes here"
secret = "secret key goes here"
endpoint = "http://foo.bar"
region = "us-east-1" # note: we are always in us-east-1, but this can be overridden
}
repositories {
user-change {
table-name = "userChangeTest"
provisioned-reads = 30
provisioned-writes = 20
}
user-change {}
task {}
}
}

View File

@@ -5,29 +5,16 @@ crypto {
secret = "8B06A7F3BC8A2497736F1916A123AA40E88217BE9264D8872597EF7A6E5DCE61"
}
data-stores = ["mysql", "dynamodb"]
data-stores = ["mysql"]
data-stores = ${?DATA_STORES}
mysql {
repositories {
user {
# no additional settings for now
}
task {
# no additional settings for now
}
}
}
dynamodb {
repositories {
user-change {
table-name = "userChangeTest"
table-name = ${?USER_CHANGE_TABLE_NAME}
provisioned-reads = 30
provisioned-reads = ${?DYNAMODB_READS}
provisioned-writes = 20
provisioned-writes = ${?DYNAMODB_WRITES}
}
}
}

View File

@@ -44,9 +44,6 @@ play.i18n.langs = [ "en" ]
# You can disable evolutions for a specific datasource if necessary
# play.evolutions.db.default.enabled=false
portal.dynamo_delay = 1100
portal.dynamo_delay = ${?DYNAMO_DELAY}
portal.vinyldns.backend.url = "http://localhost:9000"
portal.vinyldns.backend.url = ${?VINYLDNS_BACKEND_URL}
@@ -72,27 +69,12 @@ mysql {
repositories {
# override with any repos that are running in mysql
user {}
user-change {}
task {}
}
}
dynamodb {
class-name = "vinyldns.dynamodb.repository.DynamoDBDataStoreProvider"
class-name = ${?DATA_STORE_CLASS_NAME}
settings {
key = "akid goes here"
key = ${?DYNAMODB_KEY}
secret = "secret key goes here"
secret = ${?DYNAMODB_SECRET}
endpoint = "http://127.0.0.1:19000"
endpoint = ${?DYNAMODB_ENDPOINT}
region = "us-east-1" # note: we are always in us-east-1, but this can be overridden
region = ${?DYNAMODB_REGION}
}
repositories {
}
}
LDAP {
user = "test"

View File

@@ -76,11 +76,6 @@ object Dependencies {
"javax.activation" % "activation" % "1.1.1"
)
lazy val dynamoDBDependencies = Seq(
"com.amazonaws" % "aws-java-sdk-core" % awsV withSources(),
"com.amazonaws" % "aws-java-sdk-dynamodb" % awsV withSources()
)
lazy val mysqlDependencies = Seq(
"org.flywaydb" % "flyway-core" % "5.1.4",
"org.mariadb.jdbc" % "mariadb-java-client" % "2.3.0",
@@ -117,7 +112,6 @@ object Dependencies {
lazy val portalDependencies = Seq(
"com.typesafe.play" %% "play-json" % "2.7.4",
"com.amazonaws" % "aws-java-sdk-core" % awsV withSources(),
"com.amazonaws" % "aws-java-sdk-dynamodb" % awsV withSources(),
"com.typesafe.play" %% "play-jdbc" % playV,
"com.typesafe.play" %% "play-guice" % playV,
"com.typesafe.play" %% "play-ahc-ws" % playV,