Mirror of https://github.com/VinylDNS/vinyldns, synced 2025-08-22 10:10:12 +00:00

WIP - Functional Test Updates

- Update the `dnsjava` library
- Add support for the H2 database
- Update functional tests to support parallel runs
- Remove the ability to specify the number of processes for functional tests - always 4 now
- Add a `Makefile` and `Dockerfile` in `functional_test` to make it easier to run tests without spinning up multiple containers

Parent: 0b3824ad6c
Commit: 0a1b533192
build.sbt (37 changes)

@@ -1,12 +1,11 @@
-import Resolvers._
-import Dependencies._
 import CompilerOptions._
+import Dependencies._
+import Resolvers._
 import com.typesafe.sbt.packager.docker._
-import scoverage.ScoverageKeys.{coverageFailOnMinimum, coverageMinimum}
-import org.scalafmt.sbt.ScalafmtPlugin._
 import microsites._
-import ReleaseTransformations._
-import sbtrelease.Version
+import org.scalafmt.sbt.ScalafmtPlugin._
+import sbtrelease.ReleasePlugin.autoImport.ReleaseTransformations._
+import scoverage.ScoverageKeys.{coverageFailOnMinimum, coverageMinimum}

 import scala.util.Try

@@ -22,15 +21,18 @@ lazy val sharedSettings = Seq(
   startYear := Some(2018),
   licenses += ("Apache-2.0", new URL("https://www.apache.org/licenses/LICENSE-2.0.txt")),
   scalacOptions ++= scalacOptionsByV(scalaVersion.value),
-  scalacOptions in (Compile, doc) += "-no-link-warnings",
+  scalacOptions in(Compile, doc) += "-no-link-warnings",
   // Use wart remover to eliminate code badness
-  wartremoverErrors ++= Seq(
-    Wart.EitherProjectionPartial,
+  wartremoverErrors := (
+    if (getPropertyFlagOrDefault("build.lintOnCompile", true))
+      Seq(Wart.EitherProjectionPartial,
       Wart.IsInstanceOf,
       Wart.JavaConversions,
       Wart.Return,
       Wart.LeakingSealed,
       Wart.ExplicitImplicitTypes
+      )
+    else Seq.empty
   ),

   // scala format
@@ -72,7 +74,7 @@ lazy val apiAssemblySettings = Seq(
   mainClass in reStart := Some("vinyldns.api.Boot"),
   // there are some odd things from dnsjava including update.java and dig.java that we don't use
   assemblyMergeStrategy in assembly := {
-    case "update.class"| "dig.class" => MergeStrategy.discard
+    case "update.class" | "dig.class" => MergeStrategy.discard
     case PathList("scala", "tools", "nsc", "doc", "html", "resource", "lib", "index.js") => MergeStrategy.discard
     case PathList("scala", "tools", "nsc", "doc", "html", "resource", "lib", "template.js") => MergeStrategy.discard
     case x =>
@@ -158,11 +160,11 @@ lazy val portalPublishSettings = Seq(
   publishLocal := (publishLocal in Docker).value,
   publish := (publish in Docker).value,
   // for sbt-native-packager (docker) to exclude local.conf
-  mappings in Universal ~= ( _.filterNot {
+  mappings in Universal ~= (_.filterNot {
     case (file, _) => file.getName.equals("local.conf")
   }),
   // for local.conf to be excluded in jars
-  mappings in (Compile, packageBin) ~= ( _.filterNot {
+  mappings in(Compile, packageBin) ~= (_.filterNot {
     case (file, _) => file.getName.equals("local.conf")
   })
 )
@@ -216,8 +218,6 @@ lazy val coreBuildSettings = Seq(
   // to write a crypto plugin so that we fall back to a noarg constructor
   scalacOptions ++= scalacOptionsByV(scalaVersion.value).filterNot(_ == "-Ywarn-unused:params")
 ) ++ pbSettings

-import xerial.sbt.Sonatype._
-
 lazy val corePublishSettings = Seq(
   publishMavenStyle := true,
   publishArtifact in Test := false,
@@ -232,13 +232,6 @@ lazy val corePublishSettings = Seq(
       "scm:git@github.com:vinyldns/vinyldns.git"
     )
   ),
-  developers := List(
-    Developer(id="pauljamescleary", name="Paul James Cleary", email="pauljamescleary@gmail.com", url=url("https://github.com/pauljamescleary")),
-    Developer(id="rebstar6", name="Rebecca Star", email="rebstar6@gmail.com", url=url("https://github.com/rebstar6")),
-    Developer(id="nimaeskandary", name="Nima Eskandary", email="nimaesk1@gmail.com", url=url("https://github.com/nimaeskandary")),
-    Developer(id="mitruly", name="Michael Ly", email="michaeltrulyng@gmail.com", url=url("https://github.com/mitruly")),
-    Developer(id="britneywright", name="Britney Wright", email="blw06g@gmail.com", url=url("https://github.com/britneywright")),
-  ),
   sonatypeProfileName := "io.vinyldns"
 )

@@ -428,7 +421,7 @@ lazy val initReleaseStage = Seq[ReleaseStep](
   setSonatypeReleaseSettings
 )

-lazy val finalReleaseStage = Seq[ReleaseStep] (
+lazy val finalReleaseStage = Seq[ReleaseStep](
   releaseStepCommand("project root"), // use version.sbt file from root
   commitReleaseVersion,
   setNextVersion,
@@ -47,5 +47,5 @@ done

 echo "Starting up Vinyl..."
 sleep 2
-java -Djava.net.preferIPv4Stack=true -Dconfig.file=/app/docker.conf -Dakka.loglevel=INFO -Dlogback.configurationFile=test/logback.xml -jar /app/vinyldns-server.jar vinyldns.api.Boot
+java -Djava.net.preferIPv4Stack=true -Dconfig.file=/app/docker.conf -Dakka.loglevel=INFO -Dlogback.configurationFile=/app/logback.xml -jar /app/vinyldns-server.jar vinyldns.api.Boot
docker/bind9/README.md (new file, 23 lines)

## Bind Test Configuration

This folder contains test configuration for BIND zones. The zones are partitioned into four distinct partitions to allow
for four parallel testing threads that won't interfere with one another.

### Layout

| Directory | Detail |
|:---|:---|
| `etc/` | Contains zone configurations separated by partition |
| `etc/_template` | Contains the template file for creating the partitioned `conf` files. Currently this is just a find and replace operation - finding `{placeholder}` and replacing it with the desired placeholder. |
| `zones/` | Contains zone definitions separated by partition |
| `zones/_template` | Contains the template file for creating the partitioned zone files. Currently this is just a find and replace operation - finding `{placeholder}` and replacing it with the desired placeholder. |

### Target Directories

When used in a container, or to run `named`, the files in this directory should be copied to the following directories:

| Directory | Target |
|:---|:---|
| `etc/named.conf.local` | `/etc/bind/` |
| `etc/named.partition*.conf` | `/var/bind/config/` |
| `zones/` | `/var/bind/` |
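The `_template` find-and-replace described above is simple enough to script. A minimal sketch, assuming a literal `{placeholder}` token in the template and output names following the `named.partitionN.conf` pattern (the helper name and paths here are illustrative, not files in the repository):

```python
from pathlib import Path


def render_partitions(template_file: str, out_dir: str, partitions: int = 4) -> None:
    """Expand a _template file into one file per partition by replacing {placeholder}."""
    template = Path(template_file).read_text()
    out = Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)
    for partition in range(1, partitions + 1):
        rendered = template.replace("{placeholder}", str(partition))
        # produces named.partition1.conf .. named.partition4.conf
        (out / f"named.partition{partition}.conf").write_text(rendered)


if __name__ == "__main__":
    # Hypothetical paths; point these at the real _template and target directories.
    render_partitions("etc/_template/named.conf", "etc")
```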
@@ -29,10 +29,7 @@ key "vinyldns-sha512." {
   secret "xfKA0DYb88tiUGND+cWddwUg3/SugYSsdvCfBOJ1jr8MEdgbVRyrlVDEXLsfTUGorQ3ShENdymw2yw+rTr+lwA==";
 };

-// Consider adding the 1918 zones here, if they are not used in your organization
-//include "/etc/bind/zones.rfc1918";
-
-include "/var/cache/bind/config/named.partition1.conf";
-include "/var/cache/bind/config/named.partition2.conf";
-include "/var/cache/bind/config/named.partition3.conf";
-include "/var/cache/bind/config/named.partition4.conf";
+include "/var/bind/config/named.partition1.conf";
+include "/var/bind/config/named.partition2.conf";
+include "/var/bind/config/named.partition3.conf";
+include "/var/bind/config/named.partition4.conf";
modules/api/functional_test/Dockerfile (new file, 33 lines)

# Build VinylDNS API if the JAR doesn't already exist
FROM vinyldns/build:base-api as vinyldns-api
COPY modules/api/functional_test/docker.conf modules/api/functional_test/vinyldns*.jar /opt/vinyldns/
COPY . /build/
WORKDIR /build

## Run the build if we don't already have a vinyldns.jar
RUN if [ ! -f /opt/vinyldns/vinyldns.jar ]; then \
      env SBT_OPTS="-XX:+UseConcMarkSweepGC -Xmx4G -Xms1G" \
        sbt -Dbuild.scalafmtOnCompile=false -Dbuild.lintOnCompile=fase ";project api;coverageOff;assembly" \
      && cp modules/api/target/scala-2.12/vinyldns.jar /opt/vinyldns/; \
    fi

# Build the testing image, copying data from `vinyldns-api`
FROM vinyldns/build:base-test
SHELL ["/bin/bash","-c"]
COPY --from=vinyldns-api /opt/vinyldns /opt/vinyldns

# Local bind server files
COPY docker/bind9/etc/named.conf.local /etc/bind/
COPY docker/bind9/etc/*.conf /var/bind/config/
COPY docker/bind9/zones/ /var/bind/
RUN named-checkconf

# Copy over the functional tests
COPY modules/api/functional_test /functional_test

ENTRYPOINT ["/bin/bash", "-c", "/initialize.sh && \
    (java -Dconfig.file=/opt/vinyldns/docker.conf -jar /opt/vinyldns/vinyldns.jar &> /opt/vinyldns/vinyldns.log &) && \
    echo -n 'Starting VinylDNS API..' && \
    timeout 30s grep -q 'STARTED SUCCESSFULLY' <(timeout 30s tail -f /opt/vinyldns/vinyldns.log) && \
    echo 'done.' && \
    /bin/bash"]
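The ENTRYPOINT above starts the API in the background and then blocks until `STARTED SUCCESSFULLY` shows up in the log, giving up after 30 seconds. The same readiness check can be sketched in Python, assuming only the log path and marker string used in the Dockerfile:

```python
import time
from pathlib import Path


def wait_for_marker(log_path: str, marker: str, timeout_seconds: float = 30.0) -> bool:
    """Poll a log file until a marker appears or the timeout expires."""
    deadline = time.monotonic() + timeout_seconds
    log = Path(log_path)
    while time.monotonic() < deadline:
        if log.exists() and marker in log.read_text(errors="ignore"):
            return True
        time.sleep(0.5)
    return False


if __name__ == "__main__":
    ready = wait_for_marker("/opt/vinyldns/vinyldns.log", "STARTED SUCCESSFULLY")
    print("done." if ready else "VinylDNS API did not report startup in time")
```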
modules/api/functional_test/Dockerfile.dockerignore (new file, 16 lines)

**/.venv_win
**/.virtualenv
**/.venv
**/target
**/docs
**/out
**/.log
**/.idea/
**/.bsp
**/*cache*
**/*.png
**/.git
**/Dockerfile
**/*.dockerignore
**/.github
**/_template
modules/api/functional_test/Makefile (new file, 25 lines)

SHELL=bash
ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))

# Check that the required version of make is being used
REQ_MAKE_VER:=3.82
ifneq ($(REQ_MAKE_VER),$(firstword $(sort $(MAKE_VERSION) $(REQ_MAKE_VER))))
    $(error The version of MAKE $(REQ_MAKE_VER) or higher is required; you are running $(MAKE_VERSION))
endif

.ONESHELL:

.PHONY: all build run

all: build run

build:
	@set -euo pipefail
	trap 'if [ -f modules/api/functional_test/vinyldns.jar ]; then rm modules/api/functional_test/vinyldns.jar; fi' EXIT
	cd ../../..
	if [ -f modules/api/target/scala-2.12/vinyldns.jar ]; then cp modules/api/target/scala-2.12/vinyldns.jar modules/api/functional_test/vinyldns.jar; fi
	docker build -t vinyldns-test -f modules/api/functional_test/Dockerfile .

run:
	@set -euo pipefail
	docker run -it --rm -p 9000:9000 -p 19001:53/tcp -p 19001:53/udp vinyldns-test
@@ -4,9 +4,12 @@ import os
 import ssl
 import sys
 import traceback
+from collections import OrderedDict
+from typing import MutableMapping, List

 import _pytest.config
 import pytest
+from xdist.scheduler import LoadScopeScheduling

 from vinyldns_context import VinylDNSTestContext

@@ -26,7 +29,7 @@ def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None:
     Adds additional options that we can parse when we run the tests, stores them in the parser / py.test context
     """
     parser.addoption("--url", dest="url", action="store", default="http://localhost:9000", help="URL for application to root")
-    parser.addoption("--dns-ip", dest="dns_ip", action="store", default="127.0.0.1:19001", help="The ip address for the dns name server to update")
+    parser.addoption("--dns-ip", dest="dns_ip", action="store", default="127.0.0.1", help="The ip address for the dns name server to update")
     parser.addoption("--resolver-ip", dest="resolver_ip", action="store", help="The ip address for the dns server to use for the tests during resolution. This is usually the same as `--dns-ip`")
     parser.addoption("--dns-zone", dest="dns_zone", action="store", default="vinyldns.", help="The zone name that will be used for testing")
     parser.addoption("--dns-key-name", dest="dns_key_name", action="store", default="vinyldns.", help="The name of the key used to sign updates for the zone")
@@ -116,3 +119,41 @@ def retrieve_resolver(resolver_name: str) -> str:
         pytest.exit(1)

     return resolver_address
+
+
+class WorkerScheduler(LoadScopeScheduling):
+    worker_assignments: List[MutableMapping] = [{"name": "list_batch_change_summaries_test.py", "worker": 0}]
+
+    def _assign_work_unit(self, node):
+        """Assign a work unit to a node."""
+        assert self.workqueue
+
+        # Grab a unit of work
+        scope, work_unit = self.workqueue.popitem(last=False)
+
+        # Always run list_batch_change_summaries_test on the first worker
+        for assignment in WorkerScheduler.worker_assignments:
+            while assignment["name"] in scope:
+                self.run_work_on_node(self.nodes[assignment["worker"]], scope, work_unit)
+                scope, work_unit = self.workqueue.popitem(last=False)
+
+        self.run_work_on_node(node, scope, work_unit)
+
+    def run_work_on_node(self, node, scope, work_unit):
+        # Keep track of the assigned work
+        assigned_to_node = self.assigned_work.setdefault(node, default=OrderedDict())
+        assigned_to_node[scope] = work_unit
+
+        # Ask the node to execute the workload
+        worker_collection = self.registered_collections[node]
+        nodeids_indexes = [
+            worker_collection.index(nodeid)
+            for nodeid, completed in work_unit.items()
+            if not completed
+        ]
+        node.send_runtest_some(nodeids_indexes)
+
+    def _split_scope(self, nodeid):
+        return nodeid
+
+
+def pytest_xdist_make_scheduler(config, log):
+    return WorkerScheduler(config, log)
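Two details make the scheduler above work: `_split_scope` returns the full nodeid, so every test is its own scheduling scope, and `worker_assignments` pins anything from `list_batch_change_summaries_test.py` to the first worker (gw0). The routing rule in isolation looks roughly like this (a standalone sketch, not the xdist API):

```python
# Module name fragment -> worker index; mirrors WorkerScheduler.worker_assignments above.
PINNED = {"list_batch_change_summaries_test.py": 0}


def choose_worker(nodeid: str, requesting_worker: int, worker_count: int = 4) -> int:
    """Send pinned modules to their fixed worker; everything else goes to the requester."""
    for module_fragment, worker in PINNED.items():
        if module_fragment in nodeid:
            return worker
    return requesting_worker % worker_count


assert choose_worker("list_batch_change_summaries_test.py::test_success", 3) == 0
assert choose_worker("create_recordset_test.py::test_create", 2) == 2
```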
modules/api/functional_test/docker.conf (new file, 302 lines)

################################################################################################################
# This configuration is only used by docker and the build process
################################################################################################################
vinyldns {

  # configured backend providers
  backend {
    # Use "default" when dns backend legacy = true
    # otherwise, use the id of one of the connections in any of your backends
    default-backend-id = "default"

    # this is where we can save additional backends
    backend-providers = [
      {
        class-name = "vinyldns.api.backend.dns.DnsBackendProviderLoader"
        settings = {
          legacy = false
          backends = [
            {
              id = "default"
              zone-connection = {
                name = "vinyldns."
                key-name = "vinyldns."
                key-name = ${?DEFAULT_DNS_KEY_NAME}
                key = "nzisn+4G2ldMn0q1CV3vsg=="
                key = ${?DEFAULT_DNS_KEY_SECRET}
                primary-server = "127.0.0.1"
                primary-server = ${?DEFAULT_DNS_ADDRESS}
              }
              transfer-connection = {
                name = "vinyldns."
                key-name = "vinyldns."
                key-name = ${?DEFAULT_DNS_KEY_NAME}
                key = "nzisn+4G2ldMn0q1CV3vsg=="
                key = ${?DEFAULT_DNS_KEY_SECRET}
                primary-server = "127.0.0.1"
                primary-server = ${?DEFAULT_DNS_ADDRESS}
              },
              tsig-usage = "always"
            },
            {
              id = "func-test-backend"
              zone-connection = {
                name = "vinyldns."
                key-name = "vinyldns."
                key-name = ${?DEFAULT_DNS_KEY_NAME}
                key = "nzisn+4G2ldMn0q1CV3vsg=="
                key = ${?DEFAULT_DNS_KEY_SECRET}
                primary-server = "127.0.0.1"
                primary-server = ${?DEFAULT_DNS_ADDRESS}
              }
              transfer-connection = {
                name = "vinyldns."
                key-name = "vinyldns."
                key-name = ${?DEFAULT_DNS_KEY_NAME}
                key = "nzisn+4G2ldMn0q1CV3vsg=="
                key = ${?DEFAULT_DNS_KEY_SECRET}
                primary-server = "127.0.0.1"
                primary-server = ${?DEFAULT_DNS_ADDRESS}
              },
              tsig-usage = "always"
            }
          ]
        }
      }
    ]
  }

  queue {
    class-name = "vinyldns.sqs.queue.SqsMessageQueueProvider"

    messages-per-poll = 10
    polling-interval = 250.millis

    settings {
      # AWS access key and secret.
      access-key = "test"
      access-key = ${?AWS_ACCESS_KEY}
      secret-key = "test"
      secret-key = ${?AWS_SECRET_ACCESS_KEY}

      # Regional endpoint to make your requests (eg. 'us-west-2', 'us-east-1', etc.). This is the region where your queue is housed.
      signing-region = "us-east-1"
      signing-region = ${?SQS_REGION}

      # Endpoint to access queue
      service-endpoint = "http://localhost:4566/"
      service-endpoint = ${?SQS_ENDPOINT}

      # Queue name. Should be used in conjunction with service endpoint, rather than using a queue url which is subject to change.
      queue-name = "vinyldns"
      queue-name = ${?SQS_QUEUE_NAME}
    }
  }

  rest {
    host = "0.0.0.0"
    port = 9000
  }

  sync-delay = 10000

  approved-name-servers = [
    "172.17.42.1.",
    "ns1.parent.com."
    "ns1.parent.com1."
    "ns1.parent.com2."
    "ns1.parent.com3."
    "ns1.parent.com4."
  ]

  crypto {
    type = "vinyldns.core.crypto.NoOpCrypto"
  }

  data-stores = ["mysql"]

  mysql {
    settings {
      # JDBC Settings, these are all values in scalikejdbc-config, not our own
      # these must be overridden to use MYSQL for production use
      # assumes a docker or mysql instance running locally
      name = "vinyldns"
      driver = "org.h2.Driver"
      driver = ${?JDBC_DRIVER}
      migration-url = "jdbc:h2:mem:vinyldns;MODE=MYSQL;DB_CLOSE_DELAY=-1;DATABASE_TO_LOWER=TRUE;IGNORECASE=TRUE;INIT=RUNSCRIPT FROM 'classpath:test/ddl.sql'"
      migration-url = ${?JDBC_MIGRATION_URL}
      url = "jdbc:h2:mem:vinyldns;MODE=MYSQL;DB_CLOSE_DELAY=-1;DATABASE_TO_LOWER=TRUE;IGNORECASE=TRUE;INIT=RUNSCRIPT FROM 'classpath:test/ddl.sql'"
      url = ${?JDBC_URL}
      user = "sa"
      user = ${?JDBC_USER}
      password = ""
      password = ${?JDBC_PASSWORD}
      # see https://github.com/brettwooldridge/HikariCP
      connection-timeout-millis = 1000
      idle-timeout = 10000
      max-lifetime = 600000
      maximum-pool-size = 20
      minimum-idle = 20
      register-mbeans = true
    }
    # Repositories that use this data store are listed here
    repositories {
      zone {
        # no additional settings for now
      }
      batch-change {
        # no additional settings for now
      }
      user {
      }
      record-set {
      }
      group {
      }
      membership {
      }
      group-change {
      }
      zone-change {
      }
      record-change {
      }
    }
  }

  backends = []

  batch-change-limit = 1000

  # FQDNs / IPs that cannot be modified via VinylDNS
  # regex-list used for all record types except PTR
  # ip-list used exclusively for PTR records
  high-value-domains = {
    regex-list = [
      "high-value-domain.*" # for testing
    ]
    ip-list = [
      # using reverse zones in the vinyldns/bind9 docker image for testing
      "192.0.2.252",
      "192.0.2.253",
      "fd69:27cc:fe91:0:0:0:0:ffff",
      "fd69:27cc:fe91:0:0:0:ffff:0"
    ]
  }

  # FQDNs / IPs / zone names that require manual review upon submission in batch change interface
  # domain-list used for all record types except PTR
  # ip-list used exclusively for PTR records
  manual-review-domains = {
    domain-list = [
      "needs-review.*"
    ]
    ip-list = [
      "192.0.1.254",
      "192.0.1.255",
      "192.0.2.254",
      "192.0.2.255",
      "192.0.3.254",
      "192.0.3.255",
      "192.0.4.254",
      "192.0.4.255",
      "fd69:27cc:fe91:0:0:0:ffff:1",
      "fd69:27cc:fe91:0:0:0:ffff:2",
      "fd69:27cc:fe92:0:0:0:ffff:1",
      "fd69:27cc:fe92:0:0:0:ffff:2",
      "fd69:27cc:fe93:0:0:0:ffff:1",
      "fd69:27cc:fe93:0:0:0:ffff:2",
      "fd69:27cc:fe94:0:0:0:ffff:1",
      "fd69:27cc:fe94:0:0:0:ffff:2"
    ]
    zone-name-list = [
      "zone.requires.review."
      "zone.requires.review1."
      "zone.requires.review2."
      "zone.requires.review3."
      "zone.requires.review4."
    ]
  }

  # FQDNs / IPs that cannot be modified via VinylDNS
  # regex-list used for all record types except PTR
  # ip-list used exclusively for PTR records
  high-value-domains = {
    regex-list = [
      "high-value-domain.*" # for testing
    ]
    ip-list = [
      # using reverse zones in the vinyldns/bind9 docker image for testing
      "192.0.1.252",
      "192.0.1.253",
      "192.0.2.252",
      "192.0.2.253",
      "192.0.3.252",
      "192.0.3.253",
      "192.0.4.252",
      "192.0.4.253",
      "fd69:27cc:fe91:0:0:0:0:ffff",
      "fd69:27cc:fe91:0:0:0:ffff:0",
      "fd69:27cc:fe92:0:0:0:0:ffff",
      "fd69:27cc:fe92:0:0:0:ffff:0",
      "fd69:27cc:fe93:0:0:0:0:ffff",
      "fd69:27cc:fe93:0:0:0:ffff:0",
      "fd69:27cc:fe94:0:0:0:0:ffff",
      "fd69:27cc:fe94:0:0:0:ffff:0"
    ]
  }

  # types of unowned records that users can access in shared zones
  shared-approved-types = ["A", "AAAA", "CNAME", "PTR", "TXT"]

  manual-batch-review-enabled = true

  scheduled-changes-enabled = true

  multi-record-batch-change-enabled = true

  global-acl-rules = [
    {
      group-ids: ["global-acl-group-id"],
      fqdn-regex-list: [".*shared[0-9]{1}."]
    },
    {
      group-ids: ["another-global-acl-group"],
      fqdn-regex-list: [".*ok[0-9]{1}."]
    }
  ]
}

akka {
  loglevel = "INFO"
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
  logger-startup-timeout = 30s

  actor {
    provider = "akka.actor.LocalActorRefProvider"
  }
}

akka.http {
  server {
    # The time period within which the TCP binding process must be completed.
    # Set to `infinite` to disable.
    bind-timeout = 5s

    # Show verbose error messages back to the client
    verbose-error-messages = on
  }

  parsing {
    # Spray doesn't like the AWS4 headers
    illegal-header-warnings = on
  }
}
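Nearly every value in this file is written twice - a literal default followed by an optional `${?ENV_VAR}` substitution - so an environment variable overrides the default only when it is actually set. The resolution rule, restated outside HOCON as a small Python sketch (variable names taken from the config above):

```python
import os


def resolve(default: str, env_var: str) -> str:
    """HOCON-style optional override: use the environment variable only if it is set."""
    return os.environ.get(env_var, default)


# Examples mirroring entries above; with no overrides set, the H2/local defaults win.
dns_address = resolve("127.0.0.1", "DEFAULT_DNS_ADDRESS")
jdbc_url = resolve(
    "jdbc:h2:mem:vinyldns;MODE=MYSQL;DB_CLOSE_DELAY=-1;DATABASE_TO_LOWER=TRUE;"
    "IGNORECASE=TRUE;INIT=RUNSCRIPT FROM 'classpath:test/ddl.sql'",
    "JDBC_URL",
)
```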
@@ -84,7 +84,7 @@ def test_get_batch_change_with_deleted_record_owner_group_success(shared_zone_te
     client = shared_zone_test_context.shared_zone_vinyldns_client
     shared_zone_name = shared_zone_test_context.shared_zone["name"]
     temp_group = {
-        "name": "test-get-batch-record-owner-group2",
+        "name": f"test-get-batch-record-owner-group{shared_zone_test_context.partition_id}",
         "email": "test@test.com",
         "description": "for testing that a get batch change still works when record owner group is deleted",
         "members": [{"id": "sharedZoneUser"}],
@@ -5,10 +5,15 @@ from vinyldns_context import VinylDNSTestContext
 from vinyldns_python import VinylDNSClient

+
+# FIXME: this whole suite of tests is fragile as it relies on data ordered in a specific way
+# and that data cannot be cleaned up via the API (batchrecordchanges). This causes problems
+# with xdist and parallel execution. The xdist scheduler will only ever schedule this suite
+# on the first worker (gw0).

 @pytest.fixture(scope="module")
-def list_fixture(shared_zone_test_context):
+def list_fixture(shared_zone_test_context, tmp_path_factory):
     ctx = shared_zone_test_context.list_batch_summaries_context
-    ctx.setup(shared_zone_test_context)
+    ctx.setup(shared_zone_test_context, tmp_path_factory.getbasetemp().parent)
     yield ctx
     ctx.tear_down(shared_zone_test_context)

@@ -20,7 +25,7 @@ def test_list_batch_change_summaries_success(list_fixture):
     client = list_fixture.client
     batch_change_summaries_result = client.list_batch_change_summaries(status=200)

-    list_fixture.check_batch_change_summaries_page_accuracy(batch_change_summaries_result, size=3)
+    list_fixture.check_batch_change_summaries_page_accuracy(batch_change_summaries_result, size=len(list_fixture.completed_changes))


 def test_list_batch_change_summaries_with_max_items(list_fixture):
@@ -40,7 +45,8 @@ def test_list_batch_change_summaries_with_start_from(list_fixture):
     client = list_fixture.client
     batch_change_summaries_result = client.list_batch_change_summaries(status=200, start_from=1)

-    list_fixture.check_batch_change_summaries_page_accuracy(batch_change_summaries_result, size=2, start_from=1)
+    all_changes = list_fixture.completed_changes
+    list_fixture.check_batch_change_summaries_page_accuracy(batch_change_summaries_result, size=len(all_changes) - 1, start_from=1)


 def test_list_batch_change_summaries_with_next_id(list_fixture):
@@ -49,13 +55,15 @@ def test_list_batch_change_summaries_with_next_id(list_fixture):
     Apply retrieved nextId to get second page of batch change summaries.
     """
     client = list_fixture.client

     batch_change_summaries_result = client.list_batch_change_summaries(status=200, start_from=1, max_items=1)

     list_fixture.check_batch_change_summaries_page_accuracy(batch_change_summaries_result, size=1, start_from=1, max_items=1, next_id=2)

     next_page_result = client.list_batch_change_summaries(status=200, start_from=batch_change_summaries_result["nextId"])

-    list_fixture.check_batch_change_summaries_page_accuracy(next_page_result, size=1, start_from=batch_change_summaries_result["nextId"])
+    all_changes = list_fixture.completed_changes
+    list_fixture.check_batch_change_summaries_page_accuracy(next_page_result, size=len(all_changes) - int(batch_change_summaries_result["nextId"]), start_from=batch_change_summaries_result["nextId"])


 @pytest.mark.manual_batch_review
@@ -16,7 +16,7 @@ ctx_cache: MutableMapping[str, SharedZoneTestContext] = {}
 @pytest.fixture(scope="session")
 def shared_zone_test_context(tmp_path_factory, worker_id):
     if worker_id == "master":
-        partition_id = "2"
+        partition_id = "1"
     else:
         partition_id = str(int(worker_id.replace("gw", "")) + 1)

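With the worker pool fixed at four, the fixture above maps xdist workers `gw0`..`gw3` to partitions 1-4, and a non-distributed run (worker id `master`) now falls back to partition 1 instead of 2. A standalone sketch of that mapping:

```python
def partition_for(worker_id: str) -> str:
    """Map a pytest-xdist worker id to a test data partition ("1".."4")."""
    if worker_id == "master":  # not running under xdist
        return "1"
    return str(int(worker_id.replace("gw", "")) + 1)


assert [partition_for(w) for w in ("master", "gw0", "gw1", "gw2", "gw3")] == ["1", "1", "2", "3", "4"]
```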
@@ -1,26 +1,31 @@
+from pathlib import Path
+
 from utils import *
 from vinyldns_python import VinylDNSClient

+
+# FIXME: this context is fragile as it depends on creating batch changes carefully created with a time delay.

 class ListBatchChangeSummariesTestContext:
-    to_delete: set = set()
-    completed_changes: list = []
-    group: object = None
-    is_setup: bool = False
-
-    def __init__(self):
+    def __init__(self, partition_id: str):
+        self.to_delete: set = set()
+        self.completed_changes: list = []
+        self.setup_started = False
+        self.partition_id = partition_id
         self.client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "listBatchSummariesAccessKey", "listBatchSummariesSecretKey")

-    def setup(self, shared_zone_test_context):
+    def setup(self, shared_zone_test_context, temp_directory: Path):
+        if self.setup_started:
+            # Safeguard against reentrance
+            return
+
+        self.setup_started = True
         self.completed_changes = []
         self.to_delete = set()

         acl_rule = generate_acl_rule("Write", userId="list-batch-summaries-id")
         add_ok_acl_rules(shared_zone_test_context, [acl_rule])

-        initial_db_check = self.client.list_batch_change_summaries(status=200)
-        self.group = self.client.get_group("list-summaries-group", status=200)

         ok_zone_name = shared_zone_test_context.ok_zone["name"]
         batch_change_input_one = {
             "comments": "first",
@@ -48,7 +53,6 @@ class ListBatchChangeSummariesTestContext:
         record_set_list = []
         self.completed_changes = []

-        if len(initial_db_check["batchChanges"]) == 0:
         # make some batch changes
         for batch_change_input in batch_change_inputs:
             change = self.client.create_batch_change(batch_change_input, status=202)
@@ -57,17 +61,12 @@ class ListBatchChangeSummariesTestContext:
             completed = self.client.wait_until_batch_change_completed(change)
             assert_that(completed["comments"], equal_to(batch_change_input["comments"]))
             record_set_list += [(change["zoneId"], change["recordSetId"]) for change in completed["changes"]]
+        self.to_delete = set(record_set_list)

-            # sleep for consistent ordering of timestamps, must be at least one second apart
-            time.sleep(1)
+        # Sleep for consistent ordering of timestamps, must be at least one second apart
+        time.sleep(1.1)

-            self.completed_changes = self.client.list_batch_change_summaries(status=200)["batchChanges"]
-            assert_that(len(self.completed_changes), equal_to(len(batch_change_inputs)))
-        else:
-            print("\r\n!!! USING EXISTING SUMMARIES")
-            self.completed_changes = initial_db_check["batchChanges"]
-        self.to_delete = set(record_set_list)
-        self.is_setup = True
+        self.completed_changes = self.client.list_batch_change_summaries(status=200)["batchChanges"]

     def tear_down(self, shared_zone_test_context):
         for result_rs in self.to_delete:
@@ -76,6 +75,8 @@ class ListBatchChangeSummariesTestContext:
             shared_zone_test_context.ok_vinyldns_client.wait_until_recordset_change_status(delete_result, 'Complete')
         self.to_delete.clear()
         clear_ok_acl_rules(shared_zone_test_context)
+        self.client.clear_zones()
+        self.client.clear_groups()
         self.client.tear_down()

     def check_batch_change_summaries_page_accuracy(self, summaries_page, size, next_id=False, start_from=False, max_items=100, approval_status=False):
@@ -5,11 +5,16 @@ from vinyldns_python import VinylDNSClient
 class ListGroupsTestContext(object):
     def __init__(self, partition_id: str):
         self.partition_id = partition_id
+        self.setup_started = False
         self.client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "listGroupAccessKey", "listGroupSecretKey")
         self.support_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "supportUserAccessKey", "supportUserSecretKey")
         self.group_prefix = f"test-list-my-groups{partition_id}"

-    def build(self):
+    def setup(self):
+        if self.setup_started:
+            # Safeguard against reentrance
+            return
+        self.setup_started = True
         try:
             for index in range(0, 50):
                 new_group = {
@@ -25,7 +30,9 @@ class ListGroupsTestContext(object):
             raise

     def tear_down(self):
-        clear_zones(self.client)
-        clear_groups(self.client)
+        self.client.clear_zones()
+        self.client.clear_groups()
         self.client.tear_down()
+        self.support_user_client.clear_zones()
+        self.support_user_client.clear_groups()
         self.support_user_client.tear_down()
@@ -5,6 +5,7 @@ from vinyldns_python import VinylDNSClient
 class ListRecordSetsTestContext(object):
     def __init__(self, partition_id: str):
         self.partition_id = partition_id
+        self.setup_started = False
         self.client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "listRecordsAccessKey", "listRecordsSecretKey")
         self.zone = None
         self.all_records = []
@@ -19,6 +20,11 @@ class ListRecordSetsTestContext(object):
         self.group = my_groups["groups"][0]

     def setup(self):
+        if self.setup_started:
+            # Safeguard against reentrance
+            return
+        self.setup_started = True
+
         partition_id = self.partition_id
         group = {
             "name": f"list-records-group{partition_id}",
@@ -42,8 +48,8 @@ class ListRecordSetsTestContext(object):
         self.all_records = self.client.list_recordsets_by_zone(self.zone["id"])["recordSets"]

     def tear_down(self):
-        clear_zones(self.client)
-        clear_groups(self.client)
+        self.client.clear_zones()
+        self.client.clear_groups()
         self.client.tear_down()

     def check_recordsets_page_accuracy(self, list_results_page, size, offset, next_id=False, start_from=False, max_items=100, record_type_filter=False, name_sort="ASC"):
@@ -5,6 +5,7 @@ from vinyldns_python import VinylDNSClient
 class ListZonesTestContext(object):
     def __init__(self, partition_id):
         self.partition_id = partition_id
+        self.setup_started = False
         self.client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "listZonesAccessKey", "listZonesSecretKey")
         self.search_zone1 = None
         self.search_zone2 = None
@@ -13,7 +14,12 @@ class ListZonesTestContext(object):
         self.non_search_zone2 = None
         self.list_zones_group = None

-    def build(self):
+    def setup(self):
+        if self.setup_started:
+            # Safeguard against reentrance
+            return
+        self.setup_started = True
+
         partition_id = self.partition_id
         group = {
             "name": f"list-zones-group{partition_id}",
@@ -84,6 +90,6 @@ class ListZonesTestContext(object):
         self.client.wait_until_zone_active(change["zone"]["id"])

     def tear_down(self):
-        clear_zones(self.client)
-        clear_groups(self.client)
+        self.client.clear_zones()
+        self.client.clear_groups()
         self.client.tear_down()
@@ -10,7 +10,7 @@ def test_create_group_success(shared_zone_test_context):

     try:
         new_group = {
-            "name": "test-create-group-success",
+            "name": f"test-create-group-success{shared_zone_test_context.partition_id}",
             "email": "test@test.com",
             "description": "this is a description",
             "members": [{"id": "ok"}],
@@ -42,7 +42,7 @@ def test_delete_group_that_is_already_deleted(shared_zone_test_context):

     try:
         new_group = {
-            "name": "test-delete-group-already",
+            "name": f"test-delete-group-already{shared_zone_test_context.partition_id}",
             "email": "test@test.com",
             "description": "this is a description",
             "members": [{"id": "ok"}],
@@ -76,7 +76,6 @@ def test_delete_admin_group(shared_zone_test_context):
         }

         result_group = client.create_group(new_group, status=200)
-        print(result_group)

         # Create zone with that group ID as admin
         zone = {
@@ -13,16 +13,17 @@ def test_list_my_groups_no_parameters(list_my_groups_context):
     Test that we can get all the groups where a user is a member
     """
     results = list_my_groups_context.client.list_my_groups(status=200)

     assert_that(results, has_length(3))  # 3 fields

-    assert_that(results["groups"], has_length(50))
+    # Only count the groups with the group prefix
+    groups = [x for x in results["groups"] if x["name"].startswith(list_my_groups_context.group_prefix)]
+    assert_that(groups, has_length(50))
     assert_that(results, is_not(has_key("groupNameFilter")))
     assert_that(results, is_not(has_key("startFrom")))
     assert_that(results, is_not(has_key("nextId")))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))

-    results["groups"] = sorted(results["groups"], key=lambda x: x["name"])
+    results["groups"] = sorted(groups, key=lambda x: x["name"])

     for i in range(0, 50):
         assert_that(results["groups"][i]["name"], is_("{0}-{1:0>3}".format(list_my_groups_context.group_prefix, i)))
@@ -37,7 +38,7 @@ def test_get_my_groups_using_old_account_auth(list_my_groups_context):
     assert_that(results, is_not(has_key("groupNameFilter")))
     assert_that(results, is_not(has_key("startFrom")))
     assert_that(results, is_not(has_key("nextId")))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))


 def test_list_my_groups_max_items(list_my_groups_context):
@@ -101,7 +102,7 @@ def test_list_my_groups_filter_matches(list_my_groups_context):
     assert_that(results["groupNameFilter"], is_(f"{list_my_groups_context.group_prefix}-01"))
     assert_that(results, is_not(has_key("startFrom")))
     assert_that(results, is_not(has_key("nextId")))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))

     results["groups"] = sorted(results["groups"], key=lambda x: x["name"])

@@ -133,15 +134,17 @@ def test_list_my_groups_with_ignore_access_true(list_my_groups_context):
     """
     results = list_my_groups_context.client.list_my_groups(ignore_access=True, status=200)

+    # Only count the groups with the group prefix
     assert_that(len(results["groups"]), greater_than(50))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))
     assert_that(results["ignoreAccess"], is_(True))

     my_results = list_my_groups_context.client.list_my_groups(status=200)
-    my_results["groups"] = sorted(my_results["groups"], key=lambda x: x["name"])
+    my_groups = [x for x in my_results["groups"] if x["name"].startswith(list_my_groups_context.group_prefix)]
+    sorted_groups = sorted(my_groups, key=lambda x: x["name"])

     for i in range(0, 50):
-        assert_that(my_results["groups"][i]["name"], is_("{0}-{1:0>3}".format(list_my_groups_context.group_prefix, i)))
+        assert_that(sorted_groups[i]["name"], is_("{0}-{1:0>3}".format(list_my_groups_context.group_prefix, i)))


 def test_list_my_groups_as_support_user(list_my_groups_context):
@@ -151,7 +154,7 @@ def test_list_my_groups_as_support_user(list_my_groups_context):
     results = list_my_groups_context.support_user_client.list_my_groups(status=200)

     assert_that(len(results["groups"]), greater_than(50))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))
     assert_that(results["ignoreAccess"], is_(False))


@@ -162,5 +165,5 @@ def test_list_my_groups_as_support_user_with_ignore_access_true(list_my_groups_c
     results = list_my_groups_context.support_user_client.list_my_groups(ignore_access=True, status=200)

     assert_that(len(results["groups"]), greater_than(50))
-    assert_that(results["maxItems"], is_(100))
+    assert_that(results["maxItems"], is_(200))
     assert_that(results["ignoreAccess"], is_(True))
@@ -23,10 +23,7 @@ def test_verify_production(shared_zone_test_context):
             }
         ]
     }
-    print("\r\nCreating recordset in zone " + str(shared_zone_test_context.ok_zone) + "\r\n")
     result = client.create_recordset(new_rs, status=202)
-    print(str(result))

     assert_that(result["changeType"], is_("Create"))
     assert_that(result["status"], is_("Pending"))
     assert_that(result["created"], is_not(none()))
@@ -34,17 +31,14 @@ def test_verify_production(shared_zone_test_context):

     result_rs = result["recordSet"]
     result_rs = client.wait_until_recordset_change_status(result, "Complete")["recordSet"]
-    print("\r\n\r\n!!!recordset is active! Verifying...")

     verify_recordset(result_rs, new_rs)
-    print("\r\n\r\n!!!recordset verified...")

     records = [x["address"] for x in result_rs["records"]]
     assert_that(records, has_length(2))
     assert_that("10.1.1.1", is_in(records))
     assert_that("10.2.2.2", is_in(records))

-    print("\r\n\r\n!!!verifying recordset in dns backend")
     answers = dns_resolve(shared_zone_test_context.ok_zone, result_rs["name"], result_rs["type"])
     rdata_strings = rdata(answers)

@ -26,7 +26,6 @@ def test_create_recordset_with_dns_verify(shared_zone_test_context):
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -157,7 +156,6 @@ def test_create_srv_recordset_with_service_and_protocol(shared_zone_test_context
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -193,7 +191,6 @@ def test_create_aaaa_recordset_with_shorthand_record(shared_zone_test_context):
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -229,7 +226,6 @@ def test_create_aaaa_recordset_with_normal_record(shared_zone_test_context):
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -392,7 +388,6 @@ def test_create_recordset_conflict_with_dns_different_type(shared_zone_test_cont
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -1032,7 +1027,6 @@ def test_create_recordset_forward_record_types(shared_zone_test_context, record_
|
|||||||
|
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
result_rs = result["recordSet"]
|
result_rs = result["recordSet"]
|
||||||
verify_recordset(result_rs, new_rs)
|
verify_recordset(result_rs, new_rs)
|
||||||
@ -1064,7 +1058,6 @@ def test_reverse_create_recordset_reverse_record_types(shared_zone_test_context,
|
|||||||
|
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
result_rs = result["recordSet"]
|
result_rs = result["recordSet"]
|
||||||
verify_recordset(result_rs, new_rs)
|
verify_recordset(result_rs, new_rs)
|
||||||
@ -1183,7 +1176,6 @@ def test_create_ipv4_ptr_recordset_with_verify(shared_zone_test_context):
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -1269,7 +1261,6 @@ def test_create_ipv6_ptr_recordset(shared_zone_test_context):
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -1377,7 +1368,6 @@ def test_at_create_recordset(shared_zone_test_context):
|
|||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
|
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -1428,7 +1418,6 @@ def test_create_record_with_escape_characters_in_record_data_succeeds(shared_zon
|
|||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
|
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
@ -1686,7 +1675,6 @@ def test_create_ipv4_ptr_recordset_with_verify_in_classless(shared_zone_test_con
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
result = client.create_recordset(new_rs, status=202)
|
result = client.create_recordset(new_rs, status=202)
|
||||||
print(str(result))
|
|
||||||
|
|
||||||
assert_that(result["changeType"], is_("Create"))
|
assert_that(result["changeType"], is_("Create"))
|
||||||
assert_that(result["status"], is_("Pending"))
|
assert_that(result["status"], is_("Pending"))
|
||||||
|
@@ -18,7 +18,6 @@ def test_delete_recordset_forward_record_types(shared_zone_test_context, record_

 result = client.create_recordset(new_rs, status=202)
 assert_that(result["status"], is_("Pending"))
-print(str(result))

 result_rs = result["recordSet"]
 verify_recordset(result_rs, new_rs)
@@ -62,7 +61,6 @@ def test_delete_recordset_reverse_record_types(shared_zone_test_context, record_

 result = client.create_recordset(new_rs, status=202)
 assert_that(result["status"], is_("Pending"))
-print(str(result))

 result_rs = result["recordSet"]
 verify_recordset(result_rs, new_rs)
@@ -114,7 +112,6 @@ def test_delete_recordset_with_verify(shared_zone_test_context):
 ]
 }
 result = client.create_recordset(new_rs, status=202)
-print(str(result))

 assert_that(result["changeType"], is_("Create"))
 assert_that(result["status"], is_("Pending"))
@@ -250,7 +247,6 @@ def test_delete_ipv4_ptr_recordset(shared_zone_test_context):
 }
 result = client.create_recordset(orig_rs, status=202)
 result_rs = client.wait_until_recordset_change_status(result, "Complete")["recordSet"]
-print("\r\n\r\n!!!recordset is active! Deleting...")

 delete_result = client.delete_recordset(result_rs["zoneId"], result_rs["id"], status=202)
 client.wait_until_recordset_change_status(delete_result, "Complete")
@@ -289,7 +285,6 @@ def test_delete_ipv6_ptr_recordset(shared_zone_test_context):
 }
 result = client.create_recordset(orig_rs, status=202)
 result_rs = client.wait_until_recordset_change_status(result, "Complete")["recordSet"]
-print("\r\n\r\n!!!recordset is active! Deleting...")

 delete_result = client.delete_recordset(result_rs["zoneId"], result_rs["id"], status=202)
 client.wait_until_recordset_change_status(delete_result, "Complete")
@@ -344,8 +339,6 @@ def test_at_delete_recordset(shared_zone_test_context):
 }
 result = client.create_recordset(new_rs, status=202)

-print(json.dumps(result, indent=3))

 assert_that(result["changeType"], is_("Create"))
 assert_that(result["status"], is_("Pending"))
 assert_that(result["created"], is_not(none()))
@@ -391,28 +384,21 @@ def test_delete_recordset_with_different_dns_data(shared_zone_test_context):
 }
 ]
 }
-print("\r\nCreating recordset in zone " + str(ok_zone) + "\r\n")
 result = client.create_recordset(new_rs, status=202)
-print(str(result))

 result_rs = result["recordSet"]
 result_rs = client.wait_until_recordset_change_status(result, "Complete")["recordSet"]
-print("\r\n\r\n!!!recordset is active! Verifying...")

 verify_recordset(result_rs, new_rs)
-print("\r\n\r\n!!!recordset verified...")

 result_rs["records"][0]["address"] = "10.8.8.8"
 result = client.update_recordset(result_rs, status=202)
 result_rs = client.wait_until_recordset_change_status(result, "Complete")["recordSet"]

-print("\r\n\r\n!!!verifying recordset in dns backend")
 answers = dns_resolve(ok_zone, result_rs["name"], result_rs["type"])
 assert_that(answers, has_length(1))

 response = dns_update(ok_zone, result_rs["name"], 300, result_rs["type"], "10.9.9.9")
-print("\nSuccessfully updated the record, record is now out of sync\n")
-print(str(response))

 # check you can delete
 delete_result = client.delete_recordset(result_rs["zoneId"], result_rs["id"], status=202)
@@ -149,7 +149,6 @@ def test_update_recordset_forward_record_types(shared_zone_test_context, record_

 result = client.create_recordset(new_rs, status=202)
 assert_that(result["status"], is_("Pending"))
-print(str(result))

 result_rs = result["recordSet"]
 verify_recordset(result_rs, new_rs)
@@ -193,7 +192,6 @@ def test_update_reverse_record_types(shared_zone_test_context, record_name, test

 result = client.create_recordset(new_rs, status=202)
 assert_that(result["status"], is_("Pending"))
-print(str(result))

 result_rs = result["recordSet"]
 verify_recordset(result_rs, new_rs)
@@ -305,7 +303,6 @@ def test_update_recordset_replace_2_records_with_1_different_record(shared_zone_
 ]
 }
 result = client.create_recordset(new_rs, status=202)
-print(str(result))

 assert_that(result["changeType"], is_("Create"))
 assert_that(result["status"], is_("Pending"))
@@ -376,7 +373,6 @@ def test_update_existing_record_set_add_record(shared_zone_test_context):
 ]
 }
 result = client.create_recordset(new_rs, status=202)
-print(str(result))

 assert_that(result["changeType"], is_("Create"))
 assert_that(result["status"], is_("Pending"))
@@ -393,8 +389,6 @@ def test_update_existing_record_set_add_record(shared_zone_test_context):

 answers = dns_resolve(ok_zone, result_rs["name"], result_rs["type"])
 rdata_strings = rdata(answers)
-print("GOT ANSWERS BACK FOR INITIAL CREATE:")
-print(str(rdata_strings))

 # Update the record set, adding a new record to the existing one
 modified_records = [
@@ -426,8 +420,6 @@ def test_update_existing_record_set_add_record(shared_zone_test_context):
 answers = dns_resolve(ok_zone, result_rs["name"], result_rs["type"])
 rdata_strings = rdata(answers)

-print("GOT BACK ANSWERS FOR UPDATE")
-print(str(rdata_strings))
 assert_that(rdata_strings, has_length(2))
 assert_that("10.2.2.2", is_in(rdata_strings))
 assert_that("4.4.4.8", is_in(rdata_strings))
@@ -542,9 +534,7 @@ def test_update_ipv4_ptr_recordset_with_verify(shared_zone_test_context):

 new_ptr_target = "www.vinyldns."
 new_rs = result_rs
-print(new_rs)
 new_rs["records"][0]["ptrdname"] = new_ptr_target
-print(new_rs)
 result = client.update_recordset(new_rs, status=202)

 result_rs = result["recordSet"]
@@ -552,7 +542,6 @@ def test_update_ipv4_ptr_recordset_with_verify(shared_zone_test_context):

 verify_recordset(result_rs, new_rs)

-print(result_rs)
 records = result_rs["records"]
 assert_that(records[0]["ptrdname"], is_(new_ptr_target))

@@ -594,9 +583,7 @@ def test_update_ipv6_ptr_recordset(shared_zone_test_context):

 new_ptr_target = "www.vinyldns."
 new_rs = result_rs
-print(new_rs)
 new_rs["records"][0]["ptrdname"] = new_ptr_target
-print(new_rs)
 result = client.update_recordset(new_rs, status=202)

 result_rs = result["recordSet"]
@@ -604,7 +591,6 @@ def test_update_ipv6_ptr_recordset(shared_zone_test_context):

 verify_recordset(result_rs, new_rs)

-print(result_rs)
 records = result_rs["records"]
 assert_that(records[0]["ptrdname"], is_(new_ptr_target))

@@ -698,7 +684,6 @@ def test_at_update_recordset(shared_zone_test_context):
 }

 result = client.create_recordset(new_rs, status=202)
-print(str(result))

 assert_that(result["changeType"], is_("Create"))
 assert_that(result["status"], is_("Pending"))
@@ -20,60 +20,10 @@ class SharedZoneTestContext(object):
 """
 _data_cache: MutableMapping[str, MutableMapping[str, Mapping]] = {}

-@property
-def ok_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_ok_zone")

-@property
-def shared_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_shared_zone")

-@property
-def history_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_history_zone")

-@property
-def dummy_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_dummy_zone")

-@property
-def ip6_reverse_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_ip6_reverse_zone")

-@property
-def ip6_16_nibble_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_ip6_16_nibble_zone")

-@property
-def ip4_reverse_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_ip4_reverse_zone")

-@property
-def classless_base_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_classless_base_zone")

-@property
-def classless_zone_delegation_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_classless_zone_delegation_zone")

-@property
-def system_test_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_system_test_zone")

-@property
-def parent_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_parent_zone")

-@property
-def ds_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_ds_zone")

-@property
-def requires_review_zone(self) -> Mapping:
-return self.attempt_retrieve_value("_requires_review_zone")

 def __init__(self, partition_id: str):
 self.partition_id = partition_id
+self.setup_started = False
 self.ok_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "okAccessKey", "okSecretKey")
 self.dummy_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "dummyAccessKey", "dummySecretKey")
 self.shared_zone_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "sharedZoneUserAccessKey", "sharedZoneUserSecretKey")
@@ -87,7 +37,7 @@ class SharedZoneTestContext(object):
 self.list_zones_client = self.list_zones.client
 self.list_records_context = ListRecordSetsTestContext(partition_id)
 self.list_groups_context = ListGroupsTestContext(partition_id)
-self.list_batch_summaries_context = None
+self.list_batch_summaries_context = ListBatchChangeSummariesTestContext(partition_id)

 self.dummy_group = None
 self.ok_group = None
@@ -96,25 +46,30 @@ class SharedZoneTestContext(object):
 self.group_activity_created = None
 self.group_activity_updated = None

-self._history_zone = None
+self.history_zone = None
-self._ok_zone = None
+self.ok_zone = None
-self._dummy_zone = None
+self.dummy_zone = None
-self._ip6_reverse_zone = None
+self.ip6_reverse_zone = None
-self._ip6_16_nibble_zone = None
+self.ip6_16_nibble_zone = None
-self._ip4_reverse_zone = None
+self.ip4_reverse_zone = None
-self._classless_base_zone = None
+self.classless_base_zone = None
-self._classless_zone_delegation_zone = None
+self.classless_zone_delegation_zone = None
-self._system_test_zone = None
+self.system_test_zone = None
-self._parent_zone = None
+self.parent_zone = None
-self._ds_zone = None
+self.ds_zone = None
-self._requires_review_zone = None
+self.requires_review_zone = None
-self._shared_zone = None
+self.shared_zone = None

 self.ip4_10_prefix = None
 self.ip4_classless_prefix = None
 self.ip6_prefix = None

 def setup(self):
+if self.setup_started:
+# Safeguard against reentrance
+return
+self.setup_started = True

 partition_id = self.partition_id
 try:
 ok_group = {
@@ -181,7 +136,11 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._history_zone = history_zone_change["zone"]
+self.history_zone = history_zone_change["zone"]

+# initialize history
+self.history_client.wait_until_zone_active(history_zone_change["zone"]["id"])
+self.init_history()

 ok_zone_change = self.ok_vinyldns_client.create_zone(
 {
@@ -205,7 +164,7 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._ok_zone = ok_zone_change["zone"]
+self.ok_zone = ok_zone_change["zone"]

 dummy_zone_change = self.dummy_vinyldns_client.create_zone(
 {
@@ -229,7 +188,7 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._dummy_zone = dummy_zone_change["zone"]
+self.dummy_zone = dummy_zone_change["zone"]

 self.ip6_prefix = f"fd69:27cc:fe9{partition_id}"
 ip6_reverse_zone_change = self.ok_vinyldns_client.create_zone(
@@ -255,7 +214,7 @@ class SharedZoneTestContext(object):
 }
 }, status=202
 )
-self._ip6_reverse_zone = ip6_reverse_zone_change["zone"]
+self.ip6_reverse_zone = ip6_reverse_zone_change["zone"]

 ip6_16_nibble_zone_change = self.ok_vinyldns_client.create_zone(
 {
@@ -267,7 +226,7 @@ class SharedZoneTestContext(object):
 "backendId": "func-test-backend"
 }, status=202
 )
-self._ip6_16_nibble_zone = ip6_16_nibble_zone_change["zone"]
+self.ip6_16_nibble_zone = ip6_16_nibble_zone_change["zone"]

 self.ip4_10_prefix = f"10.{partition_id}"
 ip4_reverse_zone_change = self.ok_vinyldns_client.create_zone(
@@ -293,7 +252,7 @@ class SharedZoneTestContext(object):
 }
 }, status=202
 )
-self._ip4_reverse_zone = ip4_reverse_zone_change["zone"]
+self.ip4_reverse_zone = ip4_reverse_zone_change["zone"]

 self.ip4_classless_prefix = f"192.0.{partition_id}"
 classless_base_zone_change = self.ok_vinyldns_client.create_zone(
@@ -319,7 +278,7 @@ class SharedZoneTestContext(object):
 }
 }, status=202
 )
-self._classless_base_zone = classless_base_zone_change["zone"]
+self.classless_base_zone = classless_base_zone_change["zone"]

 classless_zone_delegation_change = self.ok_vinyldns_client.create_zone(
 {
@@ -344,7 +303,7 @@ class SharedZoneTestContext(object):
 }
 }, status=202
 )
-self._classless_zone_delegation_zone = classless_zone_delegation_change["zone"]
+self.classless_zone_delegation_zone = classless_zone_delegation_change["zone"]

 system_test_zone_change = self.ok_vinyldns_client.create_zone(
 {
@@ -369,7 +328,7 @@ class SharedZoneTestContext(object):
 }
 }, status=202
 )
-self._system_test_zone = system_test_zone_change["zone"]
+self.system_test_zone = system_test_zone_change["zone"]

 # parent zone gives access to the dummy user, dummy user cannot manage ns records
 parent_zone_change = self.ok_vinyldns_client.create_zone(
@@ -403,7 +362,7 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._parent_zone = parent_zone_change["zone"]
+self.parent_zone = parent_zone_change["zone"]

 # mimicking the spec example
 ds_zone_change = self.ok_vinyldns_client.create_zone(
@@ -428,7 +387,7 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._ds_zone = ds_zone_change["zone"]
+self.ds_zone = ds_zone_change["zone"]

 # zone with name configured for manual review
 requires_review_zone_change = self.ok_vinyldns_client.create_zone(
@@ -440,7 +399,7 @@ class SharedZoneTestContext(object):
 "isTest": True,
 "backendId": "func-test-backend"
 }, status=202)
-self._requires_review_zone = requires_review_zone_change["zone"]
+self.requires_review_zone = requires_review_zone_change["zone"]

 # Shared zone
 shared_zone_change = self.support_user_client.create_zone(
@@ -465,7 +424,7 @@ class SharedZoneTestContext(object):
 "primaryServer": VinylDNSTestContext.name_server_ip
 }
 }, status=202)
-self._shared_zone = shared_zone_change["zone"]
+self.shared_zone = shared_zone_change["zone"]

 # wait until our zones are created
 self.ok_vinyldns_client.wait_until_zone_active(system_test_zone_change["zone"]["id"])
@@ -480,23 +439,13 @@ class SharedZoneTestContext(object):
 self.ok_vinyldns_client.wait_until_zone_active(parent_zone_change["zone"]["id"])
 self.ok_vinyldns_client.wait_until_zone_active(ds_zone_change["zone"]["id"])
 self.ok_vinyldns_client.wait_until_zone_active(requires_review_zone_change["zone"]["id"])
-self.history_client.wait_until_zone_active(history_zone_change["zone"]["id"])
 self.shared_zone_vinyldns_client.wait_until_zone_active(shared_zone_change["zone"]["id"])

-# validate all in there
-zones = self.dummy_vinyldns_client.list_zones()["zones"]
-assert_that(len(zones), is_(2))
-zones = self.ok_vinyldns_client.list_zones()["zones"]
-assert_that(len(zones), is_(11))

-# initialize history
-self.init_history()

 # initialize group activity
 self.init_group_activity()

 # initialize list zones, only do this when constructing the whole!
-self.list_zones.build()
+self.list_zones.setup()

 # note: there are no state to load, the tests only need the client
 self.list_zones_client = self.list_zones.client
@@ -505,9 +454,7 @@ class SharedZoneTestContext(object):
 self.list_records_context.setup()

 # build the list of groups
-self.list_groups_context.build()
+self.list_groups_context.setup()

-self.list_batch_summaries_context = ListBatchChangeSummariesTestContext()
 except Exception:
 # Cleanup if setup fails
 self.tear_down()
@@ -519,7 +466,7 @@ class SharedZoneTestContext(object):
 # change the zone nine times to we have update events in zone change history,
 # ten total changes including creation
 for i in range(2, 11):
-zone_update = copy.deepcopy(self._history_zone)
+zone_update = copy.deepcopy(self.history_zone)
 zone_update["connection"]["key"] = VinylDNSTestContext.dns_key
 zone_update["transferConnection"]["key"] = VinylDNSTestContext.dns_key
 zone_update["email"] = "i.changed.this.{0}.times@history-test.com".format(i)
@@ -527,11 +474,11 @@ class SharedZoneTestContext(object):

 # create some record sets
 test_a = TestData.A.copy()
-test_a["zoneId"] = self._history_zone["id"]
+test_a["zoneId"] = self.history_zone["id"]
 test_aaaa = TestData.AAAA.copy()
-test_aaaa["zoneId"] = self._history_zone["id"]
+test_aaaa["zoneId"] = self.history_zone["id"]
 test_cname = TestData.CNAME.copy()
-test_cname["zoneId"] = self._history_zone["id"]
+test_cname["zoneId"] = self.history_zone["id"]

 a_record = self.history_client.create_recordset(test_a, status=202)["recordSet"]
 aaaa_record = self.history_client.create_recordset(test_aaaa, status=202)["recordSet"]
@@ -574,13 +521,7 @@ class SharedZoneTestContext(object):
 def init_group_activity(self):
 client = self.ok_vinyldns_client

-group_name = "test-list-group-activity-max-item-success"
+group_name = f"test-list-group-activity-max-item-success{self.partition_id}"

-# cleanup existing group if it's already in there
-groups = client.list_all_my_groups()
-existing = [grp for grp in groups if grp["name"] == group_name]
-for grp in existing:
-client.delete_group(grp["id"], status=200)

 members = [{"id": "ok"}]
 new_group = {
@@ -625,12 +566,11 @@ class SharedZoneTestContext(object):
 if self.list_groups_context:
 self.list_groups_context.tear_down()

-clear_zones(self.dummy_vinyldns_client)
-clear_zones(self.ok_vinyldns_client)
-clear_zones(self.history_client)
+for client in self.clients:
+client.clear_zones()
-clear_groups(self.dummy_vinyldns_client, "global-acl-group-id")
-clear_groups(self.ok_vinyldns_client, "global-acl-group-id")
-clear_groups(self.history_client)
+for client in self.clients:
+client.clear_groups()

 # Close all clients
 for client in self.clients:
@@ -649,29 +589,3 @@ class SharedZoneTestContext(object):
 time.sleep(.05)
 retries -= 1
 assert_that(success, is_(True))

-def attempt_retrieve_value(self, attribute_name: str) -> Mapping:
-"""
-Attempts to retrieve the data for the attribute given by `attribute_name`
-:param attribute_name: The name of the attribute for which to attempt to retrieve the value
-:return: The value of the attribute given by `attribute_name`
-"""
-if not VinylDNSTestContext.enable_safety_check:
-# Just return the real data
-return getattr(self, attribute_name)

-# Get the real data, stored on this instance
-real_data = getattr(self, attribute_name)

-# If we don't have a cache of the original value, make a copy and cache it
-if self._data_cache.get(attribute_name) is None:
-self._data_cache[attribute_name] = {"caller": "", "data": copy.deepcopy(real_data)}
-else:
-print("last caller: " + str(self._data_cache[attribute_name]["caller"]))
-assert_that(real_data, has_entries(self._data_cache[attribute_name]["data"]))

-# Set last known caller to print if our assertion fails
-self._data_cache[attribute_name]["caller"] = inspect.stack()[2][3]

-# Return the data
-return self._data_cache[attribute_name]["data"]
@@ -245,8 +245,6 @@ def test_create_zone_no_connection_uses_defaults(shared_zone_test_context):

 # Check response from create
 assert_that(zone["name"], is_(zone_name + "."))
-print("`connection` not in zone = " + "connection" not in zone)

 assert_that("connection" not in zone)
 assert_that("transferConnection" not in zone)

@@ -12,7 +12,7 @@ def test_list_zones_success(list_zone_context, shared_zone_test_context):
 """
 Test that we can retrieve a list of the user's zones
 """
-result = shared_zone_test_context.list_zones_client.list_zones(status=200)
+result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*{shared_zone_test_context.partition_id}", status=200)
 retrieved = result["zones"]

 assert_that(retrieved, has_length(5))
@@ -20,6 +20,8 @@ def test_list_zones_success(list_zone_context, shared_zone_test_context):
 assert_that(retrieved, has_item(has_entry("adminGroupName", list_zone_context.list_zones_group["name"])))
 assert_that(retrieved, has_item(has_entry("backendId", "func-test-backend")))

+assert_that(result["nameFilter"], is_(f"*{shared_zone_test_context.partition_id}"))


 def test_list_zones_max_items_100(shared_zone_test_context):
 """
@@ -56,7 +58,7 @@ def test_list_zones_no_search_first_page(list_zone_context, shared_zone_test_con
 """
 Test that the first page of listing zones returns correctly when no name filter is provided
 """
-result = shared_zone_test_context.list_zones_client.list_zones(max_items=3)
+result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*{shared_zone_test_context.partition_id}", max_items=3)
 zones = result["zones"]

 assert_that(zones, has_length(3))
@@ -67,14 +69,18 @@ def test_list_zones_no_search_first_page(list_zone_context, shared_zone_test_con
 assert_that(result["nextId"], is_(list_zone_context.search_zone3["name"]))
 assert_that(result["maxItems"], is_(3))
 assert_that(result, is_not(has_key("startFrom")))
-assert_that(result, is_not(has_key("nameFilter")))
+assert_that(result["nameFilter"], is_(f"*{shared_zone_test_context.partition_id}"))


 def test_list_zones_no_search_second_page(list_zone_context, shared_zone_test_context):
 """
 Test that the second page of listing zones returns correctly when no name filter is provided
 """
-result = shared_zone_test_context.list_zones_client.list_zones(start_from=list_zone_context.search_zone2["name"], max_items=2, status=200)
+result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*{shared_zone_test_context.partition_id}",
+start_from=list_zone_context.search_zone2["name"],
+max_items=2,
+status=200)
 zones = result["zones"]

 assert_that(zones, has_length(2))
@@ -84,14 +90,18 @@ def test_list_zones_no_search_second_page(list_zone_context, shared_zone_test_co
 assert_that(result["nextId"], is_(list_zone_context.non_search_zone1["name"]))
 assert_that(result["maxItems"], is_(2))
 assert_that(result["startFrom"], is_(list_zone_context.search_zone2["name"]))
-assert_that(result, is_not(has_key("nameFilter")))
+assert_that(result["nameFilter"], is_(f"*{shared_zone_test_context.partition_id}"))


 def test_list_zones_no_search_last_page(list_zone_context, shared_zone_test_context):
 """
 Test that the last page of listing zones returns correctly when no name filter is provided
 """
-result = shared_zone_test_context.list_zones_client.list_zones(start_from=list_zone_context.search_zone3["name"], max_items=4, status=200)
+result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*{shared_zone_test_context.partition_id}",
+start_from=list_zone_context.search_zone3["name"],
+max_items=4,
+status=200)
 zones = result["zones"]

 assert_that(zones, has_length(2))
@@ -101,14 +111,14 @@ def test_list_zones_no_search_last_page(list_zone_context, shared_zone_test_cont
 assert_that(result, is_not(has_key("nextId")))
 assert_that(result["maxItems"], is_(4))
 assert_that(result["startFrom"], is_(list_zone_context.search_zone3["name"]))
-assert_that(result, is_not(has_key("nameFilter")))
+assert_that(result["nameFilter"], is_(f"*{shared_zone_test_context.partition_id}"))


 def test_list_zones_with_search_first_page(list_zone_context, shared_zone_test_context):
 """
 Test that the first page of listing zones returns correctly when a name filter is provided
 """
-result = shared_zone_test_context.list_zones_client.list_zones(name_filter="*searched*", max_items=2, status=200)
+result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*searched*{shared_zone_test_context.partition_id}", max_items=2, status=200)
 zones = result["zones"]

 assert_that(zones, has_length(2))
@@ -117,7 +127,7 @@ def test_list_zones_with_search_first_page(list_zone_context, shared_zone_test_c

 assert_that(result["nextId"], is_(list_zone_context.search_zone2["name"]))
 assert_that(result["maxItems"], is_(2))
-assert_that(result["nameFilter"], is_("*searched*"))
+assert_that(result["nameFilter"], is_(f"*searched*{shared_zone_test_context.partition_id}"))
 assert_that(result, is_not(has_key("startFrom")))

@@ -729,9 +729,6 @@ def test_user_can_update_zone_to_another_admin_group(shared_zone_test_context):
 zone = result["zone"]
 client.wait_until_zone_active(result["zone"]["id"])

-import json
-print(json.dumps(zone, indent=3))

 new_joint_group = {
 "name": "new-ok-group",
 "email": "test@test.com",
@@ -1,4 +1,3 @@
 [pytest]
 norecursedirs=.virtualenv eggs .venv_win
 addopts = -rfesxX --capture=sys --junitxml=../target/pytest_reports/pytest.xml --durations=30

@@ -3,10 +3,10 @@ pytz>=2014
 pytest==6.2.5
 mock==4.0.3
 dnspython==2.1.0
-boto3==1.18.47
+boto3==1.18.51
-botocore==1.21.47
+botocore==1.21.51
 requests==2.26.0
 pytest-xdist==2.4.0
 python-dateutil==2.8.2
-filelock==3.0.12
+filelock==3.2.0
 pytest-custom_exit_code==0.3.0
@@ -1,12 +1,13 @@
 #!/usr/bin/env bash

-set -euo pipefail
+set -eo pipefail

+ROOT_DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
 UPDATE_DEPS=""
 if [ "$1" == "--update" ]; then
 UPDATE_DEPS="$1"
 shift
 fi

-PARAMS=("$@")
-./pytest.sh "${UPDATE_DEPS}" --suppress-no-test-exit-code -v live_tests "${PARAMS[@]}"
+cd "${ROOT_DIR}"
+"./pytest.sh" "${UPDATE_DEPS}" -n4 --suppress-no-test-exit-code -v live_tests "$@"
@@ -101,7 +101,6 @@ def dns_do_command(zone, record_name, record_type, command, ttl=0, rdata=""):

 (name_server, name_server_port) = dns_server_port(zone)
 fqdn = record_name + "." + zone["name"]
-print("updating " + fqdn + " to have data " + rdata)
 update = dns.update.Update(zone["name"], keyring=keyring)

 if command == "add":
@@ -198,9 +197,6 @@ def parse_record(record_string):
 # for each record, we have exactly 4 fields in order: 1 record name; 2 TTL; 3 DCLASS; 4 TYPE; 5 RDATA
 parts = record_string.split(" ")

-print("record parts")
-print(str(parts))

 # any parts over 4 have to be kept together
 offset = record_string.find(parts[3]) + len(parts[3]) + 1
 length = len(record_string) - offset
@@ -214,8 +210,6 @@ def parse_record(record_string):
 "rdata": record_data
 }

-print("parsed record:")
-print(str(record))
 return record


@@ -318,6 +312,7 @@ def remove_classless_acl_rules(test_context, rules):

 def clear_ok_acl_rules(test_context):
 zone = test_context.ok_zone
+if zone is not None and "acl" in zone and "rules" in zone["acl"]:
 zone["acl"]["rules"] = []
 update_change = test_context.ok_vinyldns_client.update_zone(zone, status=(202, 404))
 test_context.ok_vinyldns_client.wait_until_zone_change_status_synced(update_change)
@@ -325,6 +320,7 @@ def clear_ok_acl_rules(test_context):

 def clear_shared_zone_acl_rules(test_context):
 zone = test_context.shared_zone
+if zone is not None and "acl" in zone and "rules" in zone["acl"]:
 zone["acl"]["rules"] = []
 update_change = test_context.shared_zone_vinyldns_client.update_zone(zone, status=(202, 404))
 test_context.shared_zone_vinyldns_client.wait_until_zone_change_status_synced(update_change)
@@ -332,6 +328,7 @@ def clear_shared_zone_acl_rules(test_context):

 def clear_ip4_acl_rules(test_context):
 zone = test_context.ip4_reverse_zone
+if zone is not None and "acl" in zone and "rules" in zone["acl"]:
 zone["acl"]["rules"] = []
 update_change = test_context.ok_vinyldns_client.update_zone(zone, status=(202, 404))
 test_context.ok_vinyldns_client.wait_until_zone_change_status_synced(update_change)
@@ -339,6 +336,7 @@ def clear_ip4_acl_rules(test_context):

 def clear_ip6_acl_rules(test_context):
 zone = test_context.ip6_reverse_zone
+if zone is not None and "acl" in zone and "rules" in zone["acl"]:
 zone["acl"]["rules"] = []
 update_change = test_context.ok_vinyldns_client.update_zone(zone, status=(202, 404))
 test_context.ok_vinyldns_client.wait_until_zone_change_status_synced(update_change)
@@ -346,6 +344,7 @@ def clear_ip6_acl_rules(test_context):

 def clear_classless_acl_rules(test_context):
 zone = test_context.classless_zone_delegation_zone
+if zone is not None and "acl" in zone and "rules" in zone["acl"]:
 zone["acl"]["rules"] = []
 update_change = test_context.ok_vinyldns_client.update_zone(zone, status=(202, 404))
 test_context.ok_vinyldns_client.wait_until_zone_change_status_synced(update_change)
@@ -361,10 +360,7 @@ def seed_text_recordset(client, record_name, zone, records=[{"text": "someText"}
 }
 result = client.create_recordset(new_rs, status=202)
 result_rs = result["recordSet"]
-if client.wait_until_recordset_exists(result_rs["zoneId"], result_rs["id"]):
-print("\r\n!!! record set exists !!!")
-else:
-print("\r\n!!! record set does not exist !!!")
+client.wait_until_recordset_exists(result_rs["zoneId"], result_rs["id"])

 return result_rs

@@ -379,10 +375,7 @@ def seed_ptr_recordset(client, record_name, zone, records=[{"ptrdname": "foo.com
 }
 result = client.create_recordset(new_rs, status=202)
 result_rs = result["recordSet"]
-if client.wait_until_recordset_exists(result_rs["zoneId"], result_rs["id"]):
-print("\r\n!!! record set exists !!!")
-else:
-print("\r\n!!! record set does not exist !!!")
+client.wait_until_recordset_exists(result_rs["zoneId"], result_rs["id"])

 return result_rs

@@ -536,14 +529,19 @@ def clear_recordset_list(to_delete, client):
 try:
 delete_result = client.delete_recordset(result_rs["zone"]["id"], result_rs["recordSet"]["id"], status=202)
 delete_changes.append(delete_result)
+except AssertionError:
+pass
 except Exception:
 traceback.print_exc()
+raise
 for change in delete_changes:
 try:
 client.wait_until_recordset_change_status(change, "Complete")
+except AssertionError:
+pass
 except Exception:
 traceback.print_exc()
-pass
+raise


 def clear_zoneid_rsid_tuple_list(to_delete, client):
@@ -552,15 +550,19 @@ def clear_zoneid_rsid_tuple_list(to_delete, client):
 try:
 delete_result = client.delete_recordset(tup[0], tup[1], status=202)
 delete_changes.append(delete_result)
+except AssertionError:
+pass
 except Exception:
 traceback.print_exc()
-pass
+raise
 for change in delete_changes:
 try:
 client.wait_until_recordset_change_status(change, "Complete")
+except AssertionError:
+pass
 except Exception:
 traceback.print_exc()
-pass
+raise


 def get_group_json(group_name, email="test@test.com", description="this is a description", members=[{"id": "ok"}],
@ -28,7 +28,8 @@ class VinylDNSClient(object):
|
|||||||
"Accept": "application/json, text/plain",
|
"Accept": "application/json, text/plain",
|
||||||
"Content-Type": "application/json"
|
"Content-Type": "application/json"
|
||||||
}
|
}
|
||||||
|
self.created_zones = []
|
||||||
|
self.created_groups = []
|
||||||
self.signer = AwsSigV4RequestSigner(self.index_url, access_key, secret_key)
|
self.signer = AwsSigV4RequestSigner(self.index_url, access_key, secret_key)
|
||||||
self.session = self.requests_retry_session()
|
self.session = self.requests_retry_session()
|
||||||
self.session_not_found_ok = self.requests_retry_not_found_ok_session()
|
self.session_not_found_ok = self.requests_retry_not_found_ok_session()
|
||||||
@@ -39,11 +40,18 @@ class VinylDNSClient(object):
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.tear_down()

+    def clear_groups(self):
+        for group_id in self.created_groups:
+            self.delete_group(group_id)
+
+    def clear_zones(self):
+        self.abandon_zones(self.created_zones)
+
     def tear_down(self):
         self.session.close()
         self.session_not_found_ok.close()

-    def requests_retry_not_found_ok_session(self, retries=5, backoff_factor=0.4, status_forcelist=(500, 502, 504), session=None):
+    def requests_retry_not_found_ok_session(self, retries=20, backoff_factor=0.1, status_forcelist=(500, 502, 504), session=None):
         session = session or requests.Session()
         retry = Retry(
             total=retries,
@@ -57,7 +65,7 @@ class VinylDNSClient(object):
         session.mount("https://", adapter)
         return session

-    def requests_retry_session(self, retries=5, backoff_factor=0.4, status_forcelist=(500, 502, 504), session=None):
+    def requests_retry_session(self, retries=20, backoff_factor=0.1, status_forcelist=(500, 502, 504), session=None):
         session = session or requests.Session()
         retry = Retry(
             total=retries,
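Both session factories keep the same urllib3 Retry wiring but move from 5 retries with a 0.4s backoff factor to 20 retries with a 0.1s backoff factor, so requests retry sooner and for longer, presumably to tolerate contention when the functional tests run in parallel. A self-contained sketch of equivalent wiring; the function name is illustrative:

    # Standalone sketch of the retry-session construction used by these helpers.
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    def retry_session(retries=20, backoff_factor=0.1, status_forcelist=(500, 502, 504), session=None):
        session = session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=backoff_factor,   # exponential backoff between attempts
            status_forcelist=status_forcelist,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        return session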
@@ -104,13 +112,9 @@ class VinylDNSClient(object):

         if status_code is not None:
             if isinstance(status_code, Iterable):
-                if response.status_code not in status_code:
-                    print(response.text)
-                    assert_that(response.status_code, is_in(status_code))
+                assert_that(response.status_code, is_in(status_code), response.text)
             else:
-                if response.status_code != status_code:
-                    print(response.text)
-                    assert_that(response.status_code, is_(status_code))
+                assert_that(response.status_code, is_(status_code), response.text)

         try:
             return response.status_code, response.json()
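The old print-then-assert pairs collapse into single PyHamcrest assertions that pass response.text as the assertion reason, so a failed status check carries the response body in the failure message instead of relying on captured stdout. A small illustration of that pattern (the values are made up):

    # Sketch of the assertion style now used in make_request.
    from hamcrest import assert_that, is_, is_in

    status, body = 202, '{"status": "Pending"}'
    assert_that(status, is_in((200, 202)), body)  # the reason string is shown if the matcher fails
    assert_that(status, is_(202), body)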
@@ -177,6 +181,9 @@ class VinylDNSClient(object):
         url = urljoin(self.index_url, "/groups")
         response, data = self.make_request(url, "POST", self.headers, json.dumps(group), **kwargs)

+        if type(data) != str and "id" in data:
+            self.created_groups.append(data["id"])
+
         return data

     def get_group(self, group_id, **kwargs):
@@ -213,7 +220,7 @@ class VinylDNSClient(object):

         return data

-    def list_my_groups(self, group_name_filter=None, start_from=None, max_items=None, ignore_access=False, **kwargs):
+    def list_my_groups(self, group_name_filter=None, start_from=None, max_items=200, ignore_access=False, **kwargs):
         """
         Retrieves my groups
         :param start_from: the start key of the page
@@ -332,6 +339,9 @@ class VinylDNSClient(object):
         url = urljoin(self.index_url, "/zones")
         response, data = self.make_request(url, "POST", self.headers, json.dumps(zone), **kwargs)

+        if type(data) != str and "zone" in data:
+            self.created_zones.append(data["zone"]["id"])
+
         return data

     def update_zone(self, zone, **kwargs):
@@ -361,6 +371,7 @@ class VinylDNSClient(object):
         :param zone_id: the id of the zone to be deleted
         :return: nothing, will fail if the status code was not expected
         """

         url = urljoin(self.index_url, "/zones/{0}".format(zone_id))
         response, data = self.make_request(url, "DELETE", self.headers, not_found_ok=True, **kwargs)
@@ -471,7 +482,6 @@ class VinylDNSClient(object):

         url = urljoin(self.index_url, "/zones/{0}/recordsets".format(recordset["zoneId"]))
         response, data = self.make_request(url, "POST", self.headers, json.dumps(recordset), **kwargs)

         return data

     def delete_recordset(self, zone_id, rs_id, **kwargs):
@@ -752,7 +762,7 @@ class VinylDNSClient(object):
         Waits a period of time for the record set creation to complete.

         :param zone_id: the id of the zone the record set lives in
-        :param record_set_id: the id of the recprdset that has been created.
+        :param record_set_id: the id of the recordset that has been created.
         :param kw: Additional parameters for the http request
         :return: True when the recordset creation is complete False if the timeout expires
         """
@@ -764,6 +774,7 @@ class VinylDNSClient(object):
             time.sleep(RETRY_WAIT)
             response, data = self.make_request(url, "GET", self.headers, not_found_ok=True, status=(200, 404), **kwargs)

+        assert_that(response, equal_to(200), data)
         if response == 200:
             return data

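The added assert_that(response, equal_to(200), data) turns a quiet timeout in this wait helper into an immediate, descriptive failure that includes the last response body. A generic polling sketch in the same spirit; MAX_RETRIES and RETRY_WAIT stand in for the module-level constants, and fetch is a placeholder for the actual request:

    # Illustrative polling loop; not the client's actual implementation.
    import time
    from hamcrest import assert_that, equal_to

    MAX_RETRIES = 30   # assumed values, standing in for the module constants
    RETRY_WAIT = 0.05

    def wait_until_ok(fetch):
        """Poll fetch() -> (status, data) until it returns 200 or retries are exhausted."""
        status, data = fetch()
        retries = MAX_RETRIES
        while status != 200 and retries > 0:
            time.sleep(RETRY_WAIT)
            status, data = fetch()
            retries -= 1
        assert_that(status, equal_to(200), str(data))  # fail loudly with the last response body
        return data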
@@ -17,11 +17,11 @@
 package vinyldns.api.backend.dns

 import java.net.SocketAddress

 import cats.effect._
 import cats.syntax.all._
 import org.slf4j.{Logger, LoggerFactory}
 import org.xbill.DNS
+import org.xbill.DNS.Name
 import vinyldns.api.domain.zone.ZoneTooLargeError
 import vinyldns.core.crypto.CryptoAlgebra
 import vinyldns.core.domain.backend.{Backend, BackendResponse}
@@ -166,7 +166,7 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
     logger.info(s"Querying for dns dnsRecordName='${dnsName.toString}'; recordType='$typ'")
     val lookup = new DNS.Lookup(dnsName, toDnsRecordType(typ))
     lookup.setResolver(resolver)
-    lookup.setSearchPath(Array.empty[String])
+    lookup.setSearchPath(List(Name.empty).asJava)
     lookup.setCache(null)

     Right(new DnsQuery(lookup, zoneDnsName(zoneName)))
@@ -283,6 +283,7 @@ object DnsBackend {
     val (host, port) = parseHostAndPort(conn.primaryServer)
     val resolver = new DNS.SimpleResolver(host)
     resolver.setPort(port)
+    resolver.setTCP(true)
     tsig.foreach(resolver.setTSIGKey)
     resolver
   }
@@ -125,7 +125,7 @@ trait DnsConversions {
   /* Remove the additional record of the TSIG key from the message before generating the string */
   def obscuredDnsMessage(msg: DNS.Message): DNS.Message = {
     val clone = msg.clone.asInstanceOf[DNS.Message]
-    val sections = clone.getSectionArray(DNS.Section.ADDITIONAL)
+    val sections = clone.getSection(DNS.Section.ADDITIONAL).asScala
     if (sections != null && sections.nonEmpty) {
       sections.filter(_.getType == DNS.Type.TSIG).foreach { tsigRecord =>
         clone.removeRecord(tsigRecord, DNS.Section.ADDITIONAL)
@@ -231,7 +231,7 @@ trait DnsConversions {

   def fromCNAMERecord(r: DNS.CNAMERecord, zoneName: DNS.Name, zoneId: String): RecordSet =
     fromDnsRecord(r, zoneName, zoneId) { data =>
-      List(CNAMEData(Fqdn(data.getAlias.toString)))
+      List(CNAMEData(Fqdn(data.getTarget.toString)))
     }

   def fromDSRecord(r: DNS.DSRecord, zoneName: DNS.Name, zoneId: String): RecordSet =
@@ -19,7 +19,8 @@ package vinyldns.core
 object Messages {

   // Error displayed when less than two letters or numbers is filled in Record Name Filter field in RecordSetSearch page
-  val RecordNameFilterError = "Record Name Filter field must contain at least two letters or numbers to perform a RecordSet Search."
+  val RecordNameFilterError =
+    "Record Name Filter field must contain at least two letters or numbers to perform a RecordSet Search."

   /*
    * Error displayed when attempting to create group with name that already exists
@@ -28,7 +29,8 @@ object Messages {
    * 1. [string] group name
    * 2. [string] group email address
    */
-  val GroupAlreadyExistsErrorMsg = "Group with name %s already exists. Please try a different name or contact %s to be added to the group."
+  val GroupAlreadyExistsErrorMsg =
+    "Group with name %s already exists. Please try a different name or contact %s to be added to the group."

   /*
    * Error displayed when deleting a group being the admin of a zone
@@ -36,7 +38,8 @@ object Messages {
    * Placeholders:
    * 1. [string] group name
    */
-  val ZoneAdminError = "%s is the admin of a zone. Cannot delete. Please transfer the ownership to another group before deleting."
+  val ZoneAdminError =
+    "%s is the admin of a zone. Cannot delete. Please transfer the ownership to another group before deleting."

   /*
    * Error displayed when deleting a group being the owner for a record set
@@ -45,7 +48,8 @@ object Messages {
    * 1. [string] group name
    * 2. [string] record set id
    */
-  val RecordSetOwnerError = "%s is the owner for a record set including %s. Cannot delete. Please transfer the ownership to another group before deleting."
+  val RecordSetOwnerError =
+    "%s is the owner for a record set including %s. Cannot delete. Please transfer the ownership to another group before deleting."

   /*
    * Error displayed when deleting a group which has an ACL rule for a zone
@@ -54,7 +58,8 @@ object Messages {
    * 1. [string] group name
    * 2. [string] zone id
    */
-  val ACLRuleError = "%s has an ACL rule for a zone including %s. Cannot delete. Please transfer the ownership to another group before deleting."
+  val ACLRuleError =
+    "%s has an ACL rule for a zone including %s. Cannot delete. Please transfer the ownership to another group before deleting."

   // Error displayed when NSData field is not a positive integer
   val NSDataError = "NS data must be a positive integer"
@@ -71,6 +76,7 @@ object Messages {
    * 3. [string] owner group name | owner group id
    * 4. [string] contact email
    */
-  val NotAuthorizedErrorMsg = "User \"%s\" is not authorized. Contact %s owner group: %s at %s to make DNS changes."
+  val NotAuthorizedErrorMsg =
+    "User \"%s\" is not authorized. Contact %s owner group: %s at %s to make DNS changes."

 }
238 modules/mysql/src/main/resources/test/ddl.sql (new file)
@@ -0,0 +1,238 @@
-- This script will populate the database with the VinylDNS schema
-- It is used for testing with the H2 in-memory database where
-- migration is not necessary.
--
-- This should be run via the INIT parameter in the H2 JDBC URL
-- Ex: "jdbc:h2:mem:vinyldns;MODE=MYSQL;DB_CLOSE_DELAY=-1;DATABASE_TO_LOWER=TRUE;INIT=RUNSCRIPT FROM 'classpath:test/ddl.sql'"
--

CREATE TABLE batch_change
(
    id char(36) not null primary key,
    user_id char(36) not null,
    user_name varchar(45) not null,
    created_time datetime not null,
    comments varchar(1024) null,
    owner_group_id char(36) null,
    approval_status tinyint null,
    reviewer_id char(36) null,
    review_comment varchar(1024) null,
    review_timestamp datetime null,
    scheduled_time datetime null,
    cancelled_timestamp datetime null
);

create index batch_change_approval_status_index
    on batch_change (approval_status);

create index batch_change_user_id_created_time_index
    on batch_change (user_id, created_time);

create index batch_change_user_id_index
    on batch_change (user_id);

create table group_change
(
    id char(36) not null primary key,
    group_id char(36) not null,
    created_timestamp bigint(13) not null,
    data blob not null
);

create index group_change_group_id_index
    on group_change (group_id);

create table `groups`
(
    id char(36) not null primary key,
    name varchar(256) not null,
    data blob not null,
    description varchar(256) null,
    created_timestamp datetime not null,
    email varchar(256) not null
);

create index groups_name_index
    on `groups` (name);

create table membership
(
    user_id char(36) not null,
    group_id char(36) not null,
    is_admin tinyint(1) not null,
    primary key (user_id, group_id)
);

create table message_queue
(
    id char(36) not null primary key,
    message_type tinyint null,
    in_flight bit null,
    data blob not null,
    created datetime not null,
    updated datetime not null,
    timeout_seconds int not null,
    attempts int default 0 not null
);

create index message_queue_inflight_index
    on message_queue (in_flight);

create index message_queue_timeout_index
    on message_queue (timeout_seconds);

create index message_queue_updated_index
    on message_queue (updated);

create table record_change
(
    id char(36) not null primary key,
    zone_id char(36) not null,
    created bigint(13) not null,
    type tinyint not null,
    data blob not null
);

create index record_change_created_index
    on record_change (created);

create index record_change_zone_id_index
    on record_change (zone_id);

create table recordset
(
    id char(36) not null primary key,
    zone_id char(36) not null,
    name varchar(256) not null,
    type tinyint not null,
    data blob not null,
    fqdn varchar(255) not null,
    owner_group_id char(36) null,
    constraint recordset_zone_id_name_type_index
        unique (zone_id, name, type)
);

create index recordset_fqdn_index
    on recordset (fqdn);

create index recordset_owner_group_id_index
    on recordset (owner_group_id);

create index recordset_type_index
    on recordset (type);

create table single_change
(
    id char(36) not null primary key,
    seq_num smallint not null,
    input_name varchar(255) not null,
    change_type varchar(25) not null,
    data blob not null,
    status varchar(16) not null,
    batch_change_id char(36) not null,
    record_set_change_id char(36) null,
    record_set_id char(36) null,
    zone_id char(36) null,
    constraint fk_single_change_batch_change_id_batch_change
        foreign key (batch_change_id) references batch_change (id)
            on delete cascade
);

create index single_change_batch_change_id_index
    on single_change (batch_change_id);

create index single_change_record_set_change_id_index
    on single_change (record_set_change_id);

create table stats
(
    id bigint auto_increment primary key,
    name varchar(255) not null,
    count bigint not null,
    created datetime not null
);

create index stats_name_created_index
    on stats (name, created);

create index stats_name_index
    on stats (name);

create table task
(
    name varchar(255) not null primary key,
    in_flight bit not null,
    created datetime not null,
    updated datetime null
);

create table user
(
    id char(36) not null primary key,
    user_name varchar(256) not null,
    access_key varchar(256) not null,
    data blob not null
);

create index user_access_key_index
    on user (access_key);

create index user_user_name_index
    on user (user_name);

create table user_change
(
    change_id char(36) not null primary key,
    user_id char(36) not null,
    data blob not null,
    created_timestamp bigint(13) not null
);

create table zone
(
    id char(36) not null primary key,
    name varchar(256) not null,
    admin_group_id char(36) not null,
    data blob not null,
    constraint zone_name_unique
        unique (name)
);

create index zone_admin_group_id_index
    on zone (admin_group_id);

create index zone_name_index
    on zone (name);

create table zone_access
(
    accessor_id char(36) not null,
    zone_id char(36) not null,
    primary key (accessor_id, zone_id),
    constraint fk_zone_access_zone_id
        foreign key (zone_id) references zone (id)
            on delete cascade
);

create index zone_access_accessor_id_index
    on zone_access (accessor_id);

create index zone_access_zone_id_index
    on zone_access (zone_id);

create table zone_change
(
    change_id char(36) not null primary key,
    zone_id char(36) not null,
    data blob not null,
    created_timestamp bigint(13) not null
);

create index zone_change_created_timestamp_index
    on zone_change (created_timestamp);

create index zone_change_zone_id_index
    on zone_change (zone_id);

INSERT IGNORE INTO task(name, in_flight, created, updated)
VALUES ('user_sync', 0, NOW(), NULL);
@@ -28,6 +28,10 @@ object MySqlConnector {
   private val logger = LoggerFactory.getLogger("MySqlConnector")

   def runDBMigrations(config: MySqlConnectionConfig): IO[Unit] = {
+    // We can skip migrations for h2, we'll use the test/ddl.sql for initializing
+    // that for testing
+    if (config.driver.contains("h2")) IO.unit
+    else {
     val migrationConnectionSettings = MySqlDataSourceSettings(
       "flywayConnectionPool",
       config.driver,
@@ -57,6 +61,7 @@ object MySqlConnector {
       logger.info("migrations complete")
     }
   }
+  }

   def getDataSource(settings: MySqlDataSourceSettings): IO[HikariDataSource] = IO {

@@ -30,7 +30,7 @@ object Dependencies {
     "com.github.ben-manes.caffeine" % "caffeine" % "2.2.7",
     "com.github.cb372" %% "scalacache-caffeine" % "0.9.4",
     "com.google.protobuf" % "protobuf-java" % "2.6.1",
-    "dnsjava" % "dnsjava" % "2.1.8",
+    "dnsjava" % "dnsjava" % "3.4.2",
     "org.apache.commons" % "commons-lang3" % "3.4",
     "org.apache.commons" % "commons-text" % "1.4",
     "org.flywaydb" % "flyway-core" % "5.1.4",
@@ -73,7 +73,7 @@ object Dependencies {
     "io.dropwizard.metrics" % "metrics-jvm" % "3.2.2",
     "co.fs2" %% "fs2-core" % "2.3.0",
     "javax.xml.bind" % "jaxb-api" % "2.3.0",
-    "javax.activation" % "activation" % "1.1.1"
+    "javax.activation" % "activation" % "1.1.1",
   )

   lazy val mysqlDependencies = Seq(
@@ -81,7 +81,8 @@ object Dependencies {
     "org.mariadb.jdbc" % "mariadb-java-client" % "2.3.0",
     "org.scalikejdbc" %% "scalikejdbc" % scalikejdbcV,
     "org.scalikejdbc" %% "scalikejdbc-config" % scalikejdbcV,
-    "com.zaxxer" % "HikariCP" % "3.2.0"
+    "com.zaxxer" % "HikariCP" % "3.2.0",
+    "com.h2database" % "h2" % "1.4.200",
   )

   lazy val sqsDependencies = Seq(