Mirror of https://github.com/VinylDNS/vinyldns, synced 2025-08-22 10:10:12 +00:00

Update log levels and adopt ECS (#1103)

* Update log messages, log levels, and log formatting

parent 1c2635a441
commit 4b31f8b9d9
@@ -1,8 +1,9 @@
 <configuration>
     <!-- Test configuration, log to console so we can get the docker logs -->
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

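Note: with the EcsEncoder in place, the console appender no longer emits the pattern-formatted lines above but Elastic Common Schema JSON, roughly of the form `{"@timestamp":"...","log.level":"INFO","message":"...","service.name":"vinyldns-api"}`. The exact field set depends on the logback-ecs-encoder version; the `serviceName` and `serviceNodeName` elements configured here are expected to populate the `service.name` and `service.node.name` fields.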
@@ -2,9 +2,10 @@

     <conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />

-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-portal</serviceName>
+            <serviceNodeName>vinyldns-portal</serviceNodeName>
         </encoder>
     </appender>
     <!--
@@ -15,7 +16,7 @@
     <logger name="application" level="DEBUG" />

     <root level="INFO">
-        <appender-ref ref="STDOUT" />
+        <appender-ref ref="CONSOLE" />
     </root>

 </configuration>
@@ -1,7 +1,8 @@
 <configuration>
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%msg%n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

@@ -1,8 +1,9 @@
 <configuration>
     <!-- Test configuration, log to console so we can get the docker logs -->
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

@@ -18,7 +19,7 @@
         <appender-ref ref="CONSOLE"/>
     </logger>

-    <root level="ERROR">
+    <root level="INFO">
         <appender-ref ref="CONSOLE"/>
     </root>
 </configuration>
@@ -1,8 +1,9 @@
 <configuration>
     <!-- Test configuration, log to console so we can get the docker logs -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

@@ -18,7 +19,7 @@
         <appender-ref ref="CONSOLE"/>
     </logger>

-    <root level="ERROR">
+    <root level="INFO">
         <appender-ref ref="CONSOLE"/>
     </root>
 </configuration>
@@ -219,10 +219,10 @@ object Boot extends App {
       ()
     }

-    logger.error(
+    logger.info(
       s"STARTING VINYLDNS SERVER ON ${vinyldnsConfig.httpConfig.host}:${vinyldnsConfig.httpConfig.port}"
     )
-    logger.error(banner)
+    logger.info(banner)

     // Starts up our http server
     implicit val actorSystem: ActorSystem = system
@@ -237,7 +237,7 @@ object Boot extends App {
     // runApp gives us a Task, we actually have to run it! Running it will yield a Future, which is our app!
     runApp().unsafeRunAsync {
       case Right(_) =>
-        logger.error("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
+        logger.info("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
       case Left(startupFailure) =>
         logger.error(s"VINYLDNS SERVER UNABLE TO START $startupFailure")
         startupFailure.printStackTrace()
@@ -237,7 +237,7 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
       logger.info(s"Received TRY_AGAIN from DNS lookup; converting error: ${query.error}")
       fromDnsRcodeToError(DNS.Rcode.value(query.error), query.error)
     } else {
-      logger.info(s"Unparseable error code returned from DNS: ${query.error}")
+      logger.warn(s"Unparseable error code returned from DNS: ${query.error}")
       Left(TryAgain(query.error))
     }

@@ -33,7 +33,7 @@ import vinyldns.core.queue.MessageQueue
 class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue: MessageQueue)
     extends BatchChangeConverterAlgebra {

-  private val logger = LoggerFactory.getLogger("BatchChangeConverter")
+  private val logger = LoggerFactory.getLogger(classOf[BatchChangeConverter])

   def sendBatchForProcessing(
       batchChange: BatchChange,
@@ -46,7 +46,7 @@ import vinyldns.core.domain.batch.BatchChangeApprovalStatus._
 class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepository: UserRepository)
     extends Notifier {

-  private val logger = LoggerFactory.getLogger("EmailNotifier")
+  private val logger = LoggerFactory.getLogger(classOf[EmailNotifier])

   def notify(notification: Notification[_]): IO[Unit] =
     notification.change match {
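The two hunks above replace string-named loggers with class-based ones. A minimal sketch of the idiom, using a hypothetical `ExampleService` rather than a VinylDNS class: deriving the name via `classOf` keeps the logger name in sync with the class if it is ever renamed or moved.

```scala
import org.slf4j.{Logger, LoggerFactory}

class ExampleService {
  // Logger name is derived from the class, so it follows renames and package moves,
  // unlike a hard-coded string such as LoggerFactory.getLogger("ExampleService").
  private val logger: Logger = LoggerFactory.getLogger(classOf[ExampleService])

  def start(): Unit = logger.info("starting") // emitted under the ExampleService logger
}
```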
@@ -1,7 +1,8 @@
 <configuration>
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%msg%n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

@@ -1,8 +1,9 @@
 <configuration>
     <!-- Test configuration, log to console so we can get the docker logs -->
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-api</serviceName>
+            <serviceNodeName>vinyldns-api</serviceNodeName>
         </encoder>
     </appender>

@@ -27,7 +27,7 @@ object BackendLoader {
   def load(configs: List[BackendProviderConfig]): IO[List[BackendProvider]] = {
     def loadOne(config: BackendProviderConfig): IO[BackendProvider] =
       for {
-        _ <- IO(logger.error(s"Attempting to load backend ${config.className}"))
+        _ <- IO(logger.info(s"Attempting to load backend ${config.className}"))
         provider <- IO(
           Class
             .forName(config.className)
@@ -25,7 +25,7 @@ object MessageQueueLoader {

   def load(config: MessageQueueConfig): IO[MessageQueue] =
     for {
-      _ <- IO(logger.error(s"Attempting to load queue ${config.className}"))
+      _ <- IO(logger.info(s"Attempting to load queue ${config.className}"))
       provider <- IO(
         Class
           .forName(config.className)
@@ -67,7 +67,7 @@ object DataStoreLoader {
     for {
       _ <- IO(
         logger
-          .error(s"Attempting to load repos ${config.repositories.keys} from ${config.className}")
+          .info(s"Attempting to load repos ${config.repositories.keys} from ${config.className}")
       )
       provider <- IO(
         Class
@@ -1,8 +1,9 @@
 <configuration>
     <!-- Test configuration, log to console so we can get the docker logs -->
     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
-            <pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n %nopexception </pattern>
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-core</serviceName>
+            <serviceNodeName>vinyldns-core</serviceNodeName>
         </encoder>
     </appender>

@@ -60,13 +60,13 @@ object MySqlConnector {

       // Runs flyway migrations
       migration.load().migrate()
-      logger.info("migrations complete")
+      logger.info("Migrations complete")
     }
   }

   def getDataSource(settings: MySqlDataSourceSettings): IO[HikariDataSource] = IO {

-    logger.error(s"Initializing data source with settings: $settings")
+    logger.info(s"Initializing data source with settings: $settings")

     val dsConfig = new HikariConfig()

@@ -96,7 +96,7 @@ object MySqlConnector {
     result match {
       case Success(t) => Some(t)
       case Failure(e) =>
-        logger.warn("failed to startup database connection, retrying..")
+        logger.warn("Failed to startup database connection, retrying..")
         // Hard abort if we exhaust retries
         if (currentIndex >= times - 1) {
           throw e
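For context, the second hunk above sits inside a connect-with-retry loop. A generic sketch of that retry-then-rethrow pattern, with illustrative names rather than the project's actual helper, might look like this:

```scala
import org.slf4j.LoggerFactory
import scala.util.{Failure, Success, Try}

// Illustrative sketch only: retry an operation a fixed number of times,
// logging each failure and rethrowing the last error once retries are exhausted.
object RetryExample {
  private val logger = LoggerFactory.getLogger("RetryExample")

  def retry[T](times: Int)(op: => T): T = {
    def attempt(currentIndex: Int): T =
      Try(op) match {
        case Success(t) => t
        case Failure(e) =>
          logger.warn(s"Failed attempt ${currentIndex + 1} of $times, retrying..")
          // Hard abort if we exhaust retries
          if (currentIndex >= times - 1) throw e else attempt(currentIndex + 1)
      }
    attempt(0)
  }
}
```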
@@ -52,7 +52,7 @@ class MySqlMessageQueueProvider extends MessageQueueProvider {
     val queueConnectionSettings = MySqlDataSourceSettings(config, "mysqlQueuePool")

     getDataSource(queueConnectionSettings).map { dataSource =>
-      logger.error("configuring connection pool for queue")
+      logger.info("Configuring connection pool for queue")

       // note this is being called 2x in the case you use the mysql datastores and
       // loader. That should be ok
@@ -64,7 +64,7 @@ class MySqlMessageQueueProvider extends MessageQueueProvider {
         new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
       )

-      logger.error("queue connection pool init complete")
+      logger.info("Queue connection pool init complete")
     }
   }

@@ -83,7 +83,7 @@ class MySqlDataStoreProvider extends DataStoreProvider {
     val dbConnectionSettings = MySqlDataSourceSettings(config, "mysqlDbPool")

     getDataSource(dbConnectionSettings).map { dataSource =>
-      logger.error("configuring connection pool")
+      logger.info("Configuring connection pool")

       // pulled out of DBs.setupAll since we're no longer using the db. structure for config
       DBs.loadGlobalSettings()
@@ -93,13 +93,13 @@ class MySqlDataStoreProvider extends DataStoreProvider {
         new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
       )

-      logger.error("database init complete")
+      logger.info("Database init complete")
     }
   }

   private def shutdown(): IO[Unit] =
     IO(DBs.close())
-      .handleError(e => logger.error(s"exception occurred while shutting down", e))
+      .handleError(e => logger.error(s"Exception occurred while shutting down", e))

   private final val HEALTH_CHECK =
     sql"""
@@ -64,7 +64,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
   def save(db: DB, groupChange: GroupChange): IO[GroupChange] =
     monitor("repo.GroupChange.save") {
       IO {
-        logger.info(
+        logger.debug(
           s"Saving group change with (group_change_id, group_id): " +
             s"(${groupChange.id}, ${groupChange.newGroup.id})"
         )
@@ -87,7 +87,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
   def getGroupChange(groupChangeId: String): IO[Option[GroupChange]] =
     monitor("repo.GroupChange.getGroupChange") {
       IO {
-        logger.info(s"Getting group change with group_change_id: $groupChangeId")
+        logger.debug(s"Getting group change with group_change_id: $groupChangeId")
         DB.readOnly { implicit s =>
           GET_GROUP_CHANGE
             .bind(groupChangeId)
@@ -105,7 +105,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
   ): IO[ListGroupChangesResults] =
     monitor("repo.GroupChange.getGroupChanges") {
       IO {
-        logger.info(
+        logger.debug(
           s"Getting group changes with (groupId, startFrom, maxItems): ($groupId, $startFrom, $maxItems)"
         )
         DB.readOnly { implicit s =>
@@ -72,7 +72,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def save(db: DB, group: Group): IO[Group] =
     monitor("repo.Group.save") {
       IO {
-        logger.info(s"Saving group with (id, name): (${group.id}, ${group.name})")
+        logger.debug(s"Saving group with (id, name): (${group.id}, ${group.name})")
         db.withinTx { implicit s =>
           PUT_GROUP
             .bindByName(
@@ -94,7 +94,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def delete(group: Group): IO[Group] =
     monitor("repo.Group.delete") {
       IO {
-        logger.info(s"Deleting group with (id, name): (${group.id}, ${group.name})")
+        logger.debug(s"Deleting group with (id, name): (${group.id}, ${group.name})")
         DB.localTx { implicit s =>
           DELETE_GROUP
             .bind(group.id)
@@ -109,7 +109,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def getGroup(groupId: String): IO[Option[Group]] =
     monitor("repo.Group.getGroup") {
       IO {
-        logger.info(s"Getting group with id: $groupId")
+        logger.debug(s"Getting group with id: $groupId")
         DB.readOnly { implicit s =>
           GET_GROUP_BY_ID
             .bind(groupId)
@@ -123,7 +123,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def getGroups(groupIds: Set[String]): IO[Set[Group]] =
     monitor("repo.Group.getGroups") {
       IO {
-        logger.info(s"Getting group with ids: $groupIds")
+        logger.debug(s"Getting group with ids: $groupIds")
         if (groupIds.isEmpty)
           Set[Group]()
         else {
@@ -144,7 +144,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def getGroupByName(groupName: String): IO[Option[Group]] =
     monitor("repo.Group.getGroupByName") {
       IO {
-        logger.info(s"Getting group with name: $groupName")
+        logger.debug(s"Getting group with name: $groupName")
         DB.readOnly { implicit s =>
           GET_GROUP_BY_NAME
             .bind(groupName)
@@ -158,7 +158,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
   def getAllGroups(): IO[Set[Group]] =
     monitor("repo.Group.getAllGroups") {
       IO {
-        logger.info(s"Getting all groups")
+        logger.debug(s"Getting all groups")
         DB.readOnly { implicit s =>
           GET_ALL_GROUPS
             .map(toGroup(1))
@@ -83,7 +83,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
           // Diff is used to check if the users we are trying to add in the group is already present.
           // If they don't exist in the group, we save the users.
           val saveMembers = nonEmpty.diff(existingMembers)
-          logger.info(s"Saving into group $groupId members $nonEmpty")
+          logger.debug(s"Saving into group $groupId members $nonEmpty")

           db.withinTx { implicit s =>
             SAVE_MEMBERS.batchByName(saveParams(saveMembers, groupId, isAdmin): _*).apply()
@@ -100,7 +100,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
       case nonEmpty =>
         monitor("repo.Membership.removeMembers") {
           IO {
-            logger.info(s"Removing from group $groupId members $nonEmpty")
+            logger.debug(s"Removing from group $groupId members $nonEmpty")
            db.withinTx { implicit s =>
              val inClause = " AND user_id IN (" + nonEmpty.as("?").mkString(",") + ")"
              val query = BASE_REMOVE_MEMBERS + inClause
@@ -118,7 +118,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
   def getExistingMembers(groupId: String): Set[String] =
     monitor("repo.Membership.getExistingUsers") {
       IO {
-        logger.info(s"Getting existing users")
+        logger.debug(s"Getting existing users")
         DB.readOnly { implicit s =>
           SQL(GET_EXISTING_USERS)
             .bind(groupId)
@@ -133,7 +133,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
   def getGroupsForUser(userId: String): IO[Set[String]] =
     monitor("repo.Membership.getGroupsForUser") {
       IO {
-        logger.info(s"Getting groups for user $userId")
+        logger.debug(s"Getting groups for user $userId")
         DB.readOnly { implicit s =>
           GET_GROUPS_FOR_USER
             .bind(userId)
@@ -147,7 +147,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {

   def getUsersForGroup(groupId: String, isAdmin: Option[Boolean]): IO[Set[String]] =
     IO {
-      logger.info(s"Getting users for group $groupId")
+      logger.debug(s"Getting users for group $groupId")
       DB.readOnly { implicit s =>
         val baseConditions = Seq('groupId -> groupId)

@@ -383,7 +383,7 @@ class MySqlRecordSetRepository extends RecordSetRepository with Monitored {
           .update()
           .apply()
       }
-      logger.info(s"Deleted $numDeleted records from zone $zoneName (zone id: $zoneId)")
+      logger.debug(s"Deleted $numDeleted records from zone $zoneName (zone id: $zoneId)")
     }.handleErrorWith { error =>
       logger.error(s"Failed deleting records from zone $zoneName (zone id: $zoneId)", error)
       IO.raiseError(error)
@@ -48,7 +48,7 @@ class MySqlUserChangeRepository

   def get(changeId: String): IO[Option[UserChange]] =
     monitor("repo.UserChange.get") {
-      logger.info(s"Getting user change with id: $changeId")
+      logger.debug(s"Getting user change with id: $changeId")
       IO {
         DB.readOnly { implicit s =>
           GET_USER_CHANGE_BY_ID
@@ -62,7 +62,7 @@ class MySqlUserChangeRepository

   def save(change: UserChange): IO[UserChange] =
     monitor("repo.UserChange.save") {
-      logger.info(s"Saving user change: $change")
+      logger.debug(s"Saving user change: $change")
       IO {
         DB.localTx { implicit s =>
           PUT_USER_CHANGE
@@ -68,7 +68,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)

   def getUser(userId: String): IO[Option[User]] =
     monitor("repo.User.getUser") {
-      logger.info(s"Getting user with id: $userId")
+      logger.debug(s"Getting user with id: $userId")
       IO {
         DB.readOnly { implicit s =>
           GET_USER_BY_ID
@@ -86,7 +86,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
       maxItems: Option[Int]
   ): IO[ListUsersResults] =
     monitor("repo.User.getUsers") {
-      logger.info(s"Getting users with ids: $userIds")
+      logger.debug(s"Getting users with ids: $userIds")
       IO {
         if (userIds.isEmpty)
           ListUsersResults(List[User](), None)
@@ -133,7 +133,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
   def getUserByAccessKey(accessKey: String): IO[Option[User]] =
     monitor("repo.User.getUserByAccessKey") {
       IO {
-        logger.info(s"Getting user with accessKey: $accessKey")
+        logger.debug(s"Getting user with accessKey: $accessKey")
         DB.readOnly { implicit s =>
           GET_USER_BY_ACCESS_KEY
             .bind(accessKey)
@@ -147,7 +147,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
   def getUserByName(userName: String): IO[Option[User]] =
     monitor("repo.User.getUserByName") {
       IO {
-        logger.info(s"Getting user with userName: $userName")
+        logger.debug(s"Getting user with userName: $userName")
         DB.readOnly { implicit s =>
           GET_USER_BY_USER_NAME
             .bind(userName)
@@ -161,7 +161,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
   def save(user: User): IO[User] =
     monitor("repo.User.save") {
       IO {
-        logger.info(s"Saving user with id: ${user.id}")
+        logger.debug(s"Saving user with id: ${user.id}")
         DB.localTx { implicit s =>
           PUT_USER
             .bindByName(
@@ -180,7 +180,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
   def save(users: List[User]): IO[List[User]] =
     monitor("repo.User.save") {
       IO {
-        logger.info(s"Saving users with ids: ${users.map(_.id).mkString(", ")}")
+        logger.debug(s"Saving users with ids: ${users.map(_.id).mkString(", ")}")

         val updates = users.map { u =>
           Seq(
@@ -49,7 +49,7 @@ class MySqlZoneChangeRepository
   override def save(zoneChange: ZoneChange): IO[ZoneChange] =
     monitor("repo.ZoneChange.save") {
       IO {
-        logger.info(s"Saving zone change ${zoneChange.id}")
+        logger.debug(s"Saving zone change ${zoneChange.id}")
         DB.localTx { implicit s =>
           PUT_ZONE_CHANGE
             .bindByName(
@@ -74,7 +74,7 @@ class MySqlZoneChangeRepository
     // sorted from most recent, startFrom is an offset from the most recent change
     monitor("repo.ZoneChange.listZoneChanges") {
       IO {
-        logger.info(s"Getting zone changes for zone $zoneId")
+        logger.debug(s"Getting zone changes for zone $zoneId")
         DB.readOnly { implicit s =>
           val startValue = startFrom.getOrElse(DateTime.now().getMillis.toString)
           // maxItems gets a plus one to know if the table is exhausted so we can conditionally give a nextId
@@ -2,9 +2,10 @@

     <conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />

-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-portal</serviceName>
+            <serviceNodeName>vinyldns-portal</serviceNodeName>
         </encoder>
     </appender>
     <!--
@@ -15,7 +16,7 @@
     <logger name="application" level="OFF" />

     <root level="OFF">
-        <appender-ref ref="STDOUT" />
+        <appender-ref ref="CONSOLE" />
     </root>

 </configuration>
@@ -2,9 +2,10 @@

     <conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />

-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder class="co.elastic.logging.logback.EcsEncoder">
+            <serviceName>vinyldns-portal</serviceName>
+            <serviceNodeName>vinyldns-portal</serviceNodeName>
         </encoder>
     </appender>
     <!--
@@ -15,7 +16,7 @@
     <logger name="application" level="DEBUG" />

     <root level="INFO">
-        <appender-ref ref="STDOUT" />
+        <appender-ref ref="CONSOLE" />
     </root>

 </configuration>
@@ -140,7 +140,7 @@ class Route53Backend(
       typ: RecordSetChangeType,
       rs: ResourceRecordSet
   ): ChangeResourceRecordSetsRequest = {
-    logger.debug(s"applying change to zone, record set is $rs")
+    logger.debug(s"Applying change to zone, record set is $rs")
     new ChangeResourceRecordSetsRequest().withChangeBatch(
       new ChangeBatch().withChanges(
         new Change().withAction(changeAction(typ)).withResourceRecordSet(rs)
@@ -170,7 +170,7 @@ class Route53Backend(
         changeRequest(change.changeType, r53RecordSet).withHostedZoneId(hostedZoneId),
         client.changeResourceRecordSetsAsync
       ).map { response =>
-        logger.debug(s"applied record change $change, change result is ${response.getChangeInfo}")
+        logger.debug(s"Applied record change $change, change result is ${response.getChangeInfo}")
         BackendResponse.NoError(response.toString)
       }
     } yield result
@@ -49,7 +49,7 @@ class SqsMessageQueueProvider extends MessageQueueProvider {
       _ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
       client <- setupClient(settingsConfig)
       queueUrl <- setupQueue(client, settingsConfig.queueName)
-      _ <- IO(logger.error(s"Queue URL: $queueUrl\n"))
+      _ <- IO(logger.info(s"Queue URL: $queueUrl\n"))
     } yield new SqsMessageQueue(queueUrl, client)

   def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {
@@ -104,7 +104,7 @@ class SqsMessageQueueProvider extends MessageQueueProvider {
   def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
     // Create queue if it doesn't exist
     IO {
-      logger.error(s"Setting up queue with name [$queueName]")
+      logger.info(s"Setting up queue with name [$queueName]")
       client.getQueueUrl(queueName).getQueueUrl
     }.recoverWith {
       case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)
@@ -54,7 +54,8 @@ object Dependencies {
     "com.iheart" %% "ficus" % ficusV,
     "com.sun.mail" % "javax.mail" % "1.6.2",
     "javax.mail" % "javax.mail-api" % "1.6.2",
-    "com.amazonaws" % "aws-java-sdk-sns" % awsV withSources()
+    "com.amazonaws" % "aws-java-sdk-sns" % awsV withSources(),
+    "co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
   )

   lazy val coreDependencies = Seq(
@@ -75,7 +76,8 @@ object Dependencies {
     "javax.xml.bind" % "jaxb-api" % "2.3.0",
     "javax.activation" % "activation" % "1.1.1",
     "org.scalikejdbc" %% "scalikejdbc" % scalikejdbcV,
-    "org.scalikejdbc" %% "scalikejdbc-config" % scalikejdbcV
+    "org.scalikejdbc" %% "scalikejdbc-config" % scalikejdbcV,
+    "co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
   )

   lazy val mysqlDependencies = Seq(
@@ -86,7 +88,6 @@ object Dependencies {
     "com.zaxxer" % "HikariCP" % "3.2.0",
     "com.h2database" % "h2" % "1.4.200",
     "commons-codec" % "commons-codec" % "1.15"
-
   )

   lazy val sqsDependencies = Seq(
@@ -107,7 +108,6 @@ object Dependencies {
     "org.scalatestplus" %% "scalatestplus-mockito" % "1.0.0-M2",
     "org.scalatestplus" %% "scalatestplus-scalacheck" % "3.1.0.0-RC2",
     "commons-codec" % "commons-codec" % "1.15"
-
   )

   lazy val apiTestDependencies = commonTestDependencies ++ Seq(
@@ -115,7 +115,6 @@ object Dependencies {
     "com.typesafe.akka" %% "akka-stream-testkit" % akkaV,
     "junit" % "junit" % "4.12",
     "commons-codec" % "commons-codec" % "1.15"
-
   )

   lazy val portalDependencies = Seq(
@@ -132,6 +131,7 @@ object Dependencies {
     "com.nimbusds" % "nimbus-jose-jwt" % "7.0",
     "co.fs2" %% "fs2-core" % fs2V,
     "de.leanovate.play-mockws" %% "play-mockws" % "2.7.1" % "test",
-    "com.iheart" %% "ficus" % ficusV
+    "com.iheart" %% "ficus" % ficusV,
+    "co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
   )
 }
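The `logback-ecs-encoder` artifact has to be on the classpath of every module whose logback configuration references `co.elastic.logging.logback.EcsEncoder`, which is presumably why the same dependency line appears in three of the dependency lists above.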