2
0
mirror of https://github.com/VinylDNS/vinyldns synced 2025-08-22 18:17:07 +00:00

Update log levels and adopt ECS (#1103)

* Update log messages, log levels, and log formatting
This commit is contained in:
Aravindh R 2022-04-05 01:36:05 +05:30 committed by GitHub
parent 1c2635a441
commit 4b31f8b9d9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 90 additions and 80 deletions

View File

@@ -1,8 +1,9 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>

View File

@@ -2,9 +2,10 @@
<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-portal</serviceName>
<serviceNodeName>vinyldns-portal</serviceNodeName>
</encoder>
</appender>
<!--
@@ -15,7 +16,7 @@
<logger name="application" level="DEBUG" />
<root level="INFO">
<appender-ref ref="STDOUT" />
<appender-ref ref="CONSOLE" />
</root>
</configuration>

View File

@@ -1,7 +1,8 @@
<configuration>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg%n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>

View File

@@ -1,8 +1,9 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>
@@ -18,7 +19,7 @@
<appender-ref ref="CONSOLE"/>
</logger>
<root level="ERROR">
<root level="INFO">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>

View File

@@ -1,8 +1,9 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>
@@ -18,7 +19,7 @@
<appender-ref ref="CONSOLE"/>
</logger>
<root level="ERROR">
<root level="INFO">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>

View File

@@ -219,10 +219,10 @@ object Boot extends App {
()
}
logger.error(
logger.info(
s"STARTING VINYLDNS SERVER ON ${vinyldnsConfig.httpConfig.host}:${vinyldnsConfig.httpConfig.port}"
)
logger.error(banner)
logger.info(banner)
// Starts up our http server
implicit val actorSystem: ActorSystem = system
@@ -237,7 +237,7 @@ object Boot extends App {
// runApp gives us a Task, we actually have to run it! Running it will yield a Future, which is our app!
runApp().unsafeRunAsync {
case Right(_) =>
logger.error("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
logger.info("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
case Left(startupFailure) =>
logger.error(s"VINYLDNS SERVER UNABLE TO START $startupFailure")
startupFailure.printStackTrace()

View File

@@ -237,7 +237,7 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
logger.info(s"Received TRY_AGAIN from DNS lookup; converting error: ${query.error}")
fromDnsRcodeToError(DNS.Rcode.value(query.error), query.error)
} else {
logger.info(s"Unparseable error code returned from DNS: ${query.error}")
logger.warn(s"Unparseable error code returned from DNS: ${query.error}")
Left(TryAgain(query.error))
}

View File

@@ -33,7 +33,7 @@ import vinyldns.core.queue.MessageQueue
class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue: MessageQueue)
extends BatchChangeConverterAlgebra {
private val logger = LoggerFactory.getLogger("BatchChangeConverter")
private val logger = LoggerFactory.getLogger(classOf[BatchChangeConverter])
def sendBatchForProcessing(
batchChange: BatchChange,

View File

@@ -46,7 +46,7 @@ import vinyldns.core.domain.batch.BatchChangeApprovalStatus._
class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepository: UserRepository)
extends Notifier {
private val logger = LoggerFactory.getLogger("EmailNotifier")
private val logger = LoggerFactory.getLogger(classOf[EmailNotifier])
def notify(notification: Notification[_]): IO[Unit] =
notification.change match {

View File

@@ -1,7 +1,8 @@
<configuration>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg%n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>

View File

@@ -1,8 +1,9 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>

View File

@@ -27,7 +27,7 @@ object BackendLoader {
def load(configs: List[BackendProviderConfig]): IO[List[BackendProvider]] = {
def loadOne(config: BackendProviderConfig): IO[BackendProvider] =
for {
_ <- IO(logger.error(s"Attempting to load backend ${config.className}"))
_ <- IO(logger.info(s"Attempting to load backend ${config.className}"))
provider <- IO(
Class
.forName(config.className)

View File

@@ -25,7 +25,7 @@ object MessageQueueLoader {
def load(config: MessageQueueConfig): IO[MessageQueue] =
for {
_ <- IO(logger.error(s"Attempting to load queue ${config.className}"))
_ <- IO(logger.info(s"Attempting to load queue ${config.className}"))
provider <- IO(
Class
.forName(config.className)

View File

@@ -67,7 +67,7 @@ object DataStoreLoader {
for {
_ <- IO(
logger
.error(s"Attempting to load repos ${config.repositories.keys} from ${config.className}")
.info(s"Attempting to load repos ${config.repositories.keys} from ${config.className}")
)
provider <- IO(
Class

View File

@@ -1,8 +1,9 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n %nopexception </pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-core</serviceName>
<serviceNodeName>vinyldns-core</serviceNodeName>
</encoder>
</appender>

View File

@@ -60,13 +60,13 @@ object MySqlConnector {
// Runs flyway migrations
migration.load().migrate()
logger.info("migrations complete")
logger.info("Migrations complete")
}
}
def getDataSource(settings: MySqlDataSourceSettings): IO[HikariDataSource] = IO {
logger.error(s"Initializing data source with settings: $settings")
logger.info(s"Initializing data source with settings: $settings")
val dsConfig = new HikariConfig()
@@ -96,7 +96,7 @@ object MySqlConnector {
result match {
case Success(t) => Some(t)
case Failure(e) =>
logger.warn("failed to startup database connection, retrying..")
logger.warn("Failed to startup database connection, retrying..")
// Hard abort if we exhaust retries
if (currentIndex >= times - 1) {
throw e

View File

@@ -52,7 +52,7 @@ class MySqlMessageQueueProvider extends MessageQueueProvider {
val queueConnectionSettings = MySqlDataSourceSettings(config, "mysqlQueuePool")
getDataSource(queueConnectionSettings).map { dataSource =>
logger.error("configuring connection pool for queue")
logger.info("Configuring connection pool for queue")
// note this is being called 2x in the case you use the mysql datastores and
// loader. That should be ok
@@ -64,7 +64,7 @@ class MySqlMessageQueueProvider extends MessageQueueProvider {
new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
)
logger.error("queue connection pool init complete")
logger.info("Queue connection pool init complete")
}
}

View File

@@ -83,7 +83,7 @@ class MySqlDataStoreProvider extends DataStoreProvider {
val dbConnectionSettings = MySqlDataSourceSettings(config, "mysqlDbPool")
getDataSource(dbConnectionSettings).map { dataSource =>
logger.error("configuring connection pool")
logger.info("Configuring connection pool")
// pulled out of DBs.setupAll since we're no longer using the db. structure for config
DBs.loadGlobalSettings()
@@ -93,13 +93,13 @@ class MySqlDataStoreProvider extends DataStoreProvider {
new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
)
logger.error("database init complete")
logger.info("Database init complete")
}
}
private def shutdown(): IO[Unit] =
IO(DBs.close())
.handleError(e => logger.error(s"exception occurred while shutting down", e))
.handleError(e => logger.error(s"Exception occurred while shutting down", e))
private final val HEALTH_CHECK =
sql"""

View File

@@ -64,7 +64,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
def save(db: DB, groupChange: GroupChange): IO[GroupChange] =
monitor("repo.GroupChange.save") {
IO {
logger.info(
logger.debug(
s"Saving group change with (group_change_id, group_id): " +
s"(${groupChange.id}, ${groupChange.newGroup.id})"
)
@@ -87,7 +87,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
def getGroupChange(groupChangeId: String): IO[Option[GroupChange]] =
monitor("repo.GroupChange.getGroupChange") {
IO {
logger.info(s"Getting group change with group_change_id: $groupChangeId")
logger.debug(s"Getting group change with group_change_id: $groupChangeId")
DB.readOnly { implicit s =>
GET_GROUP_CHANGE
.bind(groupChangeId)
@@ -105,7 +105,7 @@ class MySqlGroupChangeRepository extends GroupChangeRepository with Monitored {
): IO[ListGroupChangesResults] =
monitor("repo.GroupChange.getGroupChanges") {
IO {
logger.info(
logger.debug(
s"Getting group changes with (groupId, startFrom, maxItems): ($groupId, $startFrom, $maxItems)"
)
DB.readOnly { implicit s =>

View File

@@ -72,7 +72,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def save(db: DB, group: Group): IO[Group] =
monitor("repo.Group.save") {
IO {
logger.info(s"Saving group with (id, name): (${group.id}, ${group.name})")
logger.debug(s"Saving group with (id, name): (${group.id}, ${group.name})")
db.withinTx { implicit s =>
PUT_GROUP
.bindByName(
@@ -94,7 +94,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def delete(group: Group): IO[Group] =
monitor("repo.Group.delete") {
IO {
logger.info(s"Deleting group with (id, name): (${group.id}, ${group.name})")
logger.debug(s"Deleting group with (id, name): (${group.id}, ${group.name})")
DB.localTx { implicit s =>
DELETE_GROUP
.bind(group.id)
@@ -109,7 +109,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def getGroup(groupId: String): IO[Option[Group]] =
monitor("repo.Group.getGroup") {
IO {
logger.info(s"Getting group with id: $groupId")
logger.debug(s"Getting group with id: $groupId")
DB.readOnly { implicit s =>
GET_GROUP_BY_ID
.bind(groupId)
@@ -123,7 +123,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def getGroups(groupIds: Set[String]): IO[Set[Group]] =
monitor("repo.Group.getGroups") {
IO {
logger.info(s"Getting group with ids: $groupIds")
logger.debug(s"Getting group with ids: $groupIds")
if (groupIds.isEmpty)
Set[Group]()
else {
@@ -144,7 +144,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def getGroupByName(groupName: String): IO[Option[Group]] =
monitor("repo.Group.getGroupByName") {
IO {
logger.info(s"Getting group with name: $groupName")
logger.debug(s"Getting group with name: $groupName")
DB.readOnly { implicit s =>
GET_GROUP_BY_NAME
.bind(groupName)
@@ -158,7 +158,7 @@ class MySqlGroupRepository extends GroupRepository with GroupProtobufConversions
def getAllGroups(): IO[Set[Group]] =
monitor("repo.Group.getAllGroups") {
IO {
logger.info(s"Getting all groups")
logger.debug(s"Getting all groups")
DB.readOnly { implicit s =>
GET_ALL_GROUPS
.map(toGroup(1))

View File

@@ -83,7 +83,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
// Diff is used to check if the users we are trying to add in the group is already present.
// If they don't exist in the group, we save the users.
val saveMembers = nonEmpty.diff(existingMembers)
logger.info(s"Saving into group $groupId members $nonEmpty")
logger.debug(s"Saving into group $groupId members $nonEmpty")
db.withinTx { implicit s =>
SAVE_MEMBERS.batchByName(saveParams(saveMembers, groupId, isAdmin): _*).apply()
@@ -100,7 +100,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
case nonEmpty =>
monitor("repo.Membership.removeMembers") {
IO {
logger.info(s"Removing from group $groupId members $nonEmpty")
logger.debug(s"Removing from group $groupId members $nonEmpty")
db.withinTx { implicit s =>
val inClause = " AND user_id IN (" + nonEmpty.as("?").mkString(",") + ")"
val query = BASE_REMOVE_MEMBERS + inClause
@@ -118,7 +118,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
def getExistingMembers(groupId: String): Set[String] =
monitor("repo.Membership.getExistingUsers") {
IO {
logger.info(s"Getting existing users")
logger.debug(s"Getting existing users")
DB.readOnly { implicit s =>
SQL(GET_EXISTING_USERS)
.bind(groupId)
@@ -133,7 +133,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
def getGroupsForUser(userId: String): IO[Set[String]] =
monitor("repo.Membership.getGroupsForUser") {
IO {
logger.info(s"Getting groups for user $userId")
logger.debug(s"Getting groups for user $userId")
DB.readOnly { implicit s =>
GET_GROUPS_FOR_USER
.bind(userId)
@@ -147,7 +147,7 @@ class MySqlMembershipRepository extends MembershipRepository with Monitored {
def getUsersForGroup(groupId: String, isAdmin: Option[Boolean]): IO[Set[String]] =
IO {
logger.info(s"Getting users for group $groupId")
logger.debug(s"Getting users for group $groupId")
DB.readOnly { implicit s =>
val baseConditions = Seq('groupId -> groupId)

View File

@@ -383,7 +383,7 @@ class MySqlRecordSetRepository extends RecordSetRepository with Monitored {
.update()
.apply()
}
logger.info(s"Deleted $numDeleted records from zone $zoneName (zone id: $zoneId)")
logger.debug(s"Deleted $numDeleted records from zone $zoneName (zone id: $zoneId)")
}.handleErrorWith { error =>
logger.error(s"Failed deleting records from zone $zoneName (zone id: $zoneId)", error)
IO.raiseError(error)

View File

@@ -48,7 +48,7 @@ class MySqlUserChangeRepository
def get(changeId: String): IO[Option[UserChange]] =
monitor("repo.UserChange.get") {
logger.info(s"Getting user change with id: $changeId")
logger.debug(s"Getting user change with id: $changeId")
IO {
DB.readOnly { implicit s =>
GET_USER_CHANGE_BY_ID
@@ -62,7 +62,7 @@ class MySqlUserChangeRepository
def save(change: UserChange): IO[UserChange] =
monitor("repo.UserChange.save") {
logger.info(s"Saving user change: $change")
logger.debug(s"Saving user change: $change")
IO {
DB.localTx { implicit s =>
PUT_USER_CHANGE

View File

@@ -68,7 +68,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
def getUser(userId: String): IO[Option[User]] =
monitor("repo.User.getUser") {
logger.info(s"Getting user with id: $userId")
logger.debug(s"Getting user with id: $userId")
IO {
DB.readOnly { implicit s =>
GET_USER_BY_ID
@@ -86,7 +86,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
maxItems: Option[Int]
): IO[ListUsersResults] =
monitor("repo.User.getUsers") {
logger.info(s"Getting users with ids: $userIds")
logger.debug(s"Getting users with ids: $userIds")
IO {
if (userIds.isEmpty)
ListUsersResults(List[User](), None)
@@ -133,7 +133,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
def getUserByAccessKey(accessKey: String): IO[Option[User]] =
monitor("repo.User.getUserByAccessKey") {
IO {
logger.info(s"Getting user with accessKey: $accessKey")
logger.debug(s"Getting user with accessKey: $accessKey")
DB.readOnly { implicit s =>
GET_USER_BY_ACCESS_KEY
.bind(accessKey)
@@ -147,7 +147,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
def getUserByName(userName: String): IO[Option[User]] =
monitor("repo.User.getUserByName") {
IO {
logger.info(s"Getting user with userName: $userName")
logger.debug(s"Getting user with userName: $userName")
DB.readOnly { implicit s =>
GET_USER_BY_USER_NAME
.bind(userName)
@@ -161,7 +161,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
def save(user: User): IO[User] =
monitor("repo.User.save") {
IO {
logger.info(s"Saving user with id: ${user.id}")
logger.debug(s"Saving user with id: ${user.id}")
DB.localTx { implicit s =>
PUT_USER
.bindByName(
@@ -180,7 +180,7 @@ class MySqlUserRepository(cryptoAlgebra: CryptoAlgebra)
def save(users: List[User]): IO[List[User]] =
monitor("repo.User.save") {
IO {
logger.info(s"Saving users with ids: ${users.map(_.id).mkString(", ")}")
logger.debug(s"Saving users with ids: ${users.map(_.id).mkString(", ")}")
val updates = users.map { u =>
Seq(

View File

@@ -49,7 +49,7 @@ class MySqlZoneChangeRepository
override def save(zoneChange: ZoneChange): IO[ZoneChange] =
monitor("repo.ZoneChange.save") {
IO {
logger.info(s"Saving zone change ${zoneChange.id}")
logger.debug(s"Saving zone change ${zoneChange.id}")
DB.localTx { implicit s =>
PUT_ZONE_CHANGE
.bindByName(
@@ -74,7 +74,7 @@ class MySqlZoneChangeRepository
// sorted from most recent, startFrom is an offset from the most recent change
monitor("repo.ZoneChange.listZoneChanges") {
IO {
logger.info(s"Getting zone changes for zone $zoneId")
logger.debug(s"Getting zone changes for zone $zoneId")
DB.readOnly { implicit s =>
val startValue = startFrom.getOrElse(DateTime.now().getMillis.toString)
// maxItems gets a plus one to know if the table is exhausted so we can conditionally give a nextId

View File

@@ -2,9 +2,10 @@
<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-portal</serviceName>
<serviceNodeName>vinyldns-portal</serviceNodeName>
</encoder>
</appender>
<!--
@@ -15,7 +16,7 @@
<logger name="application" level="OFF" />
<root level="OFF">
<appender-ref ref="STDOUT" />
<appender-ref ref="CONSOLE" />
</root>
</configuration>

View File

@@ -2,9 +2,10 @@
<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-portal</serviceName>
<serviceNodeName>vinyldns-portal</serviceNodeName>
</encoder>
</appender>
<!--
@@ -15,7 +16,7 @@
<logger name="application" level="DEBUG" />
<root level="INFO">
<appender-ref ref="STDOUT" />
<appender-ref ref="CONSOLE" />
</root>
</configuration>

View File

@@ -140,7 +140,7 @@ class Route53Backend(
typ: RecordSetChangeType,
rs: ResourceRecordSet
): ChangeResourceRecordSetsRequest = {
logger.debug(s"applying change to zone, record set is $rs")
logger.debug(s"Applying change to zone, record set is $rs")
new ChangeResourceRecordSetsRequest().withChangeBatch(
new ChangeBatch().withChanges(
new Change().withAction(changeAction(typ)).withResourceRecordSet(rs)
@@ -170,7 +170,7 @@ class Route53Backend(
changeRequest(change.changeType, r53RecordSet).withHostedZoneId(hostedZoneId),
client.changeResourceRecordSetsAsync
).map { response =>
logger.debug(s"applied record change $change, change result is ${response.getChangeInfo}")
logger.debug(s"Applied record change $change, change result is ${response.getChangeInfo}")
BackendResponse.NoError(response.toString)
}
} yield result

View File

@@ -49,7 +49,7 @@ class SqsMessageQueueProvider extends MessageQueueProvider {
_ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
client <- setupClient(settingsConfig)
queueUrl <- setupQueue(client, settingsConfig.queueName)
_ <- IO(logger.error(s"Queue URL: $queueUrl\n"))
_ <- IO(logger.info(s"Queue URL: $queueUrl\n"))
} yield new SqsMessageQueue(queueUrl, client)
def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {
@@ -104,7 +104,7 @@ class SqsMessageQueueProvider extends MessageQueueProvider {
def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
// Create queue if it doesn't exist
IO {
logger.error(s"Setting up queue with name [$queueName]")
logger.info(s"Setting up queue with name [$queueName]")
client.getQueueUrl(queueName).getQueueUrl
}.recoverWith {
case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)

View File

@@ -54,7 +54,8 @@ object Dependencies {
"com.iheart" %% "ficus" % ficusV,
"com.sun.mail" % "javax.mail" % "1.6.2",
"javax.mail" % "javax.mail-api" % "1.6.2",
"com.amazonaws" % "aws-java-sdk-sns" % awsV withSources()
"com.amazonaws" % "aws-java-sdk-sns" % awsV withSources(),
"co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
)
lazy val coreDependencies = Seq(
@@ -75,7 +76,8 @@ object Dependencies {
"javax.xml.bind" % "jaxb-api" % "2.3.0",
"javax.activation" % "activation" % "1.1.1",
"org.scalikejdbc" %% "scalikejdbc" % scalikejdbcV,
"org.scalikejdbc" %% "scalikejdbc-config" % scalikejdbcV
"org.scalikejdbc" %% "scalikejdbc-config" % scalikejdbcV,
"co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
)
lazy val mysqlDependencies = Seq(
@@ -86,7 +88,6 @@ object Dependencies {
"com.zaxxer" % "HikariCP" % "3.2.0",
"com.h2database" % "h2" % "1.4.200",
"commons-codec" % "commons-codec" % "1.15"
)
lazy val sqsDependencies = Seq(
@@ -107,7 +108,6 @@ object Dependencies {
"org.scalatestplus" %% "scalatestplus-mockito" % "1.0.0-M2",
"org.scalatestplus" %% "scalatestplus-scalacheck" % "3.1.0.0-RC2",
"commons-codec" % "commons-codec" % "1.15"
)
lazy val apiTestDependencies = commonTestDependencies ++ Seq(
@@ -115,7 +115,6 @@ object Dependencies {
"com.typesafe.akka" %% "akka-stream-testkit" % akkaV,
"junit" % "junit" % "4.12",
"commons-codec" % "commons-codec" % "1.15"
)
lazy val portalDependencies = Seq(
@@ -132,6 +131,7 @@ object Dependencies {
"com.nimbusds" % "nimbus-jose-jwt" % "7.0",
"co.fs2" %% "fs2-core" % fs2V,
"de.leanovate.play-mockws" %% "play-mockws" % "2.7.1" % "test",
"com.iheart" %% "ficus" % ficusV
"com.iheart" %% "ficus" % ficusV,
"co.elastic.logging" % "logback-ecs-encoder" % "1.3.2"
)
}