2
0
mirror of https://github.com/VinylDNS/vinyldns synced 2025-08-30 05:47:56 +00:00

Compare commits

...

1272 Commits

Author SHA1 Message Date
Aravindh R
736b2b1538
fix records repetition (#1432)
Co-authored-by: Arpit Shah <arpit_shah@cable.comcast.com>
2025-08-12 15:06:44 -04:00
Jay
8747775fd3
fix for vinyldns banner log (#1444)
* fix for vinyldns banner log

Signed-off-by: Jay07GIT <jeyraj931@gmail.com>

* updated the console log

Signed-off-by: Jay07GIT <jeyraj931@gmail.com>

---------

Signed-off-by: Jay07GIT <jeyraj931@gmail.com>
Co-authored-by: Arpit Shah <arpit_shah@cable.comcast.com>
2025-08-12 09:11:23 -04:00
Arpit Shah
3e01481b36
Merge pull request #1453 from Jay07GIT/rename
rename
2025-08-05 14:02:53 -04:00
Jay07GIT
4210a6007e
rename
Signed-off-by: Jay07GIT <jeyraj931@gmail.com>
2025-08-05 19:04:52 +05:30
Arpit Shah
4d056f16c5
Merge pull request #1207 from Jay07GIT/dependency_upgrade
Dependencies upgraded for both mac M1 and Intel chip
2025-07-24 21:41:45 -04:00
Nicholas Spadaccino
7eed595bd4
Merge branch 'master' into dependency_upgrade 2025-07-24 17:50:56 -04:00
Arpit Shah
144d9b014c
Merge pull request #1449 from arpit4ever/arpit4ever/update-maintainers-20250701
Updated maintainers and contributors
2025-07-09 21:50:44 -04:00
Arpit Shah
3a307012d2
Updating maintainers & adding new contributor 2025-07-08 09:35:29 -04:00
Jay07GIT
95f6e72c4a
update
Signed-off-by: Jay07GIT <jeyraj931@gmail.com>
2025-03-04 15:46:29 +05:30
Nicholas Spadaccino
d0b72ccbd3
Bump version to v0.21.2 2024-12-08 15:01:48 -05:00
Nicholas Spadaccino
099e52b149
Merge pull request #1427 from nspadaccino/ownership_transfer_bugfix
Ownership transfer bugfix
2024-12-08 15:01:24 -05:00
nspadaccino
0ac47fcf42
update test 2024-12-04 13:47:06 -05:00
nspadaccino
6d799bc78e
update test 2024-12-04 13:25:21 -05:00
nspadaccino
29b1d6a7db
update test 2024-12-04 12:56:33 -05:00
nspadaccino
e7bc2c0e0d
update tests 2024-12-04 12:19:06 -05:00
nspadaccino
63577cebb9
fix tests 2024-12-04 11:57:05 -05:00
nspadaccino
2e4bf94126
fix 422 errors for updating records in private zones from zone view 2024-12-03 15:43:06 -05:00
nspadaccino
37109b4917
add logging for debug 2024-12-03 14:45:56 -05:00
Nicholas Spadaccino
371581ab61
Bump version to v0.21.1 2024-11-15 12:41:32 -05:00
Nicholas Spadaccino
a64fac99fd
Merge pull request #1417 from Aravindh-Raju/aravindhr/fix-batch-status-display
Fix batch status display
2024-11-15 12:40:37 -05:00
Aravindh-Raju
1993b240af
fix format 2024-11-15 11:26:39 +05:30
Aravindh-Raju
32c9f836e5
fix batch status display 2024-11-15 11:15:09 +05:30
Nicholas Spadaccino
b5853b0de6
Bump version to v0.21.0 2024-11-14 10:47:06 -05:00
Nicholas Spadaccino
d1a4cfdfa8
Merge pull request #1416 from vinyldns/version-0.21.0-beta.3
Version 0.21.0
2024-11-14 10:46:30 -05:00
Nicholas Spadaccino
7ba35aeb39
Merge branch 'master' into version-0.21.0-beta.3 2024-11-14 09:10:19 -05:00
Nicholas Spadaccino
93f2b94910
Bump version to v0.20.3 2024-11-12 13:00:45 -05:00
Nicholas Spadaccino
44d42b60fe
Merge pull request #1415 from nspadaccino/nspadaccino/mysql8-updates
Query updates for MySQL 8
2024-11-12 12:59:19 -05:00
Nicholas Spadaccino
e2e49e4e11
Bump version to v0.21.0-beta.3 2024-11-11 14:45:40 -05:00
Nicholas Spadaccino
d05e21b905
Merge pull request #1413 from Jay07GIT/ownership_undefined_fix_2
fix for undefined issue in ownership transfer
2024-11-11 14:45:00 -05:00
nspadaccino
94caf84233
updated more queries 2024-11-08 13:45:38 -05:00
nspadaccino
b31923e91f
added backticks around "groups" in all schemas and queries, as it is a reserved keyword as of mysql 8.0 2024-11-08 13:45:20 -05:00
Jay07GIT
6e5bb9a6f2
fix for undefined issue in ownership transfer 2024-11-07 12:22:37 +05:30
Nicholas Spadaccino
cb9784515f
Bump version to v0.21.0-beta.1 2024-10-16 16:33:02 -04:00
Nicholas Spadaccino
3cb95b2768
Merge pull request #1398 from Aravindh-Raju/aravindhr/fix-batch-delete
Fix batch delete
2024-10-16 16:28:06 -04:00
Nicholas Spadaccino
a363d6974b
Merge pull request #1397 from Jay07GIT/fix_recordSetGroupChange_undefined
Fix for undefined recordSetGroupChange
2024-10-16 16:27:58 -04:00
Arpit Shah
4f0bd04643
Merge pull request #1405 from Jay07GIT/rearrange_export_csv_column
Rearranged the DNS change export csv column
2024-10-15 11:45:07 -04:00
Arpit Shah
76a4d69162
Merge pull request #1404 from Aravindh-Raju/aravindhr/update-copy-button
Update copy button
2024-10-15 11:44:53 -04:00
Jay07GIT
65a59a81fb
rerun tests 2024-10-15 20:53:47 +05:30
Jay07GIT
706d3882e4
rearranged the DNS change export csv column 2024-10-15 20:39:09 +05:30
Arpit Shah
85bd74ac1a
Merge pull request #1335 from Aravindh-Raju/aravindhr/add-batch-status-filter
Save batch changes with their status
2024-10-15 10:28:43 -04:00
Aravindh-Raju
4dad8ed160
update alignment 2024-10-15 17:44:30 +05:30
Aravindh-Raju
7a46369826
update text 2024-10-15 12:33:39 +05:30
Aravindh-Raju
f742837dfd
update copy button 2024-10-15 12:21:32 +05:30
Aravindh-Raju
55e093d25d
add tests 2024-10-15 11:51:37 +05:30
Aravindh-Raju
3fe6b91942
only save changes made or errors 2024-10-14 13:37:15 +05:30
Jay07GIT
efc4e3f0a5
update 2024-10-10 23:52:27 +05:30
Jay07GIT
41e3beff8d
update 2024-10-10 23:49:35 +05:30
Nicholas Spadaccino
52ab3d999a
Merge pull request #1396 from Aravindh-Raju/aravindhr/copy-ids-to-clipboard
Copy group, batch and zone id to clipboard
2024-10-09 11:30:02 -04:00
Arpit Shah
6b56ebf158
Merge pull request #1400 from nspadaccino/nspadaccino/query-updates
Mysql 8.0 compatibility
2024-10-09 11:00:56 -04:00
Aravindh-Raju
618f2cd089
add copy button in batches page 2024-10-09 12:20:41 +05:30
Aravindh R
68c1d10027
Merge branch 'version-0.21.0-beta.1' into aravindhr/copy-ids-to-clipboard 2024-10-09 11:50:05 +05:30
nspadaccino
8354da0c71
Bump version to v0.21.0-beta.1 2024-10-07 13:54:49 -04:00
nspadaccino
a5a5f58d42
updated more queries 2024-10-03 14:32:21 -04:00
nspadaccino
61fe42ccfd
added backticks around "groups" in all schemas and queries, as it is a reserved keyword as of mysql 8.0 2024-10-02 15:56:23 -04:00
Aravindh-Raju
af60d73b3c
add-copy-button 2024-10-01 15:21:23 +05:30
Aravindh-Raju
93ceee7e15
fix-batch-delete 2024-09-30 12:36:22 +05:30
Aravindh-Raju
dcd4e9fe06
balance parranthesis 2024-09-30 10:37:32 +05:30
Aravindh R
2089d3fcab
Merge branch 'master' into aravindhr/add-batch-status-filter 2024-09-30 10:24:34 +05:30
Jay07GIT
b1cdf12a48
fix for undefined recordSetGroupChange 2024-09-26 16:10:27 +05:30
Aravindh-Raju
ed564cf54a
turn off debug 2024-09-25 18:34:57 +05:30
Aravindh-Raju
4101d5dacf
add copy to clipboard 2024-09-25 18:27:32 +05:30
Nicholas Spadaccino
40e817c142
Bump version to v0.20.3-beta.1 2024-09-24 16:19:10 -04:00
Nicholas Spadaccino
2e81e05104
Merge pull request #1389 from Aravindh-Raju/aravindhr/fix-update-logic
Fix batch update logic
2024-09-24 16:18:05 -04:00
Nicholas Spadaccino
00fe608cf5
Merge pull request #1393 from Jay07GIT/reset_file_batchchange_upload
Reset the batch file name when re-upload the same file again
2024-09-24 16:17:31 -04:00
Nicholas Spadaccino
709426db7d
Merge pull request #1391 from Jay07GIT/export_csv_batchchange
Export CSV in batch changes
2024-09-24 16:16:02 -04:00
Nicholas Spadaccino
a3e848a52c
Merge pull request #1348 from Jay07GIT/records_ownership_transfer
Ownership transfer for records
2024-09-24 16:02:03 -04:00
Aravindh-Raju
f1679c883d
remove print statements 2024-09-24 14:46:04 +05:30
Aravindh-Raju
0b8453ef26
handle non-existent record 2024-09-24 14:44:00 +05:30
Jay
d6fcd3d726
Merge branch 'master' into records_ownership_transfer 2024-09-23 12:09:08 +05:30
Aravindh-Raju
dcb9cb9d47
fix tests 2024-09-20 15:55:29 +05:30
Aravindh-Raju
a97d6b9166
handle deletes conflict with backend and db 2024-09-20 15:33:30 +05:30
Jay07GIT
d994559f72
reset the file name when re-upload the same file again 2024-09-16 12:44:54 +05:30
Jay07GIT
2dce5ab425
Export CSV in batch changes 2024-09-13 23:07:58 +05:30
Aravindh-Raju
9674a82e6a
fix update logic 2024-09-02 15:14:04 +05:30
Nicholas Spadaccino
d0aea448ae
Bump version to v0.20.2 2024-08-29 15:40:33 -04:00
Arpit Shah
73d7851abf
Merge pull request #1387 from vinyldns/v0.20.2-beta
v0.20.2
2024-08-29 15:39:29 -04:00
Nicholas Spadaccino
e7c9793a29
Merge pull request #1386 from nspadaccino/nspadaccino/gha-updates
Update Github actions plugins
2024-08-27 13:59:31 -04:00
Jay07GIT
52dcb88663
Resolved tests 2024-08-27 23:23:50 +05:30
nspadaccino
0bb4c58137
updated various github actions plugins to latest version 2024-08-27 13:36:15 -04:00
Jay07GIT
28de709b49
Removed email notifier in config 2024-08-27 17:47:27 +05:30
Jay07GIT
959ab28649
updated owner transfer email notifier 2024-08-27 17:37:57 +05:30
Nicholas Spadaccino
3250c9571b
Bump version to v0.20.2-beta.2 2024-08-26 14:41:20 -04:00
Arpit Shah
2eb0d321f1
Merge pull request #1385 from Jay07GIT/fix_batch_change_edit
Fix for dns edit page after submit
2024-08-26 14:37:03 -04:00
Arpit Shah
2ef34840f3
Merge pull request #1383 from Jay07GIT/add_batch_limit_portal_config
added batch-change-limit params in portal config
2024-08-26 14:36:33 -04:00
Aravindh R
9760e4a79e
Merge branch 'master' into aravindhr/add-batch-status-filter 2024-08-26 11:24:17 +05:30
Arpit Shah
45c6301146
Merge pull request #1381 from Aravindh-Raju/aravindhr/remove-logs-beta
Remove Logs Beta
2024-08-23 12:31:32 -04:00
Jay07GIT
58fff9a03b
update 2024-08-23 12:16:58 +05:30
Jay07GIT
96bf137172
Fix for dns edit page after submit 2024-08-22 11:52:48 +05:30
Jay07GIT
ce887e0ffa
added batch-change-limit params in portal config 2024-08-21 13:07:09 +05:30
Aravindh-Raju
53da50a232
trigger test 2024-08-20 12:50:11 +05:30
Aravindh-Raju
5e19bcab2d
remove logs 2024-08-20 11:41:39 +05:30
Nicholas Spadaccino
ab57f9f4a2
Bump version to v0.20.2-beta 2024-08-14 21:55:54 -04:00
nspadaccino
3712575a84
Merge branch 'master' into v0.20.2-beta 2024-08-14 21:23:18 -04:00
Nicholas Spadaccino
7d9937d4af
Merge pull request #1374 from Jay07GIT/latency_fix_batch_change
Fix for latency issue in batch change upload
2024-08-14 21:19:40 -04:00
Nicholas Spadaccino
587753b572
Merge pull request #1376 from Aravindh-Raju/aravindhr/optimize-batch-change
Optimize batch change
2024-08-14 21:14:36 -04:00
Nicholas Spadaccino
dbf56cac85
Merge pull request #1380 from Aravindh-Raju/update-hikari-log-level
Update Hikari Log Level
2024-08-14 21:13:34 -04:00
Nicholas Spadaccino
eacd6897ff
Merge pull request #1379 from nspadaccino/nspadaccino/beta-release-script
Add Beta Release Script
2024-08-14 16:19:06 -04:00
Aravindh-Raju
2787ce5904
update to conf and logback 2024-08-14 19:44:17 +05:30
Aravindh R
6c223a38f2
Merge branch 'master' into update-hikari-log-level 2024-08-14 19:20:51 +05:30
Aravindh-Raju
14300b3b15
update hikari log level 2024-08-14 19:15:29 +05:30
nspadaccino
b1266bf36b
add beta release script 2024-08-13 13:44:10 -04:00
Jay07GIT
5a6f761ec7
update 2024-08-12 18:29:21 +05:30
Jay07GIT
d90ab4c319
commented the request-timeout config 2024-08-12 18:06:53 +05:30
Aravindh-Raju
3532c032bf
add mysql props 2024-08-12 17:07:12 +05:30
Aravindh-Raju
978b852df1
add logs 2024-08-09 10:06:23 +05:30
Nicholas Spadaccino
e54dc71fe0
Merge branch 'master' into aravindhr/optimize-batch-change 2024-08-07 10:39:25 -04:00
Arpit Shah
bf9ee3f824
Merge pull request #1377 from Jay07GIT/loader_search_recorddata
spinner for record data search
2024-08-07 10:37:21 -04:00
Arpit Shah
3cd83f9e06
Merge branch 'master' into loader_search_recorddata 2024-08-07 09:52:16 -04:00
Arpit Shah
09c54e5389
Merge pull request #1372 from Jay07GIT/update_docker_compose
Removed docker compose version since no more use
2024-08-07 09:51:56 -04:00
Arpit Shah
976abad13e
Merge branch 'master' into update_docker_compose 2024-08-07 09:27:25 -04:00
Arpit Shah
a069129509
Merge pull request #1371 from Aravindh-Raju/aravindhr/fix-group-update-bug
Fix group update bug
2024-08-07 09:26:12 -04:00
Arpit Shah
6f20e35ad9
Merge branch 'master' into aravindhr/fix-group-update-bug 2024-08-07 09:11:49 -04:00
Arpit Shah
535b0136ba
Merge pull request #1366 from vinyldns/dependabot/pip/modules/api/src/test/functional/requests-2.32.0
Bump requests from 2.31.0 to 2.32.0 in /modules/api/src/test/functional
2024-08-07 09:10:30 -04:00
Jay07GIT
393a2f550f
spinner for record data search 2024-08-06 17:03:38 +05:30
Jay07GIT
59fd320a5a
removed future await and increased akka http timeout limit 2024-07-23 16:54:30 +05:30
Jay07GIT
cb122b1506
update 2024-07-19 11:18:22 +05:30
Jay07GIT
2ea7b0cbf3
resolved tests 2024-07-19 10:40:09 +05:30
Jay07GIT
6b832da233
fix for latency issue in batch change upload 2024-07-18 23:53:48 +05:30
Arpit Shah
4ed52b576b
Merge branch 'master' into aravindhr/fix-group-update-bug 2024-07-18 14:21:22 -04:00
Nicholas Spadaccino
5ac0c82c3d
Update requirements.txt
requests version 2.32.0 was yanked, 2.32.3 is the latest stable version
2024-07-18 12:47:47 -04:00
Aravindh-Raju
2f63cdcf41
fix tests 2024-07-16 14:40:03 +05:30
Aravindh-Raju
d5722f4961
handle timeout in api 2024-07-13 00:41:07 +05:30
Aravindh-Raju
817898c63f
remove unnecessary conditions 2024-07-12 16:43:16 +05:30
Aravindh-Raju
810ad389ea
optimize batch change 2024-07-12 16:01:56 +05:30
Jay07GIT
b9020329a3
typo 2024-07-02 17:54:31 +05:30
Jay07GIT
04d6dcb52c
removed docker compose version since no more use 2024-07-02 15:33:53 +05:30
Aravindh-Raju
ba3ab43671
fix group update bug 2024-06-24 13:44:21 +05:30
dependabot[bot]
8e0104d3c2
---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-05-21 05:46:38 +00:00
Nicholas Spadaccino
372e7db7bc
Bump version to v0.20.1 2024-05-17 13:49:07 -04:00
Nicholas Spadaccino
571194d2d5
Merge pull request #1355 from vinyldns/dependabot/pip/modules/api/src/test/functional/dnspython-2.6.1
Bump dnspython from 2.1.0 to 2.6.1 in /modules/api/src/test/functional
2024-05-17 13:48:20 -04:00
Nicholas Spadaccino
db888d58ac
Merge branch 'master' into dependabot/pip/modules/api/src/test/functional/dnspython-2.6.1 2024-05-17 13:34:13 -04:00
Nicholas Spadaccino
bba938905e
Merge pull request #1357 from Aravindh-Raju/aravindhr/remove-unnecessary-parameters
Remove unnecessary parameters from "listRecordSetChanges"
2024-05-17 13:34:07 -04:00
Nicholas Spadaccino
c13cf86f64
Merge branch 'master' into aravindhr/remove-unnecessary-parameters 2024-05-17 11:34:49 -04:00
Nicholas Spadaccino
1a0a66630e
Merge pull request #1363 from Aravindh-Raju/aravindhr/add-jks-details
Add info regarding java key store
2024-05-17 11:34:13 -04:00
Nicholas Spadaccino
97c84f183d
Merge branch 'master' into aravindhr/add-jks-details 2024-05-17 10:24:28 -04:00
Nicholas Spadaccino
867ad22fcf
Merge pull request #1365 from arpit4ever/arpit4ever/update-maintainers-contributors
Updated maintainers and contributors
2024-05-17 10:24:04 -04:00
Arpit Shah
5d7db47a16
Updated contributors 2024-05-17 09:52:25 -04:00
Arpit Shah
1d0127e791
Merge branch 'master' into arpit4ever/update-maintainers-contributors 2024-05-16 14:54:55 -04:00
Arpit Shah
3086b53808
Merge pull request #1361 from Aravindh-Raju/aravindhr/update-info-messages
Update info messages
2024-05-16 14:53:37 -04:00
Arpit Shah
6e91fcee86
Merge branch 'master' into aravindhr/update-info-messages 2024-05-15 09:44:55 -04:00
Arpit Shah
64617f1a57
Merge pull request #1358 from Jay07GIT/deleted_zone_status_fix
fixed deleted zone access issue in portal
2024-05-15 09:40:39 -04:00
Arpit Shah
5bf4c10375
Merge branch 'master' into deleted_zone_status_fix 2024-05-14 13:31:55 -04:00
Arpit Shah
580548726e
Merge pull request #1351 from Aravindh-Raju/aravindhr/fix-record-change-history-pagination
Fix record change history pagination
2024-05-14 13:31:15 -04:00
Arpit Shah
ff2d44c5ab
Merge branch 'master' into aravindhr/fix-record-change-history-pagination 2024-05-14 13:05:43 -04:00
Arpit Shah
8c242b65e6
Updated maintainers and contributors 2024-05-14 13:01:14 -04:00
Nicholas Spadaccino
f4fef7b1df
Merge pull request #1364 from arpit4ever/arpit4ever/update-codecov-version-v4
Updated codecov version to latest v4
2024-05-14 11:43:37 -04:00
Arpit Shah
fdf2601680
Updated codecov version to v4 2024-05-14 10:38:31 -04:00
Aravindh-Raju
dfea1651f8
add jks details 2024-05-14 16:14:09 +05:30
Aravindh-Raju
176a0ce659
update info messages 2024-05-07 10:47:34 +05:30
Jay07GIT
fa0be01a62
update 2024-05-02 12:23:20 +05:30
Arpit Shah
0df410a28a
Merge branch 'master' into aravindhr/fix-record-change-history-pagination 2024-04-25 14:24:55 -04:00
Jay07GIT
4a104b1fde
update 2024-04-25 23:01:13 +05:30
Aravindh-Raju
8335ee9b01
reuse toFqdn method 2024-04-25 14:21:11 +05:30
Arpit Shah
e4fb70beb9
Merge pull request #1344 from Jay07GIT/alignment_fix_abandoned_zones
Alignment fix for abandoned zones
2024-04-24 11:49:12 -04:00
Jay07GIT
8a8c22338c
fixed deleted zone access in portal 2024-04-24 14:59:53 +05:30
Jay07GIT
59b3196020
update 2024-04-24 14:50:28 +05:30
Jay07GIT
1231d4bdde
update in alignment 2024-04-24 14:47:51 +05:30
Aravindh-Raju
8a2c692471
remove unnecessary parameters 2024-04-18 14:48:46 +05:30
dependabot[bot]
11e89cb211
Bump dnspython from 2.1.0 to 2.6.1 in /modules/api/src/test/functional
Bumps [dnspython](https://github.com/rthalley/dnspython) from 2.1.0 to 2.6.1.
- [Release notes](https://github.com/rthalley/dnspython/releases)
- [Changelog](https://github.com/rthalley/dnspython/blob/main/doc/whatsnew.rst)
- [Commits](https://github.com/rthalley/dnspython/compare/v2.1.0...v2.6.1)

---
updated-dependencies:
- dependency-name: dnspython
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-12 21:58:08 +00:00
Jay07GIT
9c55dfa817
update 2024-04-04 14:41:20 +05:30
Jay07GIT
4fc132ae3a
Merge branch 'records_ownership_transfer' of https://github.com/Jay07GIT/vinyldns into records_ownership_transfer 2024-04-03 23:11:19 +05:30
Jay07GIT
bd549df516
update 2024-04-03 23:10:48 +05:30
Jay
099575a3a8
Merge branch 'master' into records_ownership_transfer 2024-03-29 09:21:49 +05:30
Jay07GIT
c7641905cf
update 2024-03-29 09:18:21 +05:30
Jay
80cb0d7294
Merge branch 'master' into alignment_fix_abandoned_zones 2024-03-28 22:31:07 +05:30
Jay07GIT
620d3ee110
update 2024-03-26 12:15:43 +05:30
Jay07GIT
f0931f4817
update 2024-03-22 22:46:02 +05:30
Jay07GIT
21667ebbf3
update 2024-03-22 22:36:01 +05:30
Jay07GIT
029ffbfd5d
update 2024-03-22 21:47:06 +05:30
Jay07GIT
885e135b31
Resolved func tests 2024-03-22 19:20:44 +05:30
Jay07GIT
cc7612497d
update 2024-03-22 19:02:47 +05:30
Jay07GIT
db2af0acca
update 2024-03-22 17:25:40 +05:30
Jay07GIT
1c816346c8
update 2024-03-22 12:24:46 +05:30
Jay07GIT
2ab7e85c66
update 2024-03-22 11:30:10 +05:30
Aravindh-Raju
1cfecc5fbe
fix pagination 2024-03-21 13:00:38 +05:30
Jay07GIT
37121aa451
update 2024-03-20 18:44:58 +05:30
Jay07GIT
42fd437517
update 2024-03-20 16:06:30 +05:30
Jay07GIT
be6df82668
update 2024-03-20 15:25:59 +05:30
Jay07GIT
3be5eb19b9
Merge branch 'alignment_fix_abandoned_zones' of https://github.com/Jay07GIT/vinyldns into alignment_fix_abandoned_zones 2024-03-20 11:06:28 +05:30
Jay07GIT
7089e89467
Merge branch 'master' of https://github.com/Jay07GIT/vinyldns into alignment_fix_abandoned_zones 2024-03-20 11:05:34 +05:30
Jay07GIT
acc0a33224
update 2024-03-20 11:03:15 +05:30
Jay07GIT
974434e053
Allow super user to request/approve ownership transfer 2024-03-18 21:37:21 +05:30
Jay07GIT
6d10973372
bug fix 2024-03-18 14:36:24 +05:30
Jay07GIT
123f2fa477
update 2024-03-15 22:57:48 +05:30
Nicholas Spadaccino
4601b6d7ac
Bump version to v0.20.0 2024-03-15 13:19:06 -04:00
Jay07GIT
ab6c42bc10
update 2024-03-14 22:35:08 +05:30
Jay07GIT
8f9e13e5c1
update 2024-03-14 22:09:08 +05:30
Jay07GIT
2e1e631314
Merge branch 'records_ownership_transfer' of https://github.com/Jay07GIT/vinyldns into records_ownership_transfer 2024-03-14 21:06:11 +05:30
Jay07GIT
f103985306
resolved minor bugs 2024-03-14 21:03:48 +05:30
Aravindh R
9ac3202372
Merge branch 'master' into aravindhr/add-batch-status-filter 2024-03-12 11:08:38 +05:30
Jay
dd96f50cfc
Merge branch 'vinyldns:master' into records_ownership_transfer 2024-03-02 01:12:14 +05:30
Jay07GIT
2565a9fbae
update 2024-03-02 00:53:47 +05:30
Jay07GIT
e684587bd4
update 2024-03-02 00:27:17 +05:30
Jay07GIT
45262c02c4
update 2024-03-02 00:20:29 +05:30
Jay07GIT
184fcf01b4
ownership transfer for records 2024-03-01 23:50:00 +05:30
Nicholas Spadaccino
f4459f72b7
Merge branch 'master' into alignment_fix_abandoned_zones 2024-02-29 10:11:57 -05:00
Nicholas Spadaccino
4b16996ec9
Merge pull request #1347 from Aravindh-Raju/aravindhr/add-group-change-faq
FAQ regarding group change history
2024-02-28 15:21:30 -05:00
Nicholas Spadaccino
5431da8d21
Merge branch 'master' into aravindhr/add-group-change-faq 2024-02-28 15:04:14 -05:00
Nicholas Spadaccino
c010e2aab5
Merge pull request #1342 from Aravindh-Raju/aravindhr/improve-batch-failed-change-message
Display info message for failed single changes
2024-02-28 14:58:17 -05:00
Nicholas Spadaccino
e1bd98bd44
Merge branch 'master' into aravindhr/improve-batch-failed-change-message 2024-02-28 14:46:40 -05:00
Nicholas Spadaccino
f608274091
Merge branch 'master' into aravindhr/add-group-change-faq 2024-02-28 14:12:48 -05:00
Nicholas Spadaccino
7c5b3235b3
Merge pull request #1323 from Aravindh-Raju/aravindhr/filter-batch-change-by-username
Filter batch changes by submitter and date time range
2024-02-28 14:11:18 -05:00
Nicholas Spadaccino
615050e093
Merge branch 'master' into aravindhr/filter-batch-change-by-username 2024-02-28 13:28:35 -05:00
Nicholas Spadaccino
ff772c512f
Merge pull request #1312 from Aravindh-Raju/aravindhr/api-docs-update
Update api docs
2024-02-28 12:45:16 -05:00
Nicholas Spadaccino
5070615979
Merge branch 'master' into aravindhr/api-docs-update 2024-02-28 12:33:17 -05:00
Nicholas Spadaccino
396f6cd59f
Merge branch 'master' into aravindhr/filter-batch-change-by-username 2024-02-28 11:42:12 -05:00
Aravindh-Raju
a2563d7b84
add most asked faq 2024-02-28 18:36:10 +05:30
Aravindh-Raju
a0c20dbe84
add faq 2024-02-28 15:04:02 +05:30
Nicholas Spadaccino
46a02a605b
Merge pull request #1343 from nspadaccino/nspadaccino/csv-error-fix
Improve CSV Import Error Message
2024-02-27 15:44:55 -05:00
Nicholas Spadaccino
80d6a8caf1
Merge branch 'master' into nspadaccino/csv-error-fix 2024-02-27 15:31:55 -05:00
nspadaccino
972057679c
Fix bug where file would import even with a txt extention 2024-02-27 11:35:58 -05:00
Nicholas Spadaccino
0a3868566c
Merge branch 'master' into aravindhr/filter-batch-change-by-username 2024-02-27 09:59:29 -05:00
Aravindh-Raju
4ce82c5c3e
remove id 2024-02-27 12:05:58 +05:30
Nicholas Spadaccino
3819c1ed1c
Merge pull request #1327 from Aravindh-Raju/aravindhr/hide-group-change-history
Hide group change history for non-members
2024-02-26 16:30:19 -05:00
Nicholas Spadaccino
000a21ad46
Merge branch 'master' into aravindhr/hide-group-change-history 2024-02-26 16:09:48 -05:00
Nicholas Spadaccino
3a68cc6e53
Merge pull request #1333 from Aravindh-Raju/aravindhr/hide-zone-change-history
Hide zone change for users not in admin group
2024-02-26 14:36:43 -05:00
Nicholas Spadaccino
7e8c436733
Merge branch 'master' into aravindhr/hide-zone-change-history 2024-02-26 13:57:59 -05:00
Aravindh-Raju
c1ea947e9b
fix pagination 2024-02-26 12:26:48 +05:30
Aravindh-Raju
112369c7fa
fix daterangepicker 2024-02-23 17:37:12 +05:30
Aravindh-Raju
cab17e88e4
remove sup user 2024-02-23 12:21:52 +05:30
Aravindh-Raju
239db95915
fix date range picker UI pos 2024-02-23 12:02:33 +05:30
Aravindh-Raju
1fbdfcdd61
update menu.yml 2024-02-22 15:29:37 +05:30
Aravindh-Raju
ade01cb216
few fixes 2024-02-22 13:45:29 +05:30
Aravindh-Raju
c5ee2918c0
address comments 2024-02-22 13:33:16 +05:30
Nicholas Spadaccino
2bfc0e38e5
Merge pull request #1345 from Jay07GIT/update_get_user_docs
Updated get user api docs
2024-02-21 16:24:35 -05:00
Nicholas Spadaccino
cd695a5cb4
Merge branch 'master' into update_get_user_docs 2024-02-21 16:14:29 -05:00
Nicholas Spadaccino
6210e1269b
Merge pull request #1328 from Jay07GIT/search_by_username
add group id in user response
2024-02-21 16:14:16 -05:00
Jay07GIT
ad6cf1479b
updated get user api docs- added group id in user response 2024-02-21 12:18:12 +05:30
Aravindh-Raju
13b6eae714
fix doc 2024-02-21 11:30:12 +05:30
Jay07GIT
fe69fc0125
alignment fix for abandoned zones 2024-02-20 12:21:37 +05:30
Aravindh-Raju
125f12821a
change message 2024-02-15 11:29:50 +05:30
Aravindh R
d46256248e
Merge branch 'master' into aravindhr/hide-zone-change-history 2024-02-14 12:24:25 +05:30
Aravindh R
077421d7ab
Merge branch 'master' into aravindhr/hide-group-change-history 2024-02-14 12:23:46 +05:30
Aravindh R
50c25f9ab3
Merge branch 'master' into aravindhr/api-docs-update 2024-02-14 12:23:03 +05:30
Aravindh R
9450f87f3a
Merge branch 'master' into aravindhr/add-batch-status-filter 2024-02-13 12:02:22 +05:30
Aravindh-Raju
0d7ceaf3cd
fix version 2024-02-13 11:52:06 +05:30
Aravindh-Raju
97b5599bef
fix format 2024-02-13 11:46:31 +05:30
Aravindh-Raju
2091f0dff2
temporarily reomve css 2024-02-13 11:45:36 +05:30
Aravindh-Raju
fa16fa3132
temporarily remove batch status filter 2024-02-13 11:30:48 +05:30
nspadaccino
43ca242fc0
Update csv import error messages to be more descriptive, remove unused messages 2024-02-12 13:38:23 -05:00
Aravindh-Raju
fcb90f66e2
fix test 2024-02-08 18:42:48 +05:30
Aravindh-Raju
ffa09d88b2
parse, change and display single change failed message 2024-02-08 15:02:48 +05:30
Nicholas Spadaccino
b7590161a3
Merge branch 'master' into search_by_username 2024-02-07 10:43:03 -05:00
Nicholas Spadaccino
8858f3547d
Merge branch 'master' into aravindhr/filter-batch-change-by-username 2024-02-07 09:19:47 -05:00
Nicholas Spadaccino
8192780ed4
Bump version to v0.19.5 2024-02-01 15:40:01 -05:00
Nicholas Spadaccino
576eac2523
Merge pull request #1338 from Aravindh-Raju/aravindhr/fix-record-change-history-view
Fix record change history view in portal
2024-02-01 15:39:26 -05:00
Aravindh-Raju
90bcc06506
remove log 2024-01-31 15:36:21 +05:30
Aravindh-Raju
9813077da4
add zone id parameter 2024-01-31 15:18:18 +05:30
Aravindh-Raju
f1e05fd5da
fix test 2024-01-30 16:28:26 +05:30
Aravindh-Raju
0ee2a1691e
fix record change history view 2024-01-30 15:56:23 +05:30
Nicholas Spadaccino
17c937c0db
Bump version to v0.19.4 2024-01-17 14:52:27 -05:00
Nicholas Spadaccino
28f57735cb
Merge pull request #1329 from Jay07GIT/fix_abondoned_zones_query
updated filter deleted zones using scala to sql query
2024-01-17 14:44:03 -05:00
Nicholas Spadaccino
4246932d52
Merge branch 'master' into fix_abondoned_zones_query 2024-01-17 14:13:57 -05:00
Nicholas Spadaccino
8ec7525bb5
Merge pull request #1305 from Aravindh-Raju/aravindhr/add-back-rc-history
Add back record change history
2024-01-17 14:13:38 -05:00
Nicholas Spadaccino
d3143ca9d5
Merge branch 'master' into aravindhr/add-back-rc-history 2024-01-17 14:02:45 -05:00
Nicholas Spadaccino
d9aa43025c
Merge pull request #1320 from Aravindh-Raju/aravindhr/show-action-buttons-if-access
Display action buttons if there's ACL access
2024-01-17 14:02:21 -05:00
Jay07GIT
3a7a97b385
update 2024-01-17 12:46:39 +05:30
Jay07GIT
9903b86db0
added unit tests 2024-01-17 12:40:07 +05:30
Aravindh-Raju
82f3fa3e88
add tests 2024-01-11 16:16:06 +05:30
Aravindh-Raju
3cae9e3fcc
add functionality 2024-01-10 13:22:46 +05:30
Jay07GIT
0589596f4d
Added tests 2024-01-09 11:28:39 +05:30
Jay07GIT
15b2b33ee3
Resolved failed tests 2024-01-08 14:39:52 +05:30
Jay07GIT
5d08848b5e
update 2024-01-08 12:10:19 +05:30
Aravindh-Raju
06aec0940f
add test 2023-12-13 10:25:26 +05:30
Aravindh-Raju
40a9fe5159
hide zone change for non admins 2023-12-12 11:48:55 +05:30
Jay07GIT
ebf1dbb2ae
added index for zone change columns 2023-12-06 19:07:10 +05:30
Jay07GIT
f52f1554ef
modified deleted zone search query and resolved pagination issue in portal 2023-12-06 19:03:36 +05:30
Jay07GIT
d2ae7514c3
update 2023-11-21 21:46:43 +05:30
Jay07GIT
ce7d995db5
updated query with zone name filter 2023-11-21 21:42:37 +05:30
Jay07GIT
38befae8b5
update 2023-11-20 20:21:29 +05:30
Jay07GIT
f253e87a26
updated filter deleted zones using scala to sql query 2023-11-20 19:29:03 +05:30
Aravindh-Raju
1f17a6b7be
update doc 2023-11-17 11:26:30 +05:30
Aravindh-Raju
2279c6e008
add tests 2023-11-15 13:44:02 +05:30
Aravindh-Raju
e251eedc76
fix tests 2023-11-14 18:02:15 +05:30
Aravindh-Raju
31c9ab3321
add date time range filter 2023-11-14 16:20:54 +05:30
Jay07GIT
41ed5215f8
add group id in user response 2023-11-08 19:28:55 +05:30
Aravindh-Raju
7f3d335047
fix test 2023-11-08 19:18:05 +05:30
Aravindh-Raju
0189665508
hide for non-members 2023-11-07 17:39:12 +05:30
Aravindh R
fa3b68bfb3
Merge branch 'master' into aravindhr/show-action-buttons-if-access 2023-10-30 14:41:46 +05:30
Aravindh-Raju
1f07093d68
show buttons in shared zones too 2023-10-30 12:47:16 +05:30
Aravindh-Raju
bb199ce74c
add eof line 2023-10-27 13:54:08 +05:30
Aravindh-Raju
cc2c3b15e6
filter batch by submitter name 2023-10-27 13:46:46 +05:30
Aravindh-Raju
69d5662fcc
fix for shared zones 2023-10-18 13:45:29 +05:30
Aravindh-Raju
44f649c922
display access buttons if there's access 2023-10-13 18:54:00 +05:30
Nicholas Spadaccino
e25189ae12
Bump version to v0.19.3 2023-10-12 16:12:44 -04:00
Nicholas Spadaccino
98f38b172a
Merge pull request #1319 from nspadaccino/nspadaccino/migration-3-30-fix
Update migration 3.30 to use drop foreign key instead of drop constraint
2023-10-12 16:12:14 -04:00
nspadaccino
7d6cdfebb0
Update migration 3.30 to use drop foreign key instead of drop constraint 2023-10-11 16:30:24 -04:00
Aravindh R
4463d79f14
Merge branch 'master' into aravindhr/api-docs-update 2023-10-09 12:03:11 +05:30
Nicholas Spadaccino
09d1199473
Merge pull request #1315 from nspadaccino/nspadaccino/skip-record-count-func-test
Skip test_get_recordset_count_by_zoneid Functional Test
2023-10-06 16:02:06 -04:00
nspadaccino
ee3298cea2
Skip test that is passing and failing randomly 2023-10-06 15:46:15 -04:00
Nicholas Spadaccino
b58067da7e
Bump version to v0.19.2 2023-10-06 13:20:00 -04:00
Arpit Shah
6fa2566e8c
Merge pull request #1307 from Jay07GIT/docs_abandoned_zones
updated docs with Abandoned zones details
2023-10-06 13:14:08 -04:00
Aravindh-Raju
185f8d2b4e
update docs 2023-10-05 14:53:29 +05:30
Aravindh-Raju
b8db74ec1e
update response type 2023-10-04 15:18:24 +05:30
Aravindh-Raju
bdbf010236
add email domains route 2023-10-04 15:15:38 +05:30
Aravindh-Raju
9529f6e692
update api docs 2023-10-04 14:56:41 +05:30
Jay07GIT
bfc426ed6d
resolved tests 2023-10-03 12:21:46 +05:30
Jay
4de2777bb4
Merge branch 'master' into docs_abandoned_zones 2023-10-03 11:55:20 +05:30
Jay07GIT
c8acf95901
added abondoned zones in menu.yml 2023-10-03 11:52:48 +05:30
Nicholas Spadaccino
d928d85b49
Merge branch 'master' into aravindhr/add-back-rc-history 2023-09-29 10:28:03 -04:00
Aravindh-Raju
93771bafbb
add rc history to doc menu 2023-09-29 12:39:42 +05:30
Nicholas Spadaccino
420d295a4d
Merge pull request #1304 from Jay07GIT/show_recordsets_count
Added api for record set count and displayed in portal
2023-09-28 15:55:06 -04:00
Aravindh-Raju
02ac901c88
add rc history docs 2023-09-28 12:48:31 +05:30
Jay07GIT
7088c20c50
updated based on the review comments 2023-09-27 15:02:54 +05:30
Aravindh-Raju
a6b4dbf24d
add rc history route 2023-09-27 12:15:31 +05:30
Aravindh R
266ac821a2
Merge branch 'master' into aravindhr/add-back-rc-history 2023-09-27 11:24:13 +05:30
Arpit Shah
fb6db99685
Merge branch 'master' into show_recordsets_count 2023-09-26 15:51:41 -04:00
Arpit Shah
faa7b164fc
Merge pull request #1306 from Aravindh-Raju/aravindhr/remove-rc-history-api-route
Remove record change history route
2023-09-26 15:49:19 -04:00
Jay07GIT
a334e17497
updated docs with Abandoned zones details 2023-09-26 15:24:26 +05:30
Aravindh R
0082796c0a
Merge branch 'master' into aravindhr/add-back-rc-history 2023-09-26 11:42:28 +05:30
Aravindh R
0dfed0cc2b
Merge branch 'master' into aravindhr/remove-rc-history-api-route 2023-09-26 11:41:57 +05:30
Aravindh-Raju
da980a16a1
remove rc history route 2023-09-26 11:39:23 +05:30
Aravindh-Raju
975db527da
add back rc history 2023-09-26 11:23:00 +05:30
Jay07GIT
91bcbedb14
updated typo. 2023-09-26 10:33:42 +05:30
Arpit Shah
b1936e107d
Merge pull request #1300 from pedrokiefer/assume_role_signed
refactor: allow assume role with optional externalId
2023-09-25 11:45:25 -04:00
Jay07GIT
db76ab2f95
update 2023-09-25 19:33:09 +05:30
Jay07GIT
c1194b9b65
resolved func tests 2023-09-25 18:53:48 +05:30
Jay07GIT
1d2cd50d15
added tests 2023-09-25 17:55:36 +05:30
Jay07GIT
b7d8c76f68
resolve tests 2023-09-25 12:52:18 +05:30
Jay07GIT
825b2ae5a5
added api for record set count and displayed in portal- zones>manage records 2023-09-25 12:37:13 +05:30
Nicholas Spadaccino
66c49a29ab
Merge branch 'master' into assume_role_signed 2023-09-13 14:17:49 -04:00
Nicholas Spadaccino
fb89c237d4
Merge pull request #1296 from Aravindh-Raju/aravindhr/gui-changes
Portal GUI changes -- Issue #1295
2023-09-13 14:17:34 -04:00
Nicholas Spadaccino
650daa133a
Merge branch 'master' into aravindhr/gui-changes 2023-09-13 11:19:07 -04:00
Aravindh-Raju
98d983a446
rename test 2023-09-13 10:55:20 +05:30
Nicholas Spadaccino
9548203ca9
Merge pull request #1240 from pedrokiefer/route53_fix
Add a filter for Route53 API results
2023-09-12 16:26:44 -04:00
Nicholas Spadaccino
40fba3a705
Merge branch 'master' into route53_fix 2023-09-12 14:38:37 -04:00
Arpit Shah
894ddf3851
Merge pull request #1284 from Jay07GIT/deleted_all_zones_history
list abandoned zones
2023-09-12 14:20:44 -04:00
Arpit Shah
6d0215da01
Merge branch 'master' into deleted_all_zones_history 2023-09-12 13:41:09 -04:00
Nicholas Spadaccino
6a57199797
Merge pull request #1294 from Jay07GIT/collapse_fix_manage_records
collapse fix in manage records
2023-09-12 13:39:30 -04:00
Nicholas Spadaccino
b8fdd9a712
Merge branch 'master' into collapse_fix_manage_records 2023-09-12 13:25:01 -04:00
Pedro Kiefer
4ada2885aa
refactor: allow assume role with optional externalId 2023-09-11 14:57:53 -03:00
Pedro Kiefer
b3312b7e90
test: update route53 api integration test 2023-09-11 14:57:34 -03:00
Pedro Kiefer
b345a859a1
Merge branch 'master' into route53_fix 2023-09-11 14:50:08 -03:00
Pedro Kiefer
1d848f5cb8
refactor: add a filter for route53 API results as it might return larger values than requested 2023-09-11 14:47:20 -03:00
Jay07GIT
cfd990ad17
update 2023-09-09 11:13:44 +05:30
Aravindh-Raju
d804779df9
add back blank space 2023-09-04 11:02:55 +05:30
Aravindh-Raju
9afe8d3e4b
fix pagination 2023-09-04 11:01:32 +05:30
Jay07GIT
835d9209d2
update 2023-08-24 09:14:51 +05:30
Aravindh-Raju
e1c12d37a2
gui changes 2023-08-23 18:58:04 +05:30
Arpit Shah
a9838b0707
Merge branch 'master' into deleted_all_zones_history 2023-08-22 14:45:52 -04:00
Jay07GIT
092b825a7c
collapse fix in manage records 2023-08-23 00:07:53 +05:30
Jay07GIT
b65fc23bdc
Update 2023-08-23 00:01:13 +05:30
Jay07GIT
6a8e588417
ignore access fix 2023-08-22 23:57:44 +05:30
Nicholas Spadaccino
ec54b1d533
Bump version to v0.19.1 2023-08-22 10:18:30 -04:00
Nicholas Spadaccino
2ab6058c0b
Merge pull request #1287 from Aravindh-Raju/aravindhr/remove-record-change-history
Temporarily remove record change history
2023-08-22 10:17:41 -04:00
Aravindh R
58d3fc5739
Merge branch 'master' into aravindhr/remove-record-change-history 2023-08-21 18:08:17 +05:30
Nicholas Spadaccino
f191c298dd
Merge branch 'master' into deleted_all_zones_history 2023-08-14 12:58:55 -04:00
Nicholas Spadaccino
85d3d7a287
Merge pull request #1293 from Aravindh-Raju/aravindhr/resolve-autocomplete
Resolve global recordset search autocomplete
2023-08-14 12:58:34 -04:00
Nicholas Spadaccino
b766f627c6
Merge branch 'master' into aravindhr/resolve-autocomplete 2023-08-14 12:21:45 -04:00
Nicholas Spadaccino
1e96bb765a
Merge pull request #1288 from Jay07GIT/add_zoneid_recordsetchangefailure
added zone id in recordset change failure metrics api parameters
2023-08-14 12:12:42 -04:00
Nicholas Spadaccino
ae18bd1c21
Merge branch 'master' into add_zoneid_recordsetchangefailure 2023-08-14 10:46:05 -04:00
Nicholas Spadaccino
2e6a1ecce6
Merge pull request #1285 from Aravindh-Raju/aravindhr/indicate-batch-errors
Batch change screen indicates if there are errors down by the "Confirm" button
2023-08-14 10:45:42 -04:00
Nicholas Spadaccino
0f7b2c07b7
Merge branch 'master' into aravindhr/indicate-batch-errors 2023-08-14 10:26:46 -04:00
Aravindh-Raju
1e36efd9e5
resolve autocomplete 2023-08-11 10:31:36 +05:30
Arpit Shah
887779ba60
Merge pull request #1205 from nspadaccino/groups-maxitems-config
Make the Number of Groups Displayed in a List a Configurable Value
2023-08-09 14:56:13 -04:00
Arpit Shah
e204a3404c
Merge branch 'master' into groups-maxitems-config 2023-08-09 14:38:18 -04:00
Arpit Shah
40f0028dc5
Merge pull request #1283 from Aravindh-Raju/aravindhr/fix-my-zones-bug
Fix 'My Zones' bug
2023-08-09 14:37:44 -04:00
Nicholas Spadaccino
2d90e5746f
Merge branch 'master' into aravindhr/fix-my-zones-bug 2023-08-09 14:20:29 -04:00
Nicholas Spadaccino
fd3893c320
Merge pull request #1282 from Aravindh-Raju/aravindhr/portal-ui-changes
Portal UI Changes
2023-08-09 14:20:18 -04:00
Jay07GIT
b9b5b10397
added pagination test in api 2023-08-04 18:24:04 +05:30
Jay07GIT
050c79ebc9
updated next id 2023-08-04 15:45:41 +05:30
Aravindh-Raju
ed6d40ab40
css changes 2023-08-03 07:37:10 +05:30
Jay07GIT
85631c9e29
update 2023-08-03 00:23:54 +05:30
Jay07GIT
3d4dd8b57b
update 2023-08-02 23:52:25 +05:30
Jay07GIT
7eeb0237d6
update 2023-08-02 23:37:49 +05:30
Jay07GIT
e7563fc1cd
added zone id in api parameters 2023-08-02 23:31:19 +05:30
Aravindh-Raju
abee07a0fb
temporarily skip rc history func tests 2023-08-01 18:34:36 +05:30
Aravindh-Raju
bd6aee3aec
temporarily remove rc history tests 2023-08-01 17:55:14 +05:30
Aravindh-Raju
e4373895a0
temporarily remove rc history 2023-08-01 12:54:19 +05:30
Aravindh-Raju
05d5dca69b
indicate batch errors 2023-07-26 10:19:10 +05:30
Jay07GIT
5af3b5d6f0
added descending sort with abandoned date 2023-07-25 22:25:53 +05:30
Jay07GIT
930e3c6d46
Update 2023-07-25 19:48:57 +05:30
Jay07GIT
6e527e9e72
dummy commit 2023-07-25 17:51:44 +05:30
Jay07GIT
b466b97507
update 2023-07-25 17:31:26 +05:30
Jay07GIT
2a2deda29f
update 2023-07-25 12:13:02 +05:30
Jay07GIT
19ef51bd16
update 2023-07-24 19:48:02 +05:30
Jay07GIT
490849c5ff
update 2023-07-24 19:34:38 +05:30
Jay07GIT
6a1e91d5df
list abandoned zones 2023-07-24 18:24:02 +05:30
Aravindh-Raju
5001d8bc62
fix ignore access bug 2023-07-24 15:19:06 +05:30
Aravindh-Raju
984360f6c2
add EOF line 2023-07-24 14:45:37 +05:30
Aravindh-Raju
165768f163
make ui changes 2023-07-24 14:41:47 +05:30
Nicholas Spadaccino
a6b320ef19
Bump version to v0.19.0 2023-07-21 15:22:56 -04:00
Nicholas Spadaccino
acb308788c
Merge branch 'master' into groups-maxitems-config 2023-07-21 14:56:47 -04:00
Arpit Shah
0e1c45ec36
Merge pull request #1243 from Jay07GIT/fix_change_failure_metrics
added max limits in zone and record change failure metrics api
2023-07-21 14:42:54 -04:00
Arpit Shah
d2ca231cd7
Merge branch 'master' into fix_change_failure_metrics 2023-07-21 14:10:40 -04:00
Arpit Shah
ed51361e27
Merge pull request #1246 from Aravindh-Raju/aravindhr/add-record-history-in-global-rs-search
Add record history in global recordset search
2023-07-21 14:09:29 -04:00
nspadaccino
15a0804ba1
Addressed review comments 2023-07-20 15:58:52 -04:00
nspadaccino
330c5156d3
Merge branch 'master' into groups-maxitems-config 2023-07-20 15:46:25 -04:00
Nicholas Spadaccino
9030af3b1b
Merge branch 'master' into aravindhr/add-record-history-in-global-rs-search 2023-07-20 14:39:21 -04:00
Nicholas Spadaccino
9217424c99
Merge pull request #1279 from Aravindh-Raju/aravindhr/fix-group-change-history-bug
Fix group history and zone update bug
2023-07-20 14:39:01 -04:00
Nicholas Spadaccino
30b2957b5a
Merge branch 'master' into aravindhr/fix-group-change-history-bug 2023-07-20 14:13:43 -04:00
Arpit Shah
342a19df97
Merge pull request #1264 from Aravindh-Raju/aravindhr/add-backend-r53-config
Add alternate backend config and r53 details
2023-07-20 10:12:07 -04:00
Nicholas Spadaccino
453552234d
Merge branch 'master' into aravindhr/add-backend-r53-config 2023-07-19 16:46:52 -04:00
Aravindh-Raju
2c209180b8
fix scheduler bug 2023-07-19 11:52:38 +05:30
Aravindh R
95d53567ab
Merge branch 'master' into aravindhr/fix-group-change-history-bug 2023-07-19 10:34:42 +05:30
Arpit Shah
280d269c5d
Merge branch 'master' into aravindhr/add-record-history-in-global-rs-search 2023-07-18 14:31:41 -04:00
Jay
5bf911ae68
Merge branch 'master' into fix_change_failure_metrics 2023-07-18 14:08:05 +05:30
Jay07GIT
921720677a
Resolve failed tests 2023-07-18 13:49:30 +05:30
Jay07GIT
47e4c5c41c
update 2023-07-18 12:39:34 +05:30
Jay07GIT
85316356ea
update 2023-07-18 12:09:01 +05:30
Jay07GIT
d2619664d4
Merge branch 'fix_change_failure_metrics' of https://github.com/Jay07GIT/vinyldns into fix_change_failure_metrics 2023-07-18 11:44:32 +05:30
Aravindh-Raju
0c1e20dd4c
fix group description 2023-07-17 14:44:31 +05:30
Arpit Shah
dcfcf67a7b
Merge pull request #1274 from nspadaccino/nspadaccino/ptr-zone-filter-new
Add an Option to Filter Out PTR Zones in the Zones View
2023-07-14 11:53:38 -04:00
Nicholas Spadaccino
47f6a7b5b3
Merge branch 'master' into nspadaccino/ptr-zone-filter-new 2023-07-14 11:32:34 -04:00
Arpit Shah
b40b7d6373
Merge pull request #1271 from Aravindh-Raju/aravindhr/add-filter-hosts-config
Add filter hosts config
2023-07-14 10:52:43 -04:00
Arpit Shah
d73aeaf53e
Merge branch 'master' into aravindhr/add-filter-hosts-config 2023-07-14 10:35:43 -04:00
Arpit Shah
f906c19b76
Merge pull request #1278 from Aravindh-Raju/aravindhr/fix-pagination-bug
Fix zone filter bug
2023-07-14 10:34:48 -04:00
Arpit Shah
0666c5e470
Merge branch 'master' into aravindhr/fix-pagination-bug 2023-07-14 10:09:34 -04:00
Nicholas Spadaccino
64dbc96bba
Merge pull request #1180 from Jay07GIT/sort_recordtype
Added record type sort in manage zones tab
2023-07-12 16:36:22 -04:00
Nicholas Spadaccino
7ed8d0ee59
Merge branch 'master' into sort_recordtype 2023-07-12 16:14:17 -04:00
Jay07GIT
67e3adbac3
update 2023-07-12 14:08:17 +05:30
Jay07GIT
4c6f04c1e1
Merge branch 'sort_recordtype' of https://github.com/Jay07GIT/vinyldns into sort_recordtype 2023-07-12 12:12:37 +05:30
Aravindh-Raju
6eb05adeea
fix group history bug 2023-07-11 11:25:40 +05:30
Aravindh-Raju
a7c9c08f66
add test 2023-07-10 11:35:24 +05:30
Aravindh-Raju
78194e7dc9
fix zone filter bug 2023-07-10 11:11:59 +05:30
Aravindh-Raju
3de69d37f5
remove zone sync 2023-07-07 11:09:59 +05:30
Arpit Shah
e06211408d
Merge pull request #1265 from Aravindh-Raju/aravindhr/add-metadata-for-social-media
Add metadata in portal for social media
2023-07-05 17:06:57 -04:00
Nicholas Spadaccino
75f4130a71
Merge branch 'master' into aravindhr/add-metadata-for-social-media 2023-07-05 16:22:50 -04:00
Nicholas Spadaccino
263d333add
Merge pull request #1268 from Aravindh-Raju/aravindhr/fix-zone-update-bug
Fix zone button bug
2023-07-05 16:22:29 -04:00
nspadaccino
eb329757c9
Added and updated tests 2023-07-05 14:25:39 -04:00
nspadaccino
056c678543
Removed commented code 2023-06-30 15:27:57 -04:00
nspadaccino
1135b2acca
Updated listZonesByAdminGroupIds to support ptr filter, resolved tests 2023-06-30 15:26:30 -04:00
Aravindh-Raju
8061d97d21
Update doc 2023-06-30 10:21:55 +05:30
nspadaccino
cbd4c56626
Update tests 2023-06-23 17:00:17 -04:00
nspadaccino
a10377051e
Update checkbox placement, add filter to allzones view 2023-06-23 16:21:35 -04:00
nspadaccino
a222a2efd9
Merge branch 'nspadaccino/ptr-filter' into nspadaccino/ptr-zone-filter-new 2023-06-23 14:54:19 -04:00
Aravindh-Raju
ac7f9d17e1
add details to doc 2023-06-23 18:06:14 +05:30
Aravindh-Raju
f27967d0aa
add filter hosts config 2023-06-23 17:59:46 +05:30
Aravindh R
245488a5a4
Merge branch 'master' into aravindhr/add-record-history-in-global-rs-search 2023-06-22 10:06:19 +05:30
Aravindh-Raju
7a21dd4e19
fix zone button bug 2023-06-20 12:26:00 +05:30
Aravindh-Raju
1862e101df
add info to doc 2023-06-12 18:12:57 +05:30
Aravindh-Raju
46b7e08604
add info to doc 2023-06-12 18:12:42 +05:30
Aravindh-Raju
836c4d5b01
remove filter 2023-06-12 16:29:28 +05:30
Aravindh-Raju
31c72b1f2e
add meta tags for social media 2023-06-12 16:23:04 +05:30
Aravindh-Raju
8b99aacc9e
minor changes 2023-06-12 11:10:58 +05:30
Aravindh-Raju
b8d09c56a6
update doc 2023-06-09 11:21:11 +05:30
Aravindh-Raju
edf038f9c2
update doc 2023-06-09 11:04:31 +05:30
Aravindh-Raju
7d119a3b82
add alternate backend config and r53 2023-06-09 11:01:07 +05:30
Nicholas Spadaccino
15bb24f444
Merge pull request #1262 from Aravindh-Raju/aravindhr/fix-zone-record-fields-bugs
Fix bugs in Zone view record fields
2023-06-05 12:47:15 -04:00
Nicholas Spadaccino
8112e31045
Merge branch 'master' into aravindhr/fix-zone-record-fields-bugs 2023-06-05 11:16:05 -04:00
Nicholas Spadaccino
1f1cfeefd2
Bump version to v0.18.8 2023-06-05 10:48:41 -04:00
Nicholas Spadaccino
c3b5f168f0
Merge branch 'master' into aravindhr/fix-zone-record-fields-bugs 2023-06-05 10:48:11 -04:00
Nicholas Spadaccino
b5860e3102
Merge pull request #1254 from vinyldns/dependabot/pip/modules/api/src/test/functional/requests-2.31.0
Bump requests from 2.26.0 to 2.31.0 in /modules/api/src/test/functional
2023-06-05 10:48:00 -04:00
Arpit Shah
9bc26ac8a1
Merge branch 'master' into dependabot/pip/modules/api/src/test/functional/requests-2.31.0 2023-06-05 09:06:04 -04:00
Aravindh-Raju
da880e276c
Fix bugs 2023-06-01 11:48:11 +05:30
Nicholas Spadaccino
da57c90966
Bump version to v0.18.7 2023-05-31 14:26:14 -04:00
Nicholas Spadaccino
2ac62a8d71
Merge pull request #1257 from Aravindh-Raju/aravindhr/skip-review-for-already-existing-records
Skip review for already existing records
2023-05-31 14:25:35 -04:00
Aravindh R
7eaa2ec7af
Merge branch 'master' into aravindhr/skip-review-for-already-existing-records 2023-05-31 10:27:01 +05:30
Nicholas Spadaccino
3dc2014a02
Merge pull request #1258 from Aravindh-Raju/aravindhr/manage-logs
Update portal logs to debug level
2023-05-30 17:51:13 -04:00
Nicholas Spadaccino
6782413ff4
Merge branch 'master' into aravindhr/manage-logs 2023-05-30 17:15:26 -04:00
Nicholas Spadaccino
9fb33d53e9
Merge pull request #1260 from Aravindh-Raju/aravindhr/add-naptr-record-validations
Add additional NAPTR validations
2023-05-30 17:15:09 -04:00
Nicholas Spadaccino
99032a7186
Merge branch 'master' into aravindhr/add-naptr-record-validations 2023-05-30 16:43:58 -04:00
Nicholas Spadaccino
11e5d040fc
Merge pull request #1252 from Aravindh-Raju/aravindhr/add-ns-naptr-srv-in-batch
Add new record types in batch change
2023-05-30 16:42:09 -04:00
Aravindh-Raju
839b683dfe
Add naptr validations 2023-05-30 12:51:56 +05:30
Aravindh R
21ce04fcc9
Merge branch 'master' into aravindhr/add-ns-naptr-srv-in-batch 2023-05-25 11:47:17 +05:30
Aravindh-Raju
d03315512c
manage logs 2023-05-25 11:38:49 +05:30
Aravindh-Raju
3b07bf1230
address review comments 2023-05-25 11:17:26 +05:30
Aravindh-Raju
7ef69e4b17
skip review 2023-05-24 11:07:30 +05:30
dependabot[bot]
357afe269b
Bump requests from 2.26.0 to 2.31.0 in /modules/api/src/test/functional
Bumps [requests](https://github.com/psf/requests) from 2.26.0 to 2.31.0.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.26.0...v2.31.0)

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-23 02:31:06 +00:00
Aravindh-Raju
86f346ef16
naptr flags test 2023-05-22 11:45:53 +05:30
Aravindh-Raju
afadf9f290
naptr flags validation 2023-05-22 11:40:05 +05:30
Aravindh-Raju
d584b5a038
fix pagination bug 2023-05-18 11:17:43 +05:30
Nicholas Spadaccino
5a5a9fd380
Bump version to v0.18.6 2023-05-17 13:45:35 -04:00
Nicholas Spadaccino
4ac6472812
Merge pull request #1251 from nspadaccino/nspadaccino/hikari-debug-logs
Enable debug logging for hikariCP
2023-05-17 13:36:10 -04:00
Nicholas Spadaccino
5284c6a0ce
Merge branch 'master' into nspadaccino/hikari-debug-logs 2023-05-17 12:15:52 -04:00
Arpit Shah
65bcc9acd7
Merge pull request #1250 from Aravindh-Raju/fix-scheduler-acl-bug
Fix zone sync scheduler bug
2023-05-17 10:10:35 -04:00
Arpit Shah
f9774bcbe5
Merge branch 'master' into fix-scheduler-acl-bug 2023-05-17 09:42:33 -04:00
Arpit Shah
b9ccfd203f
Merge pull request #1253 from Aravindh-Raju/aravindhr/remove-deprecated-config
Remove deprecated max-life-time
2023-05-17 08:34:27 -04:00
Arpit Shah
8dea88e5e0
Merge branch 'master' into aravindhr/remove-deprecated-config 2023-05-17 06:31:50 -04:00
Aravindh-Raju
4d2ffcf618
remove deprecated max-life-time 2023-05-17 14:22:36 +05:30
Aravindh-Raju
8d4ad632ee
add tests 2023-05-16 12:44:23 +05:30
Aravindh-Raju
c97465a4c9
add tests 2023-05-16 12:07:58 +05:30
Aravindh-Raju
9b9a4a875d
resolve pagination 2023-05-16 11:29:48 +05:30
Aravindh-Raju
c24fbf53d9
change placeholder 2023-05-16 10:09:33 +05:30
nspadaccino
82ff9f588a
Reverted changes to dependencies 2023-05-15 17:05:36 -04:00
Aravindh-Raju
84175dd990
revert super user 2023-05-15 15:38:47 +05:30
Aravindh-Raju
0fd93ce6d2
add tests 2023-05-15 15:36:26 +05:30
Aravindh-Raju
c12f0b63b7
add new data in sample csv 2023-05-12 11:30:19 +05:30
nspadaccino
2085ae74c4
Add socket timeout to jdbc url, upgrade hikari version to 5.0.1 (latest), upgrade logback and slf4j-api deps for compatibility with hikari 2023-05-11 16:57:30 -04:00
Aravindh-Raju
cf08f052a8
add unit tests 2023-05-11 16:13:23 +05:30
Aravindh-Raju
3acd6761a9
add unit tests 2023-05-11 12:50:49 +05:30
Aravindh-Raju
b78e08413e
add unit tests 2023-05-10 20:01:39 +05:30
Aravindh-Raju
77715b4f16
add functional test 2023-05-10 16:20:46 +05:30
Aravindh-Raju
96fb185b1c
add functional test 2023-05-09 10:36:46 +05:30
Aravindh-Raju
3bc40dcd61
add functional test 2023-05-09 09:29:45 +05:30
Aravindh-Raju
7c1b7ae499
add functional test 2023-05-08 21:36:56 +05:30
Aravindh-Raju
45a55dd52d
add new record types in batch 2023-05-08 12:29:18 +05:30
nspadaccino
2bb26c7930
Added additional hikari connection settings to application config 2023-05-03 16:34:34 -04:00
Jay07GIT
b58aead524
added start from in failure record change 2023-04-28 19:01:42 +05:30
nspadaccino
ecc3a2f79a
Enable debug logging for hikariCP, can now view pool stats on console 2023-04-25 20:16:13 -04:00
Aravindh-Raju
c1e148fc6a
remove super user permissions 2023-04-25 10:54:46 +05:30
Aravindh-Raju
b86ee93835
fix scheduler bug 2023-04-25 10:49:10 +05:30
Aravindh-Raju
08631db3fc
Optimize code 2023-04-18 19:25:25 +05:30
Aravindh-Raju
c3bce6b8fc
fix test 2023-04-18 15:59:44 +05:30
Aravindh-Raju
e84138378f
add functional tests 2023-04-18 15:20:39 +05:30
Aravindh-Raju
7fe99961a2
display owner group for records in private zones 2023-04-13 15:12:34 +05:30
Nicholas Spadaccino
7cc6cb8b1c
Bump version to v0.18.5 2023-04-12 17:26:21 -04:00
Nicholas Spadaccino
0d9936f02d
Merge pull request #1238 from ssranjani06/ranjani/Zoneeamailvalidations
Zone related email validation
2023-04-12 17:24:39 -04:00
nspadaccino
9eb7641e94
Update api config documentation 2023-04-12 15:03:17 -04:00
ssranjani06
56c5d71e4b
Removing unwanted comments 2023-04-12 12:01:16 +05:30
ssranjani06
e7ddf84571
fix for issues after review 2023-04-12 10:51:16 +05:30
ssranjani06
93dc115f11
Merge branch 'ranjani/Zoneeamailvalidations' of https://github.com/ssranjani06/vinyldns into ranjani/Zoneeamailvalidations
update latest changes
2023-04-11 18:30:24 +05:30
ssranjani06
7a9f3d4e5a
review changes 2023-04-11 18:27:37 +05:30
ssranjani06
0806a9e46c
Merge branch 'master' into ranjani/Zoneeamailvalidations 2023-04-11 18:07:56 +05:30
ssranjani06
d860102bd5
info for groups and zones update 2023-04-11 15:48:12 +05:30
Aravindh-Raju
1b5c9cc0b7
add new column 2023-04-11 15:26:45 +05:30
ssranjani06
537329e9dd
config-api.md update 2023-04-11 15:11:09 +05:30
ssranjani06
244d210204
update 2023-04-11 14:48:40 +05:30
ssranjani06
ba79f7e97b
update 2023-04-11 14:32:14 +05:30
ssranjani06
bc439dc683
update 2023-04-11 13:54:00 +05:30
ssranjani06
fb704cdb4e
update 2023-04-11 12:53:42 +05:30
ssranjani06
3575261827
update 2023-04-11 12:35:26 +05:30
ssranjani06
815e180aa1
update 2023-04-11 12:14:06 +05:30
ssranjani06
37ab1ea3a5
Changes after review 2023-04-11 11:50:49 +05:30
ssranjani06
258820baf3
Loading json error 3 2023-04-10 16:12:09 +05:30
ssranjani06
f1181de659
Loading json error 2 2023-04-10 15:44:19 +05:30
ssranjani06
ffc3c93129
Loading json error 1 2023-04-10 15:23:21 +05:30
ssranjani06
9fe7af0989
Removing Compilation Error 5 2023-04-10 15:02:46 +05:30
ssranjani06
02ef0dad85
Removing Compilation Error 4 2023-04-10 14:40:39 +05:30
ssranjani06
3361d6b103
Removing Compilation Error 3 2023-04-10 14:20:31 +05:30
ssranjani06
6c56cdd3b6
Removing Compilation Error 2 2023-04-10 14:06:12 +05:30
ssranjani06
6211570f28
Removing Compilation Error1 2023-04-10 13:48:19 +05:30
ssranjani06
7d4a455362
Removing Compilation Error 2023-04-10 12:52:13 +05:30
ssranjani06
e85cc87578
Adding tests 1 2023-04-10 12:28:07 +05:30
ssranjani06
c7135cd15e
Adding tests 2023-04-10 11:53:00 +05:30
ssranjani06
750287d8c5
info related to invalid email 2023-04-06 11:50:47 +05:30
Aravindh-Raju
f103d2c6b8
add history 2023-04-05 18:23:32 +05:30
Jay07GIT
bb211008c4
resolve failed tests 2023-04-05 12:29:38 +05:30
Jay
a78781ebd4
Merge branch 'master' into sort_recordtype 2023-04-04 14:26:47 +05:30
ssranjani06
cfe84baa31
Zone related dropdown changes 2023-04-04 11:33:57 +05:30
Jay
eb5d8fab97
Merge branch 'master' into fix_change_failure_metrics 2023-04-03 18:55:31 +05:30
Aravindh-Raju
19ba3c09fe
working skeleton 2023-03-31 17:29:15 +05:30
Jay07GIT
7d5cab3771
resolved tests 2023-03-31 14:27:57 +05:30
Jay07GIT
891aa1250a
added max limits in zone and record change failure metrics api 2023-03-31 14:07:36 +05:30
ssranjani06
7432eba197
dummy commit 2023-03-31 11:21:36 +05:30
ssranjani06
5ceab5a66c
Email Domains Dropdown for groups 2023-03-31 10:54:20 +05:30
ssranjani06
6cce7ec10a
inclusion of ! and & in regex 2023-03-30 18:54:05 +05:30
ssranjani06
7fb6e6f47d
Include + and - in regex in scala 2023-03-29 14:55:50 +05:30
ssranjani06
b658c01ada
Unit and functional tests for number of dots allowed positive cases 2023-03-24 12:20:32 +05:30
ssranjani06
45358ce4cd
Unit and functional tests for number of dots allowed 2023-03-24 11:49:31 +05:30
Nicholas Spadaccino
fbd009ee11
Bump version to v0.18.4 2023-03-23 15:16:52 -04:00
Nicholas Spadaccino
3cb20db211
Merge pull request #1239 from Aravindh-Raju/aravindhr/fix-converter-to-24hr-fmt
Updates to zone sync scheduler
2023-03-23 15:16:15 -04:00
ssranjani06
20bcbee731
Changes in reference.conf 2023-03-23 16:14:36 +05:30
ssranjani06
ec94b9fc35
Making changes in unit test while mocking valid email config 2023-03-23 15:58:47 +05:30
ssranjani06
3ce38a997d
Number od dots allowed after @ feature 2023-03-23 14:30:14 +05:30
ssranjani06
bb2bc3a43d
Functional Test for Update Zone changes for invalid domain 2023-03-23 11:17:41 +05:30
ssranjani06
a7708657f1
Functional Test for Update Zone changes error resolve 2023-03-23 10:52:51 +05:30
ssranjani06
1f0b026ef1
Functional Test for Update Zone changes 2023-03-23 10:25:28 +05:30
Aravindh-Raju
34395035be
remove superuser conf 2023-03-22 21:36:26 +05:30
Aravindh-Raju
03e450f8ba
make 24hrs format in all cases 2023-03-22 21:30:39 +05:30
ssranjani06
572c4bd28e
Functional Test for Update Zone invalid email test 1 2023-03-22 14:53:05 +05:30
ssranjani06
e3006bfaa2
Functional Test for Update Zone invalid email test 2023-03-22 12:31:17 +05:30
ssranjani06
49d7c8c3a9
Functional Test for Update Zone 2023-03-22 12:14:13 +05:30
ssranjani06
8eea0a18b0
Trigger test 2023-03-22 11:31:17 +05:30
ssranjani06
03ccd76acd
Create zone functional tests correction 3 2023-03-22 11:14:57 +05:30
ssranjani06
f8e8c79a15
Create zone functional tests correction2 2023-03-22 10:53:23 +05:30
ssranjani06
daf22c2de6
Create zone functional tests correction 2023-03-22 10:26:14 +05:30
ssranjani06
e09a85165f
Create zone functional tests 2023-03-22 10:05:30 +05:30
Aravindh-Raju
d973b2f042
remove super user config 2023-03-21 18:58:39 +05:30
Aravindh-Raju
16e065946e
add trailing zero for minutes 2023-03-21 18:47:52 +05:30
Aravindh-Raju
9842af62fd
Trigger test 2023-03-21 17:29:43 +05:30
Aravindh-Raju
a05d776ed1
add back config 2023-03-21 16:28:52 +05:30
Aravindh-Raju
9e321985ab
remove superUser change 2023-03-21 16:24:58 +05:30
Aravindh-Raju
6e779fbdf9
hide calendar and css overrides 2023-03-21 16:23:05 +05:30
ssranjani06
20d9caee87
Functional test changes 2023-03-21 15:52:47 +05:30
Aravindh-Raju
9e6a10159f
updates to zone sync scheduler 2023-03-21 15:46:08 +05:30
ssranjani06
50bca731f8
Making changes in ZoneServiceSpec.scala 2023-03-21 15:22:43 +05:30
ssranjani06
64b05b107d
Committing ZoneServiceIntegrationSpec.scala 2023-03-21 14:45:04 +05:30
ssranjani06
5a5488925a
Zone related email validation with unit tests 2023-03-21 12:45:46 +05:30
Nicholas Spadaccino
4ac406f92d
Bump version to v0.18.3 2023-03-17 16:00:15 -04:00
Nicholas Spadaccino
d9d0e52b4f
Merge pull request #1235 from Aravindh-Raju/aravindhr/add-zone-sync-scheduler-logs
Add logs and time converter for automated zone syncs
2023-03-17 15:59:54 -04:00
Aravindh-Raju
4c9ea3148b
add time converter 2023-03-17 17:19:05 +05:30
Aravindh-Raju
9098b99e53
add logs 2023-03-17 12:47:20 +05:30
Nicholas Spadaccino
2fe54a3da0
Bump version to v0.18.2 2023-03-15 17:11:24 -04:00
Nicholas Spadaccino
2c96f13b8c
Merge pull request #1177 from Aravindh-Raju/aravindhr/manage-portal-logging
Remove portal logs from browser console
2023-03-15 17:10:37 -04:00
Nicholas Spadaccino
55845519df
Merge branch 'master' into aravindhr/manage-portal-logging 2023-03-15 16:56:34 -04:00
nspadaccino
29424382b5
Merge branch 'master' into aravindhr/manage-portal-logging 2023-03-15 16:54:30 -04:00
Nicholas Spadaccino
2b0beec05c
Merge pull request #1230 from nspadaccino/nspadaccino/debug-dns-message
Debug DNS Resolver Info
2023-03-15 16:50:08 -04:00
nspadaccino
a1c575b0ca
remove unnecessary local bind conf options 2023-03-15 16:12:56 -04:00
Nicholas Spadaccino
d53e774b17
Merge branch 'master' into nspadaccino/debug-dns-message 2023-03-15 15:56:05 -04:00
nspadaccino
e58442fda8
Removed EDNS related code, only logging resolver info for now 2023-03-15 15:55:23 -04:00
Nicholas Spadaccino
58bf959ff2
Merge pull request #1233 from Aravindh-Raju/aravindhr/add-zone-sync-scheduler-config
Add config for zone sync scheduler
2023-03-15 15:46:47 -04:00
Nicholas Spadaccino
a448dfbb6d
Merge branch 'master' into aravindhr/add-zone-sync-scheduler-config 2023-03-15 15:25:01 -04:00
Nicholas Spadaccino
a1c0dc7228
Merge pull request #1234 from Jay07GIT/disallow_ipv4_in_cname_manage_records
Restrict IPv4 as Cname in manage records
2023-03-15 15:24:53 -04:00
Nicholas Spadaccino
ecf09ed223
Merge branch 'master' into disallow_ipv4_in_cname_manage_records 2023-03-15 15:09:19 -04:00
Nicholas Spadaccino
b7937c5a82
Merge pull request #1215 from ssranjani06/feature/groupemailvalidations
group email validation code along with unit test
2023-03-15 15:09:02 -04:00
Nicholas Spadaccino
526292428c
Merge branch 'master' into disallow_ipv4_in_cname_manage_records 2023-03-15 13:46:48 -04:00
Jay07GIT
1ed7672c90
Restrict IPv4 as Cname in manage records 2023-03-15 18:30:26 +05:30
ssranjani06
55cccc520b
Merge branch 'feature/groupemailvalidations' of https://github.com/ssranjani06/vinyldns into feature/groupemailvalidations
To push the changes
2023-03-14 12:03:39 +05:30
ssranjani06
5208725762
Review changes 2023-03-14 12:01:57 +05:30
ssranjani06
b568aec0c7
review changes 2023-03-14 11:59:45 +05:30
nspadaccino
7a86bdfd71
Commented out things I've tried to enable EDNS NSID option 2023-03-10 17:03:23 -05:00
Nicholas Spadaccino
7138175bd8
Merge branch 'master' into feature/groupemailvalidations 2023-03-10 16:34:30 -05:00
Nicholas Spadaccino
874bd11e47
Merge pull request #1178 from Jay07GIT/disallow_ipv4_in_cname
Restrict IPV4 address as CNAME in DNS changes.
2023-03-10 16:12:10 -05:00
ssranjani06
ff68756101
Aravindh's Suggested changes 2023-03-09 15:17:55 +05:30
ssranjani06
fe3157e15d
Rectified the error in functional test 2023-03-09 14:38:58 +05:30
ssranjani06
5eeea3b0e0
Merge branch 'feature/groupemailvalidations' of https://github.com/ssranjani06/vinyldns into feature/groupemailvalidations 2023-03-09 12:47:53 +05:30
ssranjani06
8b9ca181e8
added email validation for starting with dot and consequtive dots 2023-03-09 12:38:32 +05:30
Aravindh-Raju
0987177310
resolve test 2023-03-09 10:18:08 +05:30
nspadaccino
7e7e25a1e9
Resolve failing tests, add NSID info to logs 2023-03-08 16:30:20 -05:00
Aravindh-Raju
62ec7a6a52
add config for zone sync scheduler 2023-03-08 15:40:07 +05:30
nspadaccino
4c45da9f87
Update dns resolver debug message 2023-03-07 17:04:23 -05:00
nspadaccino
9e22c00b10
Added logger message with dns resolver info 2023-03-07 16:31:12 -05:00
Jay
35ee612a09
Merge branch 'master' into disallow_ipv4_in_cname 2023-03-07 17:54:15 +05:30
Jay07GIT
844cd66bec
dummy commit 2023-03-07 17:52:24 +05:30
Jay07GIT
f9dc0efc2f
update 2023-03-07 16:49:51 +05:30
Jay07GIT
068d408e7a
Updated the error message 2023-03-07 16:26:00 +05:30
Jay
c000e65512
Merge branch 'master' into sort_recordtype 2023-03-07 15:35:03 +05:30
ssranjani06
e9e1049e58
Merge branch 'master' into feature/groupemailvalidations 2023-03-06 10:45:49 +05:30
ssranjani06
3448e0442d
valid regex for email validation 2023-03-06 10:38:19 +05:30
Nicholas Spadaccino
8e215e49ba
Bump version to v0.18.1 2023-03-02 11:35:06 -05:00
Nicholas Spadaccino
c676eab1b4
Merge pull request #1226 from Aravindh-Raju/aravindhr/fix-zonechange-bug
Fix zone change history bug
2023-03-02 11:33:30 -05:00
Nicholas Spadaccino
3f83123279
Merge branch 'master' into aravindhr/fix-zonechange-bug 2023-03-02 11:10:29 -05:00
Nicholas Spadaccino
2f73ecb309
Merge pull request #1224 from Aravindh-Raju/aravindhr/fix-autocomplete-search
Fix autocomplete search
2023-03-02 11:10:11 -05:00
Nicholas Spadaccino
7f8a078685
Merge branch 'master' into aravindhr/fix-autocomplete-search 2023-03-02 10:32:11 -05:00
ssranjani06
975b362f0c
Email validation code and test changes 2023-03-02 15:24:12 +05:30
Arpit Shah
dea6369a96
Merge pull request #1227 from Aravindh-Raju/aravindhr/trial-fix-zone-sync-scheduler
Debug zone sync scheduler issue
2023-03-01 06:25:56 -05:00
Aravindh-Raju
82c305eff5
Trigger tests 2023-03-01 09:29:01 +05:30
Aravindh-Raju
09862d7754
Trigger tests 2023-03-01 08:55:39 +05:30
Aravindh-Raju
9132dfc54c
debug zone sync scheduler issue 2023-02-28 17:27:05 +05:30
ssranjani06
4b9ee9ae2d
Email validation regex changes 2023-02-27 12:15:26 +05:30
Aravindh-Raju
59cdf92890
fix zonechange history bug 2023-02-27 12:08:06 +05:30
ssranjani06
14cf663f0e
functional test for wildcard latest 2023-02-27 10:30:43 +05:30
ssranjani06
e3a6413635
functional test for wildcard 2023-02-27 10:28:27 +05:30
Aravindh-Raju
4bbbe323b3
optimize code 2023-02-27 09:36:13 +05:30
Aravindh-Raju
629bf390e8
optimize code 2023-02-24 17:58:40 +05:30
Aravindh-Raju
697c8113c6
fix autocomplete search 2023-02-24 17:41:14 +05:30
Nicholas Spadaccino
8d2d13dec8
Bump version to v0.18.0 2023-02-23 16:24:38 -05:00
Nicholas Spadaccino
b3215ad5db
Merge pull request #1204 from Jay07GIT/failure_change_metrics
Added metrics api for zone change failure and record change failure
2023-02-23 16:23:59 -05:00
Nicholas Spadaccino
3308ad54c9
Merge branch 'master' into failure_change_metrics 2023-02-23 16:09:35 -05:00
Nicholas Spadaccino
09e2c32468
Merge pull request #1214 from Aravindh-Raju/aravindhr/add-zone-sync-scheduler
Add zone sync scheduler
2023-02-23 16:09:07 -05:00
Nicholas Spadaccino
771cab23ea
Merge branch 'master' into aravindhr/add-zone-sync-scheduler 2023-02-23 15:56:19 -05:00
Nicholas Spadaccino
dc0ae0b79c
Merge pull request #1219 from Aravindh-Raju/aravindhr/fix-record-delete-bug
Fix batch change validation bug
2023-02-23 15:55:48 -05:00
Nicholas Spadaccino
ad2b27ee69
Merge branch 'master' into aravindhr/fix-record-delete-bug 2023-02-23 15:39:34 -05:00
Nicholas Spadaccino
97c6cb8468
Merge pull request #1192 from Aravindh-Raju/aravindhr/strongly-typed-crypto
Refactor crypto be strongly typed
2023-02-23 15:39:07 -05:00
nspadaccino
e78f314053
Resolve failing test 2023-02-23 15:21:20 -05:00
Nicholas Spadaccino
14f66b24dc
Merge branch 'master' into aravindhr/strongly-typed-crypto 2023-02-23 15:09:23 -05:00
Nicholas Spadaccino
8d3e9d4fd3
Merge pull request #1222 from nspadaccino/nspadaccino/super-update-owner-group_copy
Allow Super Users to Update Record Owner Group in Shared Zone
2023-02-23 15:08:45 -05:00
nspadaccino
7b7f670e10
Merge branch 'master' into nspadaccino/ptr-filter 2023-02-23 14:05:14 -05:00
Aravindh-Raju
e38353162a
fix styling 2023-02-23 23:29:59 +05:30
Aravindh-Raju
7ffc741e80
add validation for cron strings 2023-02-23 23:15:52 +05:30
ssranjani06
4c05924920
Unit tests for email validation 2023-02-23 15:07:06 +05:30
Aravindh-Raju
2b20b077f4
Trigger tests 2023-02-23 11:57:13 +05:30
Aravindh-Raju
39095c5655
resolve minor issues 2023-02-23 10:42:59 +05:30
Aravindh-Raju
804d0c54c8
resolve automated sync issue 2023-02-22 21:00:57 +05:30
Aravindh-Raju
588806c274
add type for automated zone sync 2023-02-22 19:35:29 +05:30
Aravindh-Raju
6631019eac
update help-block 2023-02-22 18:08:10 +05:30
Aravindh-Raju
8719cf254c
add scheduler in separate section 2023-02-22 18:03:26 +05:30
Aravindh-Raju
9265cb0af1
resolve tests 2023-02-22 09:22:05 +05:30
Aravindh-Raju
066b8dc972
address pr comment 2023-02-22 09:07:34 +05:30
Nicholas Spadaccino
652756a253
Merge branch 'master' into failure_change_metrics 2023-02-21 16:58:04 -05:00
Aravindh-Raju
d7203cbbd4
update test 2023-02-21 17:07:53 +05:30
Aravindh-Raju
5850dbcb9a
add test coverage 2023-02-21 17:04:50 +05:30
Aravindh-Raju
80a31c09c7
fix styling 2023-02-21 16:47:02 +05:30
Aravindh-Raju
edb5f56817
add test 2023-02-21 16:44:22 +05:30
Aravindh-Raju
8309ed187a
update message for additional info column 2023-02-21 15:20:32 +05:30
nspadaccino
e4a1357d7e
Replace old leftResultOf and rightResultOf calls with IO methods 2023-02-20 17:19:08 -05:00
nspadaccino
4aab2e8e17
Change Datetime instance to Instant 2023-02-20 17:05:07 -05:00
nspadaccino
7a31c87fab
Merge branch 'master' into nspadaccino/super-update-owner-group 2023-02-20 16:57:01 -05:00
Aravindh-Raju
af82756577
address css changes requested 2023-02-20 08:37:36 +05:30
Nicholas Spadaccino
0b99d811b3
Merge pull request #1197 from Aravindh-Raju/aravindhr/update-zone-recordchange-pagination
Update zone record change pagination
2023-02-17 15:40:31 -05:00
Aravindh-Raju
b6c91a6312
address css changes requested 2023-02-17 12:47:01 +05:30
Nicholas Spadaccino
3b589c1069
Merge branch 'master' into aravindhr/update-zone-recordchange-pagination 2023-02-16 16:16:40 -05:00
Nicholas Spadaccino
5098b79065
Merge pull request #1189 from Aravindh-Raju/aravindhr/add-autocomplete-zone-search
Add zone autocomplete search
2023-02-16 14:40:37 -05:00
Nicholas Spadaccino
2e95d1df3b
Merge branch 'master' into aravindhr/add-autocomplete-zone-search 2023-02-16 14:23:35 -05:00
Nicholas Spadaccino
b7ff3b83c4
Merge pull request #1199 from Aravindh-Raju/aravindhr/resolve-group-details-update
Resolve bug in group details update
2023-02-16 10:52:25 -05:00
Aravindh-Raju
67f4e43bd2
add autocomplete for search by group 2023-02-15 14:41:40 +05:30
Aravindh R
6918db66a8
Merge branch 'master' into aravindhr/update-zone-recordchange-pagination 2023-02-15 11:17:33 +05:30
Aravindh-Raju
82dc9b028a
address review comment 2023-02-15 11:15:16 +05:30
Aravindh R
5659dbebd8
Merge branch 'master' into aravindhr/strongly-typed-crypto 2023-02-15 09:57:35 +05:30
Aravindh R
7d614ebdcf
Merge branch 'master' into aravindhr/add-autocomplete-zone-search 2023-02-15 09:49:13 +05:30
Nicholas Spadaccino
44a6950dcb
Merge branch 'master' into aravindhr/resolve-group-details-update 2023-02-14 15:53:20 -05:00
Nicholas Spadaccino
b6df9e6c8a
Merge pull request #1206 from Aravindh-Raju/aravindhr/update-group-history
Update group history message
2023-02-14 15:50:50 -05:00
Nicholas Spadaccino
4be3e95296
Merge branch 'master' into aravindhr/update-group-history 2023-02-14 15:16:38 -05:00
Aravindh-Raju
6feacc7f32
code cleanup 2023-02-03 18:58:10 +05:30
Aravindh-Raju
7640b30757
add test 2023-02-03 15:15:38 +05:30
Aravindh-Raju
e0ee86f35d
fix batch delete bug 2023-02-03 14:42:28 +05:30
ssranjani06
6fa2881151
test.com related test 2023-01-20 14:08:24 +05:30
ssranjani06
45f389c102
*dummy.com related test 2023-01-20 13:15:09 +05:30
ssranjani06
9dfa2b0627
unit test modifications 2023-01-19 12:02:08 +05:30
ssranjani06
f4d1f5ff82
committing the config md file 2023-01-19 11:38:15 +05:30
ssranjani06
a40ec652f4
code changes when domain is empty 2023-01-18 14:02:01 +05:30
ssranjani06
c18b6c59d4
reference.conf changes 2023-01-18 10:53:58 +05:30
ssranjani06
ba15d7d359
Removing comcast.com from config file 2023-01-18 10:24:14 +05:30
ssranjani06
73ced4bc1f
functional test fix for invalid email 2023-01-12 14:40:54 +05:30
ssranjani06
cb758304af
Functional test config changes 2023-01-12 10:56:01 +05:30
ssranjani06
23950ebecf
fix for functional test 2023-01-11 10:25:56 +05:30
ssranjani06
fca6bdaa1a
Removing println statements 2023-01-10 15:12:24 +05:30
ssranjani06
9b4c1bf31b
Email validation *comcast.com changes 2023-01-10 15:05:47 +05:30
ssranjani06
f6b03f806f
Syntax error in functional test 2023-01-10 09:54:08 +05:30
Aravindh-Raju
9a12f1a865
add option to remove zone sync schedule 2023-01-09 16:39:30 +05:30
Aravindh-Raju
1a398f4261
update comments and remove unnecessary logs 2023-01-06 16:26:40 +05:30
Aravindh-Raju
1b52390eaf
update comments 2023-01-06 16:18:56 +05:30
ssranjani06
766c551bf9
Functional test for invalid email 2023-01-06 15:25:43 +05:30
Aravindh-Raju
37cd0fd19f
add header 2023-01-06 13:10:05 +05:30
Aravindh-Raju
5208983575
add unit tests 2023-01-06 13:02:25 +05:30
ssranjani06
e304f6859b
MembershipServiceSpec unit test changes 2023-01-05 14:44:54 +05:30
ssranjani06
dc08eb3bd6
group email validation code along with unit test 2023-01-05 12:33:42 +05:30
Aravindh-Raju
7965310d11
add functional tests 2023-01-03 20:49:12 +05:30
Aravindh-Raju
8554b705d7
add integration tests 2023-01-03 19:37:20 +05:30
Aravindh-Raju
ca5702c0b3
update test comment 2023-01-03 18:45:02 +05:30
Aravindh-Raju
275048ec3c
add test 2023-01-03 18:40:35 +05:30
Aravindh-Raju
72f1a1149f
resolve test 2023-01-03 16:43:50 +05:30
Aravindh-Raju
e5e33aab35
add requestor field 2023-01-03 14:37:25 +05:30
Aravindh-Raju
bd5cdd094b
change scheduler format 2023-01-03 13:38:08 +05:30
Aravindh-Raju
a59b580174
remove unused imports 2023-01-02 18:30:58 +05:30
Aravindh-Raju
1a9bf5cd89
Add zone sync scheduler 2023-01-02 18:20:53 +05:30
Nicholas Spadaccino
94277086a6
Bump version to v0.17.2 2022-12-22 13:53:33 -05:00
Arpit Shah
80761d364e
Merge pull request #1212 from Aravindh-Raju/aravindhr/fix-instant-json-serializing
Resolve datetime json response
2022-12-22 13:52:07 -05:00
Aravindh R
8a0d46f7b9
Merge branch 'master' into aravindhr/fix-instant-json-serializing 2022-12-22 11:55:34 +05:30
Aravindh-Raju
7ecaca3e54
Resolve instant json response 2022-12-22 11:45:41 +05:30
Nicholas Spadaccino
4fdb5f5e1d
Bump version to v0.17.1 2022-12-20 12:07:04 -05:00
Arpit Shah
952a6a10be
Merge pull request #1210 from Aravindh-Raju/aravindhr/trim-multiline-log-messages
Trim multi line log messages to single line
2022-12-20 12:05:55 -05:00
Aravindh R
e5ba679eee
Merge branch 'master' into aravindhr/trim-multiline-log-messages 2022-12-20 15:47:11 +05:30
Aravindh-Raju
8edda53454
Revert change 2022-12-20 15:46:50 +05:30
Aravindh-Raju
8cfc8560c7
Remove tab characters from log message 2022-12-20 15:44:48 +05:30
Aravindh-Raju
6c33a7aa89
Resolve test 2022-12-20 15:12:56 +05:30
Aravindh-Raju
0129ca88b9
Remove test code 2022-12-20 12:51:49 +05:30
Aravindh-Raju
75882fa9df
Trim multi line throwables to single line 2022-12-20 12:49:32 +05:30
Aravindh-Raju
96fdb96980
Trim multi line log messages to single line 2022-12-19 20:44:23 +05:30
Jay07GIT
bc21d26aa3
update 2022-11-21 12:50:19 +05:30
Jay07GIT
ee2581cc3a
Dependencies upgraded for both mac M1 and Intel chip 2022-11-21 11:03:53 +05:30
Aravindh R
2eec350bf3
Merge branch 'master' into aravindhr/update-group-history 2022-11-18 12:19:10 +05:30
Aravindh-Raju
e7fec97c7a
Add test 2022-11-18 12:15:44 +05:30
Aravindh-Raju
379178931c
Update group history message 2022-11-18 11:29:57 +05:30
Nicholas Spadaccino
9222fbc817
Merge branch 'master' into groups-maxitems-config 2022-11-17 17:15:31 -05:00
Nicholas Spadaccino
a24436766c
Update limits config
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-11-17 17:07:08 -05:00
Jay07GIT
c697b938a8
Resolved failed tests 2022-11-16 10:04:40 +05:30
Jay07GIT
33e3165cc1
Added metrics api for zone change failure and record change failure 2022-11-16 09:33:32 +05:30
Nicholas Spadaccino
da5f649783
Bump version to v0.17.0 2022-11-15 10:36:39 -05:00
Nicholas Spadaccino
bc66767b85
Add groups maxitems config to meta class 2022-11-11 16:55:05 -05:00
Nicholas Spadaccino
f9ee92c43d
Merge pull request #1167 from Aravindh-Raju/aravindhr/replace-await-with-io
Replace await with IO
2022-11-09 14:04:04 -05:00
Nicholas Spadaccino
544f42e855
Merge branch 'master' into aravindhr/replace-await-with-io 2022-11-09 13:16:10 -05:00
Nicholas Spadaccino
c9a73a7f63
Merge pull request #1165 from Jay07GIT/updated_CIDR_library
Replaced orchard CIDR library to IP4s library
2022-11-09 12:07:13 -05:00
Aravindh-Raju
63050e90bc
Resolve group edit details update 2022-11-09 11:53:41 +05:30
Jay
c7ce795c27
Merge branch 'master' into updated_CIDR_library 2022-11-09 11:02:53 +05:30
Nicholas Spadaccino
46579ec033
Merge pull request #1162 from nspadaccino/nspadaccino/view-shared-zones
Allow All Users to View Any Shared Zone
2022-11-08 15:15:17 -05:00
Nicholas Spadaccino
92ad4e4309
Merge branch 'master' into nspadaccino/view-shared-zones 2022-11-08 13:38:18 -05:00
Nicholas Spadaccino
c053b3cb8d
Merge pull request #1163 from Aravindh-Raju/aravindhr/change-to-java-time
Change to java8 time
2022-11-08 13:37:30 -05:00
Nicholas Spadaccino
de866d1b2a
Merge branch 'master' into aravindhr/change-to-java-time 2022-11-08 13:20:34 -05:00
Nicholas Spadaccino
1c2c7b3424
Merge pull request #1166 from Aravindh-Raju/aravindhr/add-auth-for-status
Add auth on POST /status
2022-11-08 12:48:40 -05:00
Nicholas Spadaccino
55c531944b
Merge branch 'master' into aravindhr/add-auth-for-status 2022-11-08 12:25:19 -05:00
Nicholas Spadaccino
43852b232a
Merge pull request #1170 from Aravindh-Raju/aravindhr/override-toString-User
Remove keys from User by overriding 'toString'
2022-11-08 12:25:11 -05:00
Aravindh-Raju
f03915c0b4
Trigger tests 2022-11-08 13:31:37 +05:30
Aravindh-Raju
0098097027
Resolve tests 2022-11-08 12:50:56 +05:30
Aravindh-Raju
2b78b4439d
Resolve tests 2022-11-08 11:59:05 +05:30
Aravindh R
58f920054d
Merge branch 'master' into aravindhr/replace-await-with-io 2022-11-08 11:16:43 +05:30
Aravindh-Raju
9c721fd1c2
Resolve tests 2022-11-08 11:02:47 +05:30
Aravindh-Raju
cc1526f6e8
Resolve tests 2022-11-08 10:43:43 +05:30
Aravindh R
22ea714d9d
Merge branch 'master' into aravindhr/change-to-java-time 2022-11-08 10:15:51 +05:30
Aravindh R
10871ef9a7
Merge branch 'master' into aravindhr/add-auth-for-status 2022-11-08 09:45:50 +05:30
Aravindh R
ad1b8c75ba
Merge branch 'master' into aravindhr/override-toString-User 2022-11-08 09:34:29 +05:30
Nicholas Spadaccino
741f376121
Merge pull request #1172 from Aravindh-Raju/aravindhr/fix-spf-record-length
Correctly handle SPF record data
2022-11-07 16:24:28 -05:00
Nicholas Spadaccino
475cbd04ff
Merge branch 'master' into aravindhr/fix-spf-record-length 2022-11-07 15:49:07 -05:00
Nicholas Spadaccino
c6bca186b6
Merge pull request #1171 from Aravindh-Raju/aravindhr/override-toString-ZoneConnection
Override ZoneConnection toString
2022-11-07 15:48:52 -05:00
Nicholas Spadaccino
4b4cf8f943
Merge branch 'master' into aravindhr/fix-spf-record-length 2022-11-07 15:18:25 -05:00
Nicholas Spadaccino
5fa74987b4
Merge branch 'master' into aravindhr/override-toString-ZoneConnection 2022-11-07 15:18:08 -05:00
Nicholas Spadaccino
52aa3846c2
Merge branch 'master' into aravindhr/override-toString-User 2022-11-07 14:42:33 -05:00
Nicholas Spadaccino
ab171c530d
Merge pull request #1168 from Aravindh-Raju/aravindhr/remove-complete-status
Remove 'Complete' zone change status
2022-11-07 14:42:23 -05:00
Nicholas Spadaccino
001e27bcf5
Merge branch 'master' into aravindhr/remove-complete-status 2022-11-07 13:44:46 -05:00
Nicholas Spadaccino
e1df5a8958
Merge branch 'master' into nspadaccino/view-shared-zones 2022-11-07 12:26:04 -05:00
Aravindh R
157072da86
Merge branch 'master' into aravindhr/update-zone-recordchange-pagination 2022-11-01 15:50:26 +05:30
Aravindh-Raju
3561692006
Resolve test 2022-11-01 15:05:28 +05:30
Aravindh-Raju
1e1b105eb8
Use LIMIT and OFFSET 2022-11-01 13:54:57 +05:30
Aravindh-Raju
72895d6f98
Update test comment 2022-10-27 11:46:14 +05:30
Nicholas Spadaccino
5e9616c12f
Bump version to v0.16.2 2022-10-26 15:01:05 -04:00
Nicholas Spadaccino
696a6a64a3
Merge pull request #1195 from nspadaccino/hotfix-membership-maxitems
Hotfix: Raise Limit of Maximum Items in a Group List to 3000
2022-10-26 14:59:49 -04:00
Nicholas Spadaccino
2761889e92
Raised limit for the maximum amount of groups in a groups list to 3000.
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-10-26 14:38:10 -04:00
Aravindh R
99801f4be0
Merge branch 'master' into aravindhr/strongly-typed-crypto 2022-10-26 12:51:58 +05:30
Aravindh-Raju
a833097a91
Add test and resolve errors 2022-10-26 12:35:31 +05:30
Nicholas Spadaccino
1d12551263
Bump version to v0.16.1 2022-10-25 13:58:34 -04:00
Nicholas Spadaccino
c747c947f6
Merge pull request #1194 from Aravindh-Raju/aravindhr/dotted-hosts-update-fix
Fix dotted hosts update (PR #1187)
2022-10-25 13:44:39 -04:00
Aravindh-Raju
99d067d965
Add test 2022-10-25 14:26:42 +05:30
Aravindh R
c30012876b
Merge branch 'master' into aravindhr/strongly-typed-crypto 2022-10-25 11:42:23 +05:30
Aravindh-Raju
dd6f1ba008
Fix dotted hosts update 2022-10-25 10:56:16 +05:30
Aravindh R
bd37b0491c
Merge branch 'vinyldns:master' into aravindhr/update-zone-recordchange-pagination 2022-10-25 10:00:18 +05:30
Nicholas Spadaccino
9e0a38d3b3
Bump version to v0.16.0 2022-10-20 15:45:45 -04:00
Nicholas Spadaccino
03b3cf4bd5
Merge pull request #1187 from Aravindh-Raju/aravindhr/allow-dotted-hosts
Allow dotted hosts creation using config
2022-10-20 15:44:45 -04:00
Nicholas Spadaccino
f593a491f1
Merge branch 'master' into aravindhr/allow-dotted-hosts 2022-10-20 13:31:21 -04:00
Aravindh-Raju
ac79452736
Paginate using id in recordchange 2022-10-20 13:06:49 +05:30
Nicholas Spadaccino
8cf7e44a6d
Bump version to v0.15.1 2022-10-19 13:33:22 -04:00
Nicholas Spadaccino
38ded5c892
Merge pull request #1193 from Aravindh-Raju/aravindhr/fix-group-max-items
Revert groups maxItems (PR #1142 fix)
2022-10-19 13:32:02 -04:00
Aravindh-Raju
2e723268fc
Revert maxItems change 2022-10-19 11:36:52 +05:30
Aravindh-Raju
da7240ae59
Update config 2022-10-14 13:20:31 +05:30
Aravindh-Raju
0d2a47e827
Refactor crypto to be strongly typed 2022-10-14 12:26:45 +05:30
Aravindh-Raju
6b0b1a32f6
Rename config properties 2022-10-14 12:02:22 +05:30
Nicholas Spadaccino
3189bcbdfa
Bump version to v0.15.0 2022-10-12 14:35:09 -04:00
Aravindh-Raju
3527fdb722
Add test 2022-10-12 12:22:06 +05:30
Aravindh-Raju
a0448b922b
Update docs 2022-10-12 11:54:47 +05:30
Aravindh-Raju
adb933a783
Address PR comments 2022-10-12 11:37:35 +05:30
Nicholas Spadaccino
b8298de83c
Merge pull request #1186 from Jay07GIT/update_dev_guide_func_test
updated DEVELOPER_GUIDE.md
2022-10-11 16:41:14 -04:00
Nicholas Spadaccino
72697ab337
Merge branch 'master' into update_dev_guide_func_test 2022-10-11 16:18:53 -04:00
Nicholas Spadaccino
6ede611d43
Merge pull request #1185 from Jay07GIT/contribution_authors_md
updated AUTHORS.md
2022-10-11 16:18:25 -04:00
Nicholas Spadaccino
a9e9513ff0
Merge branch 'master' into contribution_authors_md 2022-10-11 16:17:29 -04:00
Nicholas Spadaccino
fa03c5d398
Merge pull request #1191 from JoshSEdwards/JoshSEdwards/documentation-for-apple-m1-support
Update documentation for Apple M1 support
2022-10-11 16:16:34 -04:00
Nicholas Spadaccino
41d9c91a3f
Merge branch 'master' into JoshSEdwards/documentation-for-apple-m1-support 2022-10-11 16:13:36 -04:00
Nicholas Spadaccino
74683b9f19
Merge pull request #1157 from Aravindh-Raju/aravindhr/add-group-change-history
Add group change history
2022-10-11 16:12:03 -04:00
Nicholas Spadaccino
d313214226
Merge branch 'master' into contribution_authors_md 2022-10-11 15:26:17 -04:00
Aravindh-Raju
843dcf20e5
Resolve DNS failure 2022-10-11 18:04:33 +05:30
Nicholas Spadaccino
fbd9ca663f
Merge branch 'master' into aravindhr/add-group-change-history 2022-10-10 17:58:32 -04:00
Nicholas Spadaccino
d36a13fc58
Merge pull request #1158 from Jay07GIT/zone_history_tab
Add Zone history tab in zones page
2022-10-10 17:58:24 -04:00
Nicholas Spadaccino
8eb940b6ba
Merge branch 'master' into zone_history_tab 2022-10-10 15:34:00 -04:00
Nicholas Spadaccino
9e40ab3e52
Merge branch 'master' into aravindhr/add-group-change-history 2022-10-10 15:33:39 -04:00
Nicholas Spadaccino
518f625533
Merge pull request #1142 from Aravindh-Raju/aravindhr/add-groups-pagination
Add pagination in group view
2022-10-10 15:32:36 -04:00
Nicholas Spadaccino
ae73166dce
Merge branch 'master' into aravindhr/add-groups-pagination 2022-10-10 13:54:47 -04:00
Nicholas Spadaccino
d6b7a0aee8
Merge pull request #1145 from Aravindh-Raju/aravindhr/allow-recordnotexists-change
Allow deletion of records that don't exist
2022-10-10 13:53:43 -04:00
Joshua Edwards
e165ef8d91 Merge branch 'master' into JoshSEdwards/documentation-for-apple-m1-support 2022-10-10 09:49:17 -04:00
Aravindh R
bb771e902a
Merge branch 'master' into aravindhr/add-groups-pagination 2022-10-10 15:07:00 +05:30
Aravindh-Raju
33cf6537dd
Make sort case insensitive 2022-10-10 14:51:36 +05:30
Aravindh-Raju
125aa892d4
Fix sorting 2022-10-10 12:40:47 +05:30
Jay
5f6ae4803b
Merge branch 'master' into update_dev_guide_func_test 2022-10-10 11:21:19 +05:30
Aravindh R
ecc8d02c54
Merge branch 'master' into aravindhr/allow-recordnotexists-change 2022-10-10 10:13:56 +05:30
Aravindh-Raju
fd0a38802d
Trigger build 2022-10-08 16:38:54 +05:30
Aravindh-Raju
3e509d5ce5
Fix test 2022-10-08 16:10:20 +05:30
Aravindh R
4cfd4ff59c
Merge branch 'master' into aravindhr/allow-dotted-hosts 2022-10-08 15:46:50 +05:30
Aravindh-Raju
552704eea3
Increase test coverage 2022-10-08 15:30:55 +05:30
Nicholas Spadaccino
3addb4bda5
Merge pull request #1144 from Jay07GIT/record_already_exist_add
Allow adding DNS records that already exist.
2022-10-07 16:51:37 -04:00
Joshua Edwards
a8dde474a6
Remove sbt-plugin from dependencies that need updating 2022-10-07 16:40:22 -04:00
Nicholas Spadaccino
721a60186e
Merge branch 'master' into record_already_exist_add 2022-10-07 16:35:21 -04:00
Nicholas Spadaccino
e1b5932649
Merge branch 'master' into aravindhr/add-groups-pagination 2022-10-07 15:49:38 -04:00
Nicholas Spadaccino
21c3d908cf
Merge pull request #1124 from Aravindh-Raju/aravindhr/filter-zones
Add ability to search/filter zones
2022-10-07 15:36:34 -04:00
Joshua Edwards
bfd369e9c0
Update developer guide with section on support for M1 Macs 2022-10-07 13:31:24 -04:00
Nicholas Spadaccino
34302ba28d
Merge branch 'master' into aravindhr/filter-zones 2022-10-07 11:24:40 -04:00
Aravindh-Raju
a60991a073
Address PR comments 2022-10-07 19:17:01 +05:30
Aravindh-Raju
91dea76774
Address PR comments 2022-10-07 16:02:03 +05:30
Aravindh-Raju
5950a84215
Address PR comments 2022-10-07 15:13:32 +05:30
Aravindh-Raju
30eb8e3959
Trigger action 2022-10-03 14:35:44 +05:30
Aravindh-Raju
524525e67b
Address PR comments 2022-10-03 14:22:21 +05:30
Aravindh-Raju
c2e364f6da
Add zone autocomplete search 2022-09-28 14:23:51 +05:30
Aravindh-Raju
18333fe3ab
Update config 2022-09-26 11:57:27 +05:30
Aravindh-Raju
371404a5e5
Update doc 2022-09-23 14:02:11 +05:30
Aravindh-Raju
4cf0d8a9a8
Update doc 2022-09-23 13:57:43 +05:30
Aravindh-Raju
97d9a2c3bb
Remove unused changes 2022-09-23 13:38:53 +05:30
Aravindh-Raju
d47d2e1a1d
Update documentation 2022-09-23 13:17:08 +05:30
Aravindh R
b59e637bcf
Merge branch 'master' into aravindhr/allow-dotted-hosts 2022-09-23 12:24:00 +05:30
Aravindh-Raju
e4ad55e5f7
Add documentation 2022-09-23 12:16:53 +05:30
Aravindh-Raju
204ce5f939
Add it test 2022-09-22 16:42:46 +05:30
Jay07GIT
08e2d6b867
update in tests 2022-09-22 15:27:05 +05:30
Aravindh-Raju
228d2c169a
Update tests 2022-09-22 15:16:35 +05:30
Jay07GIT
3303de2548
update 2022-09-22 13:12:06 +05:30
Jay07GIT
05c676e7e3
update 2022-09-22 12:51:07 +05:30
Jay07GIT
a6329251bf
Resolved conflicts 2022-09-22 12:40:45 +05:30
Jay
b9e18fd62d
Merge branch 'master' into disallow_ipv4_in_cname 2022-09-22 12:39:39 +05:30
Jay07GIT
ff674219a2
typo corrections 2022-09-22 12:36:00 +05:30
Aravindh-Raju
75feb72db4
Refactor functionality and tests 2022-09-21 17:40:59 +05:30
Jay07GIT
453d92df20
updated DEVELOPER_GUIDE.md 2022-09-20 14:18:05 +05:30
Jay07GIT
6f3ed18d4d
updated AUTHORS.md 2022-09-20 12:39:33 +05:30
Aravindh-Raju
c59f31f25d
Add tests 2022-09-20 12:21:02 +05:30
Jay07GIT
fc8b1240e4
updated AUTHORS.md 2022-09-20 10:16:14 +05:30
Aravindh-Raju
231a30a923
Resolve existing tests 2022-09-16 13:03:12 +05:30
Nicholas Spadaccino
c10f8e44a9
Bump version to v0.14.1 2022-09-15 16:00:57 -04:00
Nicholas Spadaccino
d21d25ffe9
Merge pull request #1182 from Jay07GIT/autocomplete_record_fix
Fix for safari issue in autocomplete search
2022-09-15 11:08:07 -04:00
Aravindh-Raju
2544d821cd
Restrict record type 2022-09-15 17:44:41 +05:30
Jay07GIT
e9a0063ea9
removed logs 2022-09-15 15:06:27 +05:30
Jay07GIT
d26d53a639
Fix for safari issue in autocomplete group search 2022-09-15 14:51:23 +05:30
Jay07GIT
701b60753c
Fix for safari issue in autocomplete recordset search 2022-09-15 14:24:32 +05:30
Nicholas Spadaccino
c223dc6373
Merge pull request #1150 from Jay07GIT/autocomplete_recordsetsearch
Auto complete feature in Global recordset search
2022-09-14 16:32:43 -04:00
Nicholas Spadaccino
f84f97f6ca
Merge branch 'master' into autocomplete_recordsetsearch 2022-09-14 11:29:19 -04:00
Nicholas Spadaccino
6addc21623
Merge pull request #1151 from Aravindh-Raju/aravindhr/add-groups-autocomplete
Add autocomplete in Group search
2022-09-14 11:05:16 -04:00
Nicholas Spadaccino
b33e12c198
Merge branch 'master' into aravindhr/add-groups-autocomplete 2022-09-14 10:44:45 -04:00
Nicholas Spadaccino
ddb4729183
Merge pull request #1152 from nspadaccino/nspadaccino/case-insensitive-zone-search
Add Unit Tests For Zone Search Case Insensitivity
2022-09-14 10:43:52 -04:00
Jay07GIT
b6a20ed6c3
remove css style 2022-09-14 17:49:35 +05:30
Jay
2debc086f5
Merge branch 'master' into sort_recordtype 2022-09-14 14:56:53 +05:30
Jay07GIT
2a60f7f2e1
update 2022-09-14 14:12:12 +05:30
Jay07GIT
327061f3c0
update tests 2022-09-14 14:07:16 +05:30
Aravindh-Raju
fe69ae7acf
Restrict user access 2022-09-14 14:07:08 +05:30
Jay07GIT
150cbd42ab
update tests 2022-09-14 13:53:18 +05:30
Aravindh R
aa23ef5bed
Merge branch 'master' into aravindhr/add-groups-autocomplete 2022-09-14 11:05:40 +05:30
Nicholas Spadaccino
c133f4c36a
Merge branch 'master' into nspadaccino/case-insensitive-zone-search 2022-09-13 16:10:27 -04:00
Nicholas Spadaccino
2d5914413f
Merge pull request #1159 from nspadaccino/nspadaccino/group-search-case
Add Case Insensitivity to Group Search
2022-09-13 16:09:22 -04:00
Nicholas Spadaccino
fb157bab87
Merge branch 'master' into nspadaccino/group-search-case 2022-09-13 15:53:59 -04:00
Nicholas Spadaccino
fe61181f79
Merge pull request #1134 from Jay07GIT/forwardslash_ignore_forwardzone
Added forward slash validation for forward and reverse zone
2022-09-13 15:38:20 -04:00
Jay07GIT
3c5fc57d01
update tests 2022-09-13 17:56:33 +05:30
Jay07GIT
67e22eb30e
update tests 2022-09-13 17:00:31 +05:30
Jay07GIT
f2cafd5d89
Added tests 2022-09-13 16:40:38 +05:30
Jay07GIT
f9cc06f261
Added record type sort in portal 2022-09-13 11:43:27 +05:30
Nicholas Spadaccino
55b319e4f4
Merge branch 'master' into forwardslash_ignore_forwardzone 2022-09-12 16:50:13 -04:00
Nicholas Spadaccino
ce716a3450
Merge pull request #1121 from Aravindh-Raju/aravindhr/validate-group-fields
Validate Group Fields in API
2022-09-12 16:46:12 -04:00
Nicholas Spadaccino
5f910cc201
Merge branch 'master' into aravindhr/validate-group-fields 2022-09-12 16:24:36 -04:00
Nicholas Spadaccino
7f9f4eed64
Merge pull request #1179 from Aravindh-Raju/aravindhr/menu-bar-bug-fix
PR #1116 Bug Fix
2022-09-12 16:18:14 -04:00
Nicholas Spadaccino
0251c6b4ad
Merge branch 'master' into aravindhr/menu-bar-bug-fix 2022-09-12 16:00:55 -04:00
Nicholas Spadaccino
0c718f739d
Merge pull request #1173 from Jay07GIT/zone_search_loader
Added spinner in zone search
2022-09-12 15:57:04 -04:00
Jay07GIT
6a07f533cb
Added record type sort in API 2022-09-12 18:17:42 +05:30
Aravindh-Raju
7c5bb63160
Fix bug 2022-09-12 13:16:07 +05:30
Jay07GIT
f29ae4af8b
Added func tests 2022-09-09 17:13:04 +05:30
Jay07GIT
813138b597
update 2022-09-09 15:48:43 +05:30
Jay07GIT
2180bae712
Added tests 2022-09-09 15:07:29 +05:30
Jay07GIT
3c3f2a0c00
Added Cname validation without IPaddress 2022-09-09 12:57:34 +05:30
Aravindh-Raju
adc212e04b
Change to debug 2022-09-09 12:23:07 +05:30
Aravindh R
3b77f29177
Merge branch 'vinyldns:master' into aravindhr/add-group-change-history 2022-09-09 12:15:26 +05:30
Aravindh-Raju
0766f64091
Trigger build 2022-09-09 11:58:55 +05:30
Aravindh R
515dca74a6
Merge branch 'master' into aravindhr/add-groups-pagination 2022-09-09 11:38:28 +05:30
Aravindh-Raju
567ae9ea6c
Change to debug 2022-09-09 11:37:33 +05:30
Aravindh R
186f83d6f6
Merge branch 'master' into aravindhr/manage-portal-logging 2022-09-09 10:23:25 +05:30
Aravindh-Raju
7deaa78df7
disable logs in production 2022-09-08 13:35:30 +05:30
Nicholas Spadaccino
f3bcb1c708
Remove todo comments 2022-09-06 16:27:22 -04:00
Aravindh-Raju
cb90f790bd
Update functionality 2022-09-06 17:34:56 +05:30
Aravindh R
bf7547e75c
Merge branch 'master' into aravindhr/override-toString-ZoneConnection 2022-09-05 16:11:06 +05:30
Aravindh-Raju
e04500656a
Update as separate test 2022-09-05 15:15:16 +05:30
Aravindh-Raju
96f179d694
Allow wildcard zones in config 2022-09-05 13:53:33 +05:30
Nicholas Spadaccino
f6b492448b
Update functional tests 2022-09-02 14:20:15 -04:00
Jay
a8fb744317
Merge branch 'master' into zone_search_loader 2022-09-02 18:44:05 +05:30
Jay07GIT
3485401bd3
Added spinner in zone search 2022-09-02 18:42:34 +05:30
Nicholas Spadaccino
1e4d63be20
Added additional tests 2022-09-01 17:44:36 -04:00
Aravindh R
06d9a3fc7c
Merge branch 'master' into aravindhr/fix-spf-record-length 2022-09-01 14:14:10 +05:30
Aravindh-Raju
f741de0b17
Handle SPF correctly 2022-09-01 14:07:53 +05:30
Jay
6f1fe5aab8
Merge branch 'master' into record_already_exist_add 2022-09-01 10:27:37 +05:30
Jay
bed55cd4c1
Merge branch 'master' into forwardslash_ignore_forwardzone 2022-09-01 10:25:49 +05:30
Nicholas Spadaccino
30514caa05
Remove commented code, add and update tests 2022-08-31 17:23:02 -04:00
Arpit Shah
d14ef311e1
Merge pull request #1116 from Aravindh-Raju/aravindhr/make-navbars-fixed
Make menu bars fixed
2022-08-30 08:31:07 -04:00
Aravindh-Raju
ddd4b35557
Add test 2022-08-30 12:27:00 +05:30
Aravindh-Raju
0e80461860
Add test 2022-08-30 12:11:02 +05:30
Aravindh-Raju
774d23d19c
Override ZoneConnection toString 2022-08-30 11:40:19 +05:30
Nicholas Spadaccino
807dde2ff0
Add new condition to canUpdateRecordSet 2022-08-29 17:49:34 -04:00
Aravindh-Raju
376e7b62de
Override User toString 2022-08-29 12:47:03 +05:30
Aravindh-Raju
c6def527bc
Allow dotted host with config 2022-08-26 16:28:32 +05:30
Aravindh-Raju
d95ac71c4c
Remove complete zone sync status 2022-08-25 12:24:05 +05:30
Jay07GIT
fcfd84125e
Update 2022-08-24 14:57:46 +05:30
Nicholas Spadaccino
e1705d4291
Address review comments 2022-08-23 17:19:10 -04:00
Jay07GIT
229efc744b
Update in func tests 2022-08-23 13:26:58 +05:30
Aravindh-Raju
c07754c5cf
Replace await with IO 2022-08-23 12:27:56 +05:30
Jay07GIT
0a0531c618
Update in func tests 2022-08-23 12:21:50 +05:30
Nicholas Spadaccino
e82a649e3d
Add test super user to shared context 2022-08-18 16:14:22 -04:00
Jay07GIT
e805423e3b
Update in tests 2022-08-18 15:01:14 +05:30
Aravindh-Raju
876b7a767a
Remove unused import 2022-08-18 11:26:12 +05:30
Aravindh-Raju
35d9cc7eda
Remove unused case 2022-08-18 11:20:43 +05:30
Aravindh-Raju
aaf171bc34
Add and update tests 2022-08-17 17:13:52 +05:30
Jay07GIT
ed4d324d4a
Replaced orchard CIDR library to IP4s library 2022-08-17 14:01:48 +05:30
Aravindh-Raju
d33d559ce2
Add auth for /status post 2022-08-17 13:31:08 +05:30
Nicholas Spadaccino
4e87fde370
Remove debugging code 2022-08-16 17:47:17 -04:00
Nicholas Spadaccino
f9510b8ca7
Update record modal to require record owner group for shared zones 2022-08-16 17:34:29 -04:00
Nicholas Spadaccino
787ab8959c
Update manage records zone view to limit actions in shared zones 2022-08-15 17:57:18 -04:00
Aravindh-Raju
3e8d86f81d
Revert changes 2022-08-10 12:18:35 +05:30
Aravindh-Raju
bc5972b05d
Update func test 2022-08-10 11:55:52 +05:30
Aravindh-Raju
1bdddb7e25
Update func test 2022-08-10 11:07:01 +05:30
Aravindh-Raju
a6b7c9900a
Resolve func test 2022-08-09 18:15:27 +05:30
Aravindh-Raju
b4920d657a
override json for groups 2022-08-09 15:59:09 +05:30
Aravindh-Raju
4aabf5d63e
Update 2022-08-09 14:38:15 +05:30
Aravindh-Raju
5c37af6963
Rollback jackson version 2022-08-09 12:13:17 +05:30
Aravindh-Raju
4e69b38081
Fix format 2022-08-09 11:06:48 +05:30
Aravindh-Raju
479c08cac9
Resolve tests 2022-08-09 10:58:39 +05:30
Nicholas Spadaccino
959cd22a36
Update func tests
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-08-08 15:46:00 -04:00
Aravindh-Raju
f00f1312c8
Replace joda with java instant 2022-08-08 17:52:54 +05:30
Nicholas Spadaccino
4ae208b2d2
Update failing test
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-08-05 14:47:50 -04:00
Nicholas Spadaccino
49b225e4f0
Allow all users to view shared zones
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-08-05 09:52:08 -04:00
Aravindh-Raju
0ac4c35ced
Update route test 2022-07-29 13:07:32 +05:30
Aravindh-Raju
2c3c87fa7e
Add route test 2022-07-29 13:03:21 +05:30
Aravindh-Raju
6fd4f730b1
Add route for single group change 2022-07-29 12:37:48 +05:30
Aravindh-Raju
49bf335222
Use trim 2022-07-28 16:29:06 +05:30
Aravindh-Raju
77fdcaf3c8
Make requested changes 2022-07-28 16:04:47 +05:30
Jay07GIT
54e377533c
Update in tests 2022-07-28 13:15:05 +05:30
Jay07GIT
61ba1ebca9
Update 2022-07-26 18:11:54 +05:30
Jay07GIT
4c63161f0c
Update 2022-07-26 18:02:55 +05:30
Jay07GIT
7061bb8e1a
Update in portal tests 2022-07-26 15:00:48 +05:30
Aravindh-Raju
3c0c4a26f1
Revert changes 2022-07-26 11:37:37 +05:30
Aravindh-Raju
0fd4ecb1f9
Remove unnecessary changes 2022-07-26 11:15:41 +05:30
Aravindh-Raju
1eeb348579
Remove unused scope variable 2022-07-26 11:02:14 +05:30
Jay07GIT
b58efee7f3
Added portal tests and updated ACL modal view page 2022-07-25 18:04:12 +05:30
Nicholas Spadaccino
0e31b8bfcb
Modify group search filter to ignore case for name queries, update unit tests
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-07-22 15:00:14 -04:00
Jay07GIT
877d4fe76d
Added pagination in zone history tab 2022-07-22 16:58:26 +05:30
Aravindh-Raju
358ccc8a3a
Remove print statements 2022-07-22 15:50:54 +05:30
Aravindh-Raju
fce76169d0
Add test 2022-07-22 15:35:51 +05:30
Aravindh-Raju
8c857bb497
Add test 2022-07-22 14:43:31 +05:30
Aravindh-Raju
d88051493f
Move code to scala 2022-07-22 14:14:59 +05:30
Jay07GIT
94c16361b1
Add Zone history tab in zones page 2022-07-22 10:14:58 +05:30
Aravindh-Raju
c88a892ef3
Add group changes info 2022-07-21 16:03:20 +05:30
Aravindh-Raju
c28d5856cf
Add test 2022-07-21 11:02:33 +05:30
Aravindh-Raju
03155cc376
Add tests for group change data 2022-07-21 10:49:04 +05:30
Aravindh-Raju
ce870db062
Add tests for pagination 2022-07-21 10:29:03 +05:30
Aravindh-Raju
2ac219f4a1
Fix portal tests 2022-07-20 16:33:43 +05:30
Aravindh-Raju
f9ae48d1af
Fix tests 2022-07-20 15:33:14 +05:30
Aravindh-Raju
1ad803533f
Add group change history 2022-07-19 14:01:19 +05:30
Aravindh-Raju
f839c72d05
Add test 2022-07-15 11:12:16 +05:30
Aravindh-Raju
d13b601f69
Resolve error 2022-07-15 10:41:56 +05:30
Aravindh-Raju
a44cafeaf8
Add test 2022-07-13 18:05:35 +05:30
Aravindh-Raju
8493edf5e0
Add test for wildcard search 2022-07-13 15:48:44 +05:30
Aravindh-Raju
8661994397
Resolve test 2022-07-13 15:20:41 +05:30
Aravindh-Raju
2232f9dccf
Allow wildcard search 2022-07-13 14:09:02 +05:30
Jay07GIT
cfcd47abaf
update in tests 2022-07-11 09:54:52 +05:30
Jay07GIT
2427616f3b
update in messages 2022-07-08 19:33:46 +05:30
Nicholas Spadaccino
a61cf559f1
Revert listzones sql query change 2022-07-07 11:51:05 -04:00
Aravindh-Raju
638a44fcad
Minor change 2022-07-06 11:40:08 +05:30
Aravindh-Raju
95bbbe43c9
Address requested changes 2022-07-06 11:36:54 +05:30
Nicholas Spadaccino
eb17c957f7
Add case insensitivity to listZones api, update integration tests 2022-07-05 16:02:53 -04:00
Jay07GIT
0eddd7e0aa
update in css 2022-07-05 17:30:41 +05:30
Aravindh-Raju
d1b8782311
Move lib to devDependencies 2022-07-05 11:38:09 +05:30
Aravindh-Raju
fa93930ae2
Add comment 2022-07-05 10:45:14 +05:30
Jay07GIT
54949ede49
update in css 2022-07-04 19:24:47 +05:30
Aravindh-Raju
db0d8011f7
Add text-highlight 2022-07-04 18:52:27 +05:30
Aravindh-Raju
8ccd1cc8c0
Resolve tests failure 2022-07-04 17:39:27 +05:30
Jay07GIT
80616865d8
update in css 2022-07-04 15:54:25 +05:30
Jay07GIT
59f800883c
update in recordset search 2022-07-04 15:10:18 +05:30
Aravindh-Raju
a1b3cb32de
Add newline at EOF 2022-07-04 13:15:04 +05:30
Aravindh-Raju
8030cbf778
Add css 2022-07-04 12:52:48 +05:30
Aravindh-Raju
e51c2aad5d
add autocomplete for groups search 2022-06-30 17:58:16 +05:30
Jay07GIT
a92713d11d
Auto complete feature in Global recordset search 2022-06-30 14:51:02 +05:30
Nicholas Spadaccino
d4f94918a0
Update unit tests 2022-06-29 08:26:21 -04:00
Jay07GIT
44b88f31f6
update 2022-06-24 12:44:31 +05:30
Jay07GIT
6d311d9e6b
Adding it tests 2022-06-23 15:26:02 +05:30
Jay07GIT
6fc0eee7db
update 2022-06-22 17:01:18 +05:30
Jay07GIT
2410a24fc8
update in functional tests 2022-06-22 16:31:59 +05:30
Jay07GIT
36e3587fad
update 2022-06-22 13:27:44 +05:30
Jay07GIT
37fde02bfc
update 2022-06-21 17:42:03 +05:30
Jay07GIT
3ee689081c
update in integration tests 2022-06-21 17:24:31 +05:30
Jay07GIT
d1ab9a7a4a
update 2022-06-21 15:13:08 +05:30
Jay07GIT
79c922e805
update in integration tests 2022-06-20 17:56:18 +05:30
Aravindh-Raju
6ad28b46d4
Refactor 2022-06-20 13:19:52 +05:30
Jay07GIT
6e22452f49
update in validating record already exist 2022-06-20 11:47:07 +05:30
Jay07GIT
b2f0693c76
update 2022-06-17 12:39:54 +05:30
Jay07GIT
ccae59fe7a
update in tests 2022-06-16 11:52:45 +05:30
Aravindh-Raju
62a79b62a2
update comment 2022-06-15 18:23:00 +05:30
Jay07GIT
71f6dfd7c4
update 2022-06-15 15:04:54 +05:30
Jay07GIT
dd1477f3bc
update 2022-06-15 12:27:42 +05:30
Nicholas Spadaccino
119ebd49f4
Update tests, remove unneeded code 2022-06-13 17:34:52 -04:00
Aravindh-Raju
fca4741721
remove directly hardcoded message 2022-06-13 16:46:14 +05:30
Aravindh-Raju
208a82331b
Complete delete for record that doesn't exists 2022-06-13 16:27:53 +05:30
Jay07GIT
ca334fc54d
Allow add for already exists DNS records. 2022-06-10 12:51:11 +05:30
Nicholas Spadaccino
cc597cf1a5
Fix issue with list zones sql query, update zone controller and view 2022-06-09 17:08:37 -04:00
Nicholas Spadaccino
4fd3bde261
Update api and sql repo to filter reverse zones 2022-06-07 16:34:12 -04:00
Nicholas Spadaccino
946ddeb9e5
Added filter ptr zones checkbox to zones view
Signed-off-by: Nicholas Spadaccino <nicholas_spadaccino@comcast.com>
2022-06-02 17:39:45 -04:00
Aravindh-Raju
7477534580
Add portal test 2022-06-01 15:40:46 +05:30
Aravindh-Raju
27d24478ae
Add pagination in group view 2022-06-01 15:01:01 +05:30
Jay
58e45c5277
Merge branch 'master' into forwardslash_ignore_forwardzone 2022-05-27 17:22:55 +05:30
Aravindh-Raju
01df5bb627
Handle whitespace and null value 2022-05-26 10:57:36 +05:30
Ryan Emerle
7e8e7ecb12
Merge branch 'master' into aravindhr/make-navbars-fixed 2022-05-25 10:57:06 -04:00
Ryan Emerle
92d01157c9
Merge branch 'master' into aravindhr/filter-zones 2022-05-25 10:39:24 -04:00
Jay07GIT
98ab80ecd8
update 2022-05-25 17:09:01 +05:30
Jay07GIT
b21c533941
update 2022-05-25 13:52:22 +05:30
Jay07GIT
3adb04b8e6
update tests 2022-05-25 13:45:17 +05:30
Jay07GIT
4896f4e823
Added tests in domainvalidationspec.scala 2022-05-25 12:55:01 +05:30
Jay07GIT
8e8d1e5aa8
update 2022-05-24 18:02:04 +05:30
Jay07GIT
0080ffa9dd
update 2022-05-24 14:34:27 +05:30
Ryan Emerle
72976c8828
Update docs to include user API change [ci skip] 2022-05-23 14:42:05 -04:00
Ryan Emerle
416b7cda68
Rename action [ci skip] 2022-05-23 14:34:12 -04:00
Ryan Emerle
c561cddf5f
Update docs to include user API change [ci skip] 2022-05-23 14:28:51 -04:00
Ryan Emerle
89ecce2970
Merge pull request #1135 from remerle/master 2022-05-23 09:22:04 -04:00
Ryan Emerle
3a79b99572
Merge branch 'master' into master 2022-05-23 09:21:56 -04:00
Emerle, Ryan
a40c231853
Add twitter handle to documentation [ci skip] 2022-05-23 09:20:29 -04:00
Jay07GIT
7b17e7aca0
update test 2022-05-23 13:57:53 +05:30
Jay07GIT
ad0a37236a
Added forward slash validation for forward and reverse zone 2022-05-23 13:12:33 +05:30
Ryan Emerle
085fbe958e
Bump version to 0.14.0 2022-05-20 14:09:48 -04:00
Ryan Emerle
4c54e787c2
Merge pull request #1118 from nspadaccino/nspadaccino/user-lookup-api 2022-05-20 14:08:52 -04:00
Ryan Emerle
78ddfeea05
Merge branch 'master' into nspadaccino/user-lookup-api 2022-05-20 12:17:00 -04:00
Ryan Emerle
6775ffa3e1
Merge pull request #1133 from remerle/master 2022-05-20 12:15:28 -04:00
Emerle, Ryan
3bfd34849d
Enable flyway configuration from environment 2022-05-20 11:53:33 -04:00
Nicholas Spadaccino
81a28b2f60
Merge branch 'nspadaccino/user-lookup-api' of github.com:nspadaccino/vinyldns into nspadaccino/user-lookup-api 2022-05-20 11:41:03 -04:00
Nicholas Spadaccino
017c8acd09
Modify get user api to just return username and id, update tests and docs, remove unused serializer 2022-05-20 11:39:37 -04:00
Ryan Emerle
5d749bd699
Merge branch 'master' into nspadaccino/user-lookup-api 2022-05-20 10:11:30 -04:00
Ryan Emerle
9897166b43
Merge pull request #1131 from Aravindh-Raju/aravindhr/relax-ds-validations 2022-05-20 10:11:00 -04:00
Ryan Emerle
e618f363eb
Merge branch 'master' into aravindhr/relax-ds-validations 2022-05-20 09:51:30 -04:00
Ryan Emerle
5430d38b29
Merge pull request #1127 from nspadaccino/nspadaccino/full-group-choice-acl 2022-05-20 09:41:56 -04:00
Ryan Emerle
16b4dafd43
Fix portal tests 2022-05-20 09:23:37 -04:00
Ryan Emerle
e7643db419
Update test 2022-05-19 16:25:51 -04:00
Ryan Emerle
95338d963b
Update test 2022-05-19 15:53:04 -04:00
Ryan Emerle
bfca4aef07
Merge branch 'master' into nspadaccino/full-group-choice-acl 2022-05-19 15:43:54 -04:00
Ryan Emerle
8b6b0a8d17
Merge branch 'master' into nspadaccino/user-lookup-api 2022-05-19 15:43:30 -04:00
Ryan Emerle
8262f6e497
Merge pull request #1117 from Jay07GIT/reverse_zone_with_caseinsensitive 2022-05-19 15:43:08 -04:00
Ryan Emerle
680d168c6d
update tests 2022-05-19 15:32:49 -04:00
Ryan Emerle
da3ec42dd7
Merge branch 'master' into nspadaccino/full-group-choice-acl 2022-05-19 14:56:00 -04:00
Emerle, Ryan
02d702f461
Performance tuning
- Add `getGroupsAbridged` which returns a subset of group data for dropdowns and other places where all groups are listed
- Remove unnecessary checks for `canSeeGroup` in `groups.scala.html` since all users can see all groups
- Move `ZoneController` initialization in `manageZone.scala.html` to higher level to avoid waiting for groups to load when expanding the select box
- Add `PreparePortalHook` to automatically run `prepare-portal.sh` when `project porta; run` is executed
2022-05-19 14:41:11 -04:00
Jay
d3155ff66d
Merge branch 'vinyldns:master' into reverse_zone_with_caseinsensitive 2022-05-19 14:42:36 +05:30
Jay07GIT
c13f91333f
update 2022-05-19 13:18:18 +05:30
Jay07GIT
502746d86b
removed java libraries 2022-05-19 13:11:42 +05:30
Aravindh R
3ef1dac262
Merge branch 'master' into aravindhr/relax-ds-validations 2022-05-19 12:10:30 +05:30
Aravindh-Raju
5d37f2dabe
Relax DS validations 2022-05-19 11:57:45 +05:30
Ryan Emerle
9ca8fc88cc
Merge pull request #1130 from remerle/master
Minor updates
2022-05-18 17:26:19 -04:00
Ryan Emerle
d9f986997d
Minor updates
- Remove executable permissions from *.scala files
- Update `prepare-portal.sh` to avoid creating `package-lock.json` as it
can cause more problems than it prevents
2022-05-18 09:05:24 -04:00
Ryan Emerle
8b5244fcda
Merge branch 'vinyldns:master' into master 2022-05-18 09:00:21 -04:00
Ryan Emerle
4222074e8a
Minor updates
- Remove executable permissions from *.scala files
- Update `prepare-portal.sh` to avoid creating `package-lock.json` as it
can cause more problems than it prevents
2022-05-18 08:58:08 -04:00
Ryan Emerle
43c3db1964
Bump to v0.13.1 2022-05-18 07:57:11 -04:00
Ryan Emerle
559d47ea93
Merge branch 'master' into nspadaccino/full-group-choice-acl 2022-05-18 07:55:59 -04:00
Ryan Emerle
52b6a96ee3
Merge pull request #1128 from remerle/master 2022-05-17 16:45:16 -04:00
Emerle, Ryan
20c283dc7a
Logging updates
- Allow log level to be set via environment
- Remove `envsubst` from `quickstart.sh` for macOS compat
2022-05-17 16:23:45 -04:00
Nicholas Spadaccino
222f08512e
Merge remote-tracking branch 'upstream/master' into nspadaccino/full-group-choice-acl 2022-05-17 15:17:44 -04:00
Ryan Emerle
b7647979e6
Update verify.yml 2022-05-17 15:15:26 -04:00
Nicholas Spadaccino
94b6d575b5
Add unit test 2022-05-17 13:53:33 -04:00
Ryan Emerle
78c0c634e8
Bump to v0.13.0 2022-05-17 12:42:41 -04:00
Ryan Emerle
01675c1232
Merge pull request #1125 from remerle/Jay07GIT-wildcardsearch_rs 2022-05-17 12:42:15 -04:00
Ryan Emerle
0bf115a44b
Merge branch 'master' into Jay07GIT-wildcardsearch_rs 2022-05-17 12:20:58 -04:00
Ryan Emerle
d65f22d6dc
Merge pull request #1126 from bashilias/master 2022-05-17 12:20:23 -04:00
Ryan Emerle
79e7e934fa
Merge branch 'master' into master 2022-05-17 09:01:04 -04:00
Ryan Emerle
dbb784f05f
Add retries to codecove 2022-05-17 09:00:09 -04:00
Emerle, Ryan
9ffef4aa80
Fix tests
Update query sorting for recordset search
Update `How to Search` instructions
2022-05-16 14:33:12 -04:00
Ilias
bf9ef64ffc typo change 2022-05-16 14:58:05 +02:00
Emerle, Ryan
c49b74e1aa
Fix files ignored due to "cache" entry in .gitignore 2022-05-13 19:48:04 -04:00
Emerle, Ryan
744b21d9d1
Fix wildcard placement 2022-05-13 15:24:26 -04:00
Emerle, Ryan
b24cf8db54
Updates
- Rename RecordSetData as RecordSetCache
- Refactoring
- Fix ip address storage to be binary instead of string
- Align recordset_data table contents with migration tooling
- Add feature flag for recordset cache `use-recordset-cache`
- Add feature flag for loading test data `load-test-data`
2022-05-13 13:31:37 -04:00
Aravindh-Raju
085b1235e6
Search/filter zone by admin group 2022-05-12 12:39:29 +05:30
Nicholas Spadaccino
e9406ec24d
Add new allGroups scope to manageZones view, update view to allow for viewing all groups when creating an acl rule 2022-05-06 16:19:15 -04:00
Jay07GIT
a7f922d24f
Added unit test with uppercase 2022-05-03 15:05:43 +05:30
Aravindh-Raju
d9a4d7828d
Add tests 2022-05-02 15:32:12 +05:30
Aravindh-Raju
7f65dc6582
Validate group fields 2022-05-02 13:42:43 +05:30
Nicholas Spadaccino
c62b3ad453
Update api documentation, update controller, remove unnecessary unit test 2022-04-28 15:14:21 -04:00
Nicholas Spadaccino
d0b602cef0
Merge branch 'master' into nspadaccino/user-lookup-api 2022-04-28 14:46:11 -04:00
Jay07GIT
a595ca4dc5
Reverse zones are updated as case insensitive 2022-04-28 17:03:30 +05:30
Aravindh-Raju
1cafbf5bf8
Make navbars fixed 2022-04-27 12:00:48 +05:30
Nicholas Spadaccino
4eba2d4d6f
Update user repo integration tests 2022-04-26 17:02:07 -04:00
Nicholas Spadaccino
921746a187
Update userAccountAccessor and membershipService unit tests 2022-04-26 16:40:54 -04:00
Nicholas Spadaccino
511c5c7e4c
Update getUser api with new accessor in user repo 2022-04-26 14:58:04 -04:00
Ryan Emerle
a946e71e00
Merge branch 'master' into wildcardsearch_rs 2022-04-25 10:56:07 -04:00
Ryan Emerle
57479812f9
Merge pull request #1114 from Aravindh-Raju/aravindhr/improve-batch-performance 2022-04-25 10:55:48 -04:00
Ryan Emerle
5893c2e2b0
Merge branch 'master' into aravindhr/improve-batch-performance 2022-04-25 10:14:12 -04:00
Ryan Emerle
f907bd04a2
Merge pull request #1110 from corubba/bugfix/log 2022-04-25 10:13:55 -04:00
Ryan Emerle
416de42d0f
Merge branch 'master' into wildcardsearch_rs 2022-04-25 10:02:14 -04:00
Ryan Emerle
19fef9e6e6
Merge branch 'master' into bugfix/log 2022-04-25 09:58:34 -04:00
Ryan Emerle
c6f71d52de
Merge branch 'master' into aravindhr/improve-batch-performance 2022-04-25 09:58:22 -04:00
Ryan Emerle
6a9eacf4a3
Merge pull request #1115 from Jay07GIT/revert_recorddata_hash 2022-04-25 09:58:04 -04:00
Ryan Emerle
7d36d455c6
Merge branch 'master' into bugfix/log 2022-04-25 09:24:49 -04:00
Ryan Emerle
ac0a5372d2
Merge branch 'master' into aravindhr/improve-batch-performance 2022-04-25 09:24:30 -04:00
Ryan Emerle
6f1b9ba8ae
Remove the original migration change 2022-04-25 09:10:51 -04:00
Jay
5e6e33769f
Merge branch 'master' into revert_recorddata_hash 2022-04-25 12:39:01 +05:30
Jay07GIT
2a67503c04
Update 2022-04-25 12:37:47 +05:30
Jay07GIT
a696929476
Dropped recordset BLOB hash column in recordset table 2022-04-25 12:01:46 +05:30
Aravindh-Raju
1a0f9ce8e6
Add new line 2022-04-22 13:49:43 +05:30
Aravindh-Raju
04b7cf8b0c
Improve batch change query performance 2022-04-22 13:45:55 +05:30
Nicholas Spadaccino
8962575455
getUser method update, added unit tests 2022-04-21 17:05:09 -04:00
Ryan Emerle
51407d7f7f
Merge branch 'master' into wildcardsearch_rs 2022-04-21 13:04:32 -04:00
Ryan Emerle
68991f6af1
Update V3.24__RecordSetData.sql 2022-04-21 12:58:14 -04:00
Ryan Emerle
1aa10abbc9
Merge pull request #1113 from Jay07GIT/record_search_loader_fix 2022-04-21 12:56:29 -04:00
Jay07GIT
9c91fad4f8
Added status code check for the recordset search spinner 2022-04-21 18:32:36 +05:30
Jay07GIT
d06dd56fd4
update 2022-04-21 17:38:00 +05:30
Jay07GIT
a5bb7d0c54
update 2022-04-21 17:36:04 +05:30
Jay07GIT
c82cbbdf66
update 2022-04-21 16:42:08 +05:30
Jay07GIT
1205b7df2a Merge branch 'master' of https://github.com/Jay07GIT/vinyldns into wildcardsearch_rs 2022-04-21 16:41:06 +05:30
Nicholas Spadaccino
b798ab9185
user lookup route wip 2022-04-20 14:26:35 -04:00
Ryan Emerle
6d2b1614f9
Merge pull request #1111 from Aravindh-Raju/aravindhr/update-wildcard-validation 2022-04-20 12:11:13 -04:00
Jay07GIT
aa420e1bb6
update 2022-04-20 12:42:12 +05:30
Aravindh-Raju
e7ed66d205
Update wildcard records validation 2022-04-20 11:59:21 +05:30
Jay07GIT
b60b45e9be
update 2022-04-20 10:12:10 +05:30
Jay07GIT
81e01b5a8b
update 2022-04-20 10:10:45 +05:30
Jay07GIT
e9cdeef724
update 2022-04-20 10:08:51 +05:30
Aravindh-Raju
7d2be12cb7
Update wildcard records validation 2022-04-19 18:48:08 +05:30
Jay07GIT
9770e28751
Drop fulltext index for fqdn and reverse_fqdn using Alter queries 2022-04-18 11:57:03 +05:30
Jay07GIT
61c941f16c Merge branch 'master' of https://github.com/Jay07GIT/vinyldns into wildcardsearch_rs 2022-04-18 10:33:01 +05:30
corubba
bed0d5dfed Remove trailing newlines from log messages
The goal is to reduce visual jitter.
2022-04-17 21:03:59 +02:00
corubba
51227ef753 Demote full SigV4 logging to DEBUG level
As the comment said, it is for debugging; so make it DEBUG level. This
means ~15 lines less logging per request. Also wrapped it in a loglevel
guard to skip the string interpolation when not needed.
2022-04-17 21:03:54 +02:00
corubba
433c6efe19 Disable illegal header warning from akka-http
Fixes #864
2022-04-17 21:03:49 +02:00
corubba
ee481c2e0a Reduce flyway log output
Let's establish a sane default by not having flyway run on full-blast
debug output by default. For normal operation I found INFO to be verbose
enough (as the flyway devs intended I guess).
2022-04-17 21:03:44 +02:00
corubba
1292564bf1 Remove duplicate log output
Appenders in logback are additive [0], specifying them multiple times
leads to log lines being output multiple times. It is usually enough to
set the appender on the root logger, and nowhere else.
Setting the same log level on multiple package levels is also kind of
unneeded, the top-most one would be enough; but not touching that here.

[0] https://logback.qos.ch/manual/configuration.html#cumulative
2022-04-17 21:03:29 +02:00
Nicholas Spadaccino
8fe5330489
Add user lookup route, wip 2022-04-15 17:39:40 -04:00
Ryan Emerle
46c8b3136d
Bump to version 0.12.0 2022-04-15 11:35:29 -04:00
Corubba
b2b9814df8
Fix loading TSIG algorithm from config (#1107)
Co-authored-by: Ryan Emerle <ryan_emerle@comcast.com>
2022-04-15 11:33:49 -04:00
Nicholas Spadaccino
6090006e09
Hotfix: Flyway Migration Issue (#1109) 2022-04-13 13:54:51 -04:00
Jay07GIT
4a396ff8bf
removed fulltext index for fqdn and reverse_fqdn 2022-04-13 21:39:45 +05:30
Jay07GIT
edf882f071 Merge branch 'master' of https://github.com/Jay07GIT/vinyldns into wildcardsearch_rs 2022-04-13 21:10:53 +05:30
Nicholas Spadaccino
230c7f7420
Fix Dev Deployment Failure due to Flyway DB Migration Issue (#1104) 2022-04-12 10:07:22 -04:00
Jay
8ce9b95270
Added spinner and no records found for recordset search (#1096) 2022-04-12 09:49:03 -04:00
Nicholas Spadaccino
829cee7640
Allow Users to View Members of Other Groups (#1097)
* Allow all users to view group details, still read-only
2022-04-11 16:18:25 -04:00
Jay07GIT
350995f433
Updated the wildcard filter based on the review comments 2022-04-08 18:35:56 +05:30
Aravindh R
4b31f8b9d9
Update log levels and adopt ECS (#1103)
* Update log messages, log levels, and log formatting
2022-04-04 16:06:05 -04:00
Jay07GIT
2c609ddbf9
Update 2022-03-23 11:45:58 +05:30
Jay07GIT
1a02729128
Update 2022-03-22 19:59:35 +05:30
Jay07GIT
485cc60b7a
Update 2022-03-22 19:54:11 +05:30
Jay07GIT
29a6eced55
Updated wildcard filter for global recordset search with reverseFQDN using recordset_data table 2022-03-22 19:49:11 +05:30
Ryan Emerle
1c2635a441
Bump version to 0.11.0 2022-03-11 16:52:24 -05:00
Ryan Emerle
c0be3b329d
Add init script support to portal image 2022-03-11 11:18:31 -05:00
Jay
dec57eccca
Update for recordset_data table based on rsdump (#1092)
Co-authored-by: Ryan Emerle <ryan_emerle@comcast.com>
2022-03-09 10:22:19 -05:00
Aravindh R
dc218ab071
Upgrade dependencies (#1095) 2022-03-09 09:32:24 -05:00
Nicholas Spadaccino
c7c6184fd6
ACLs Should Honor Most Permissive Access Level (#1089)
Co-authored-by: Ryan Emerle <ryan_emerle@comcast.com>
2022-03-08 08:05:03 -05:00
Jay
44ed1a4a67
Recordset data (#1054)
Co-authored-by: Ryan Emerle <ryan_emerle@comcast.com>
2022-02-28 14:27:44 -05:00
Aravindh R
1d9bc228aa
Resolve SQL injection vulnerability (#1091) 2022-02-28 13:07:18 -05:00
Aravindh R
d73cc6dd53
Mitigate potential MySQL deadlock (#1087) 2022-02-18 10:21:40 -05:00
Ryan Emerle
e552a0050b
Update notify action [ci skip]
Remove the dump action from the notifier
2022-02-09 14:42:03 -05:00
Ryan Emerle
0a2c8c071c
Update docker image shields [ci skip] 2022-02-09 14:20:03 -05:00
Ryan Emerle
2b48a8cc0c
Update developer guide with prerequisites [ci skip] 2022-02-08 12:54:42 -05:00
Ryan Emerle
0335598818
Update notification workflow [ci skip] 2022-02-07 17:00:04 -05:00
Ryan Emerle
e71bdbfcee
Merge pull request #1085 from remerle/master 2022-02-07 16:42:59 -05:00
Ryan Emerle
879e43cfa9
Merge branch 'master' into master 2022-02-07 16:14:50 -05:00
Emerle, Ryan
0e7de82bfb
Attempt to fix sql deadlock 2022-02-07 16:01:00 -05:00
Emerle, Ryan
a94d829685
Add log dump to functional test 2022-02-07 15:36:13 -05:00
Ryan Emerle
3fb4e7d55b
Merge pull request #1084 from vinyldns/update-sbt-script 2022-02-07 14:53:44 -05:00
Emerle, Ryan
be9990db39
Update Dockerfile for API
- Fix issue with loading external JARs
- Bump version to 0.10.5
2022-02-07 14:52:58 -05:00
Ryan Emerle
43886deee9
Update sbt script to allow for debugging [ci skip] 2022-02-07 14:47:18 -05:00
Ryan Emerle
74bde4381e
Merge pull request #1052 from Jay07GIT/hardcoded-limits 2022-02-07 13:35:08 -05:00
Ryan Emerle
656955fc2c
Merge branch 'master' into hardcoded-limits 2022-02-07 13:22:07 -05:00
Ryan Emerle
10e8fe7387
Update notify workflow [ci skip] 2022-02-07 13:03:07 -05:00
Ryan Emerle
d7497e344e
Update notify.yml 2022-02-07 12:50:41 -05:00
Ryan Emerle
39dc7dde80
Create notify.yml 2022-02-07 12:48:11 -05:00
Ryan Emerle
9ed63bc00f
Remove slack notification [ci skip] 2022-02-07 12:33:16 -05:00
Ryan Emerle
482ece8bf5
Merge branch 'master' into hardcoded-limits 2022-02-07 10:30:00 -05:00
Ryan Emerle
810bcd40b6
Update verify.yml 2022-02-07 10:11:11 -05:00
Ryan Emerle
f282149444
Add slack notification to verify 2022-02-07 09:43:24 -05:00
Ryan Emerle
5f74510e69
Merge branch 'master' into hardcoded-limits 2022-02-07 09:20:34 -05:00
Ryan Emerle
843a285b0f
Merge pull request #1060 from Jay07GIT/recordsetblob_hashing 2022-02-07 09:20:18 -05:00
Ryan Emerle
6d4b8b696b
Merge branch 'master' into recordsetblob_hashing 2022-02-03 07:51:04 -05:00
Ryan Emerle
e23719018a
Merge pull request #1081 from Aravindh-Raju/aravindhr/create-membership-tx 2022-02-03 07:50:52 -05:00
Ryan Emerle
726d5a4532
Merge branch 'master' into hardcoded-limits 2022-02-02 11:51:31 -05:00
Ryan Emerle
b4b8b3057d
Merge branch 'master' into recordsetblob_hashing 2022-02-02 11:51:23 -05:00
Ryan Emerle
a201bd89dd
Merge branch 'master' into aravindhr/create-membership-tx 2022-02-02 11:51:16 -05:00
Ryan Emerle
fb9bd164b0
Remove hard dependency on API for Portal
- Remove `api` from `depends_on` in the QuickStart compose file as the API may be started externally
2022-02-01 10:58:26 -05:00
Ryan Emerle
03edb9a36e
Merge branch 'master' into aravindhr/create-membership-tx 2022-02-01 10:52:29 -05:00
Ryan Emerle
0175cc92a8
Merge pull request #1082 from remerle/fix-dev-setup 2022-02-01 10:52:05 -05:00
Ryan Emerle
5e30894b00
Merge branch 'master' into fix-dev-setup 2022-02-01 10:26:41 -05:00
Ryan Emerle
71e7d59dc6
Merge branch 'master' into recordsetblob_hashing 2022-02-01 10:22:55 -05:00
Ryan Emerle
3d6ecebcfa
Merge branch 'master' into aravindhr/create-membership-tx 2022-02-01 10:22:38 -05:00
Ryan Emerle
0726e96e4c
Merge pull request #1053 from Aravindh-Raju/aravindhr/group-search-filter 2022-02-01 10:22:13 -05:00
Ryan Emerle
ec1283590b
Merge branch 'master' into fix-dev-setup 2022-02-01 10:20:41 -05:00
Aravindh R
dfe4ffbd43
Merge branch 'master' into aravindhr/group-search-filter 2022-02-01 09:59:35 +05:30
Ryan Emerle
a5a94c4858
Merge branch 'master' into recordsetblob_hashing 2022-01-31 17:55:54 -05:00
Ryan Emerle
87cc50fda0
Merge branch 'master' into hardcoded-limits 2022-01-31 17:49:29 -05:00
Ryan Emerle
43cf5bd8da
Merge branch 'master' into aravindhr/create-membership-tx 2022-01-31 17:48:15 -05:00
Ryan Emerle
59ee0c9098
Merge pull request #1076 from Aravindh-Raju/aravindhr/create-transaction 2022-01-31 17:46:07 -05:00
Ryan Emerle
08587fe1be
Fix issues pertaining to running env
- Add `debug` flag to `sbt.sh`
- Remove need for `LOCALSTACK_EXT_HOSTNAME` (provided by update to
backing Docker images)
2022-01-31 13:40:45 -05:00
Aravindh R
678abbb713
Merge branch 'master' into aravindhr/create-membership-tx 2022-01-21 16:56:16 +05:30
Aravindh-Raju
ce6bd1badc Maintain atomicity for membership service 2022-01-21 15:42:23 +05:30
Aravindh-Raju
b8ad28ac41 Wrap single changes within tx 2022-01-20 13:33:08 +05:30
Aravindh-Raju
eb5c8a18cc Resolve func test error 2022-01-19 19:11:40 +05:30
Aravindh-Raju
db3bc48ea3 Resolve func test error 2022-01-19 18:31:13 +05:30
Aravindh R
9a6da3d5b4
Merge branch 'master' into aravindhr/create-transaction 2022-01-19 16:18:58 +05:30
Aravindh-Raju
74788a9a29 Add trait and make changes 2022-01-19 16:10:43 +05:30
Aravindh-Raju
5a96580168 Remove unnecessary handlers 2022-01-19 16:09:14 +05:30
Aravindh-Raju
9ce417c53c Add connection for tests 2022-01-19 16:09:13 +05:30
Aravindh-Raju
1231c97f44 Resolve tests 2022-01-19 16:09:13 +05:30
Aravindh-Raju
8d255c2e3b Add db changes 2022-01-19 16:09:13 +05:30
Aravindh-Raju
421b4c6b23 Add and use "executeWithinTransaction" function 2022-01-19 16:09:13 +05:30
Aravindh-Raju
fcb31adf2c Update transactions 2022-01-19 16:09:13 +05:30
Aravindh-Raju
c71e2e1942 Add tests 2022-01-19 16:09:13 +05:30
Aravindh-Raju
bca8150586 Resolve test failures 2022-01-19 16:09:13 +05:30
Aravindh-Raju
4b368795be Make DB change atomic 2022-01-19 16:08:55 +05:30
Aravindh-Raju
4d81929f6d Use IO monad 2022-01-19 16:02:23 +05:30
Aravindh-Raju
b866c9d896 Verify transaction 2022-01-19 16:02:23 +05:30
Ryan Emerle
f25a95a24e
Merge pull request #1080 from remerle/master 2022-01-13 18:19:29 -05:00
Ryan Emerle
32640c386a
Merge branch 'master' into master 2022-01-13 18:02:15 -05:00
Ryan Emerle
e38ede14be
Bump version to 0.10.4 [ci skip] 2022-01-13 17:13:32 -05:00
Ryan Emerle
28c30f1468
Merge branch 'master' into master 2022-01-13 17:09:55 -05:00
Ryan Emerle
67397f6094
Merge pull request #1078 from jwakemen/parse-csv-better 2022-01-13 17:09:38 -05:00
Emerle, Ryan
74d086f5e3
Update to quickstart script
- Fix the `update` method
2022-01-13 17:07:54 -05:00
Wakemen, James
90b6b4fb82 Parse CSV lines containing quoted commas properly 2022-01-13 14:17:53 -05:00
Ryan Emerle
f34c53eefe
Add note about debugging 2022-01-13 10:05:57 -05:00
Ryan Emerle
393e68ff66
Merge branch 'master' into hardcoded-limits 2022-01-07 10:58:28 -05:00
Ryan Emerle
3161601af6
Merge pull request #1044 from vinyldns/snyk-fix-33af86bb70d4d8fa98080ffd4d49dc6b 2022-01-07 10:57:54 -05:00
Ryan Emerle
cd555e221b
Merge branch 'master' into snyk-fix-33af86bb70d4d8fa98080ffd4d49dc6b 2022-01-07 10:08:33 -05:00
Jay07GIT
c8c847b652 update in hex String for hashing recordset_blob 2021-12-23 23:50:25 +05:30
Ryan Emerle
43416f292c
Merge branch 'master' into recordsetblob_hashing 2021-12-22 08:56:43 -05:00
Ryan Emerle
e919f68a56
Merge branch 'master' into hardcoded-limits 2021-12-22 08:51:16 -05:00
Emerle, Ryan
a5cf3adca0
Update permitted action [ci skip] 2021-12-20 14:12:10 -05:00
Emerle, Ryan
6f754eb364
Update vNext release action [ci skip] 2021-12-20 13:38:27 -05:00
Ryan Emerle
f9921e2b3f
Merge pull request #1075 from remerle/master 2021-12-20 13:26:03 -05:00
Emerle, Ryan
744c65e064
Fix portal login redirect
With the release of `0.10.0` the redirect for OIDC authentication was not working.

- Re-enable redirect in `setOidcSession.scala.html`
- Add support for redirecting to requested page after login, rather than `/index`-purgatory
2021-12-20 11:55:32 -05:00
Emerle, Ryan
34edf0cd11
Fix portal login redirect
With the release of `0.10.0` the redirect for OIDC authentication was not working.

- Re-enable redirect in `setOidcSession.scala.html`
- Add support for redirecting to requested page after login, rather than `/index`-purgatory
2021-12-19 17:19:28 -05:00
Emerle, Ryan
a030fd3567
Fix portal login redirect
With the release of `0.10.0` the redirect for OIDC authentication was not working.

- Re-enable redirect in `setOidcSession.scala.html`
- Add support for redirecting to requested page after login, rather than `/index`-purgatory
2021-12-18 11:40:00 -05:00
Ryan Emerle
3b63751278
Update README.md
[ci skip]
2021-12-17 13:36:17 -05:00
Ryan Emerle
10edb714e6
Update README.md
[ci skip]
2021-12-17 13:31:54 -05:00
Ryan Emerle
e62aeabe15
Merge pull request #1074 from remerle/master 2021-12-17 12:55:34 -05:00
Emerle, Ryan
43de6baf2c
Rollback Flyway dependency to support MySQL 5.x 2021-12-17 12:54:44 -05:00
Ryan Emerle
803e1a4aea
Merge pull request #1073 from remerle/rollback-flyway 2021-12-17 09:25:31 -05:00
Ryan Emerle
5091712905
Merge branch 'master' into rollback-flyway 2021-12-17 08:43:38 -05:00
Emerle, Ryan
5d56f58ab5
Rollback Flyway dependency to support MySQL 5.x 2021-12-17 08:38:58 -05:00
Jay
f8d1846b98
Merge branch 'master' into hardcoded-limits 2021-12-16 10:13:17 +05:30
Jay
27972b599d
Merge branch 'master' into recordsetblob_hashing 2021-12-16 10:13:01 +05:30
Jay
ab94a9fee3
Merge branch 'master' into recordsetblob_hashing 2021-12-15 21:35:28 +05:30
Jay
49a7f5c828
Merge branch 'master' into hardcoded-limits 2021-12-15 21:34:35 +05:30
Ryan Emerle
6994d4d257
Merge branch 'master' into hardcoded-limits 2021-12-15 10:44:53 -05:00
Jay07GIT
183a904d6a update config-api.md 2021-12-15 17:50:34 +05:30
Jay07GIT
dc191320b2 updated tests 2021-12-15 17:16:04 +05:30
Jay
ab09a2bef6
Merge branch 'master' into hardcoded-limits 2021-12-15 16:29:36 +05:30
Aravindh R
472feff21d
Merge branch 'master' into aravindhr/group-search-filter 2021-12-15 15:49:28 +05:30
Ryan Emerle
a600daf158
Merge branch 'master' into hardcoded-limits 2021-12-14 14:51:08 -05:00
Jay07GIT
7b10084167 updated tests 2021-12-14 19:13:43 +05:30
Aravindh R
f8a5b1facf
Merge branch 'vinyldns:master' into aravindhr/group-search-filter 2021-12-10 15:54:57 +05:30
Ryan Emerle
3a8960c0e1
Merge branch 'master' into hardcoded-limits 2021-12-09 17:19:50 -05:00
Ryan Emerle
17146a1d88
Merge branch 'master' into recordsetblob_hashing 2021-12-09 15:48:51 -05:00
Aravindh-Raju
f51861f35a Remove TODO as it's completed 2021-12-09 16:53:21 +05:30
Jay
75fb82ff13
Merge branch 'master' into hardcoded-limits 2021-12-08 15:27:07 +05:30
Jay
81f05187dd
Merge branch 'master' into recordsetblob_hashing 2021-12-08 15:24:46 +05:30
Aravindh R
557d95bae5
Merge branch 'master' into aravindhr/group-search-filter 2021-12-08 14:15:57 +05:30
Jay07GIT
1fef0325bf Recordset blob data has been hashed 2021-11-23 19:11:42 +05:30
Jay
f7755fd4cd
Delete Messages.scala
This file is already in place. Due to conflict issue this file has been removed.
2021-11-19 09:20:54 +05:30
Jay
919dba98f0 limits configurable Changes made for batch changes, zone, recordset. 2021-11-18 19:39:18 +05:30
Jay
d2cae19193 Limits hard coded value for the membership is converted to configurable in reference.conf 2021-11-18 19:39:07 +05:30
Aravindh-Raju
52aaff9593 Add search/filter for groups 2021-10-04 13:24:47 +05:30
snyk-bot
7a6a73c1e0
fix: modules/portal/package.json to reduce vulnerabilities
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-WS-1296835
2021-05-27 05:45:13 +00:00
417 changed files with 25419 additions and 5055 deletions

55
.github/workflows/notify.yml vendored Normal file
View File

@ -0,0 +1,55 @@
name: Notify on Workflow Complete
on:
workflow_run:
workflows: [Verify and Test, VinylDNS Official Release]
types:
- completed
jobs:
notify:
runs-on: ubuntu-latest
steps:
- name: Send Slack Notification On Success
uses: slackapi/slack-github-action@v1.18.0
if: github.event.workflow_run.conclusion == 'success'
with:
# For posting a rich message using Block Kit
payload: |
{
"text": "GitHub Action ${{ github.event.workflow.name }} completed successfully!\nAction: ${{ github.event.workflow_run.html_url }}",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":check_mark: GitHub Action `${{ github.event.workflow.name }}` completed successfully!\nAction: ${{ github.event.workflow_run.html_url }}"
}
}
]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
- name: Send Slack Notification on Failure
uses: slackapi/slack-github-action@v1.18.0
if: github.event.workflow_run.conclusion != 'success'
with:
# For posting a rich message using Block Kit
payload: |
{
"text": "GitHub Action ${{ github.event.workflow.name }} FAILED!\nAction: ${{ github.event.workflow_run.html_url }}",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":x: GitHub Action `${{ github.event.workflow.name }}` FAILED!\nAction: ${{ github.event.workflow_run.html_url }}"
}
}
]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK

View File

@ -1,4 +1,4 @@
name: Microsite
name: Documentation Site
concurrency:
cancel-in-progress: true
group: "publish-site"

149
.github/workflows/release-beta.yml vendored Normal file
View File

@ -0,0 +1,149 @@
name: VinylDNS Beta Release
concurrency:
cancel-in-progress: true
group: "release"
defaults:
run:
shell: bash
on:
workflow_dispatch:
inputs:
verify-first:
description: 'Verify First?'
required: true
default: 'true'
create-gh-release:
description: 'Create a GitHub Release?'
required: true
default: 'true'
publish-images:
description: 'Publish Docker Images?'
required: true
default: 'true'
pre-release:
description: 'Is this a pre-release?'
required: true
default: 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
verify:
name: Verify Release
runs-on: ubuntu-latest
steps:
- name: Checkout current branch
if: github.event.inputs.verify-first == 'true'
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run Tests
id: verify
if: github.event.inputs.verify-first == 'true'
run: cd build/ && ./assemble_api.sh && ./run_all_tests.sh
create-gh-release:
name: Create GitHub Release
needs: verify
runs-on: ubuntu-latest
if: github.event.inputs.create-gh-release == 'true'
permissions:
contents: write
steps:
- name: Checkout current branch
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build Artifacts
id: build
run: cd build/ && ./assemble_api.sh && ./assemble_portal.sh
- name: Get Version
id: get-version
run: echo "::set-output name=vinyldns_version::$(awk -F'"' '{print $2}' ./version.sbt)"
- name: Create GitHub Release
id: create_release
uses: softprops/action-gh-release@v2
with:
tag_name: v${{ steps.get-version.outputs.vinyldns_version }}
generate_release_notes: true
files: artifacts/*
prerelease: ${{ github.event.inputs['pre-release'] == 'true' }}
docker-release-api:
name: Release API Docker Image
needs: [ verify, create-gh-release ]
runs-on: ubuntu-latest
if: github.event.inputs.publish-images == 'true'
steps:
- name: Get Version
id: get-version
run: echo "::set-output name=vinyldns_version::$(curl -s https://api.github.com/repos/vinyldns/vinyldns/releases | jq -rc '.[0].tag_name')"
- name: Checkout current branch (full)
uses: actions/checkout@v4
with:
ref: ${{ steps.get-version.outputs.vinyldns_version }}
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Import Content Trust Key
run: docker trust key load <(echo "${SIGNING_KEY}") --name vinyldns_svc
env:
SIGNING_KEY: ${{ secrets.SIGNING_KEY }}
DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }}
# This will publish the latest release
- name: Publish API Docker Image
run: make -C build/docker/api publish
env:
DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }}
docker-release-portal:
name: Release Portal Docker Image
needs: [ verify, create-gh-release ]
runs-on: ubuntu-latest
if: github.event.inputs.publish-images == 'true'
steps:
- name: Get Version
id: get-version
run: echo "::set-output name=vinyldns_version::$(curl -s https://api.github.com/repos/vinyldns/vinyldns/releases | jq -rc '.[0].tag_name')"
- name: Checkout current branch (full)
uses: actions/checkout@v4
with:
ref: ${{ steps.get-version.outputs.vinyldns_version }}
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Import Content Trust Key
run: docker trust key load <(echo "${SIGNING_KEY}") --name vinyldns_svc
env:
SIGNING_KEY: ${{ secrets.SIGNING_KEY }}
DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }}
# This will publish the latest release
- name: Publish Portal Docker Image
run: make -C build/docker/portal publish
env:
DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }}

View File

@ -24,11 +24,13 @@ jobs:
steps:
- name: Checkout current branch
if: github.event_name != 'push' # We only need to verify if this is manually triggered
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and Test
if: github.event_name != 'push' # We only need to verify if this is manually triggered
run: cd build/ && ./assemble_api.sh && ./run_all_tests.sh
docker-release-api:

View File

@ -22,7 +22,7 @@ on:
description: 'Publish Docker Images?'
required: true
default: 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -34,7 +34,7 @@ jobs:
steps:
- name: Checkout current branch
if: github.event.inputs.verify-first == 'true'
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -48,12 +48,12 @@ jobs:
needs: verify
runs-on: ubuntu-latest
if: github.event.inputs.create-gh-release == 'true'
permissions:
permissions:
contents: write
steps:
- name: Checkout current branch
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -67,7 +67,7 @@ jobs:
- name: Create GitHub Release
id: create_release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
with:
tag_name: v${{ steps.get-version.outputs.vinyldns_version }}
generate_release_notes: true
@ -83,15 +83,15 @@ jobs:
- name: Get Version
id: get-version
run: echo "::set-output name=vinyldns_version::$(curl -s https://api.github.com/repos/vinyldns/vinyldns/releases | jq -rc '.[0].tag_name')"
- name: Checkout current branch (full)
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
ref: ${{ steps.get-version.outputs.vinyldns_version }}
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
@ -118,15 +118,15 @@ jobs:
- name: Get Version
id: get-version
run: echo "::set-output name=vinyldns_version::$(curl -s https://api.github.com/repos/vinyldns/vinyldns/releases | jq -rc '.[0].tag_name')"
- name: Checkout current branch (full)
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
ref: ${{ steps.get-version.outputs.vinyldns_version }}
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_TOKEN }}

View File

@ -26,10 +26,42 @@ jobs:
fetch-depth: 0
- name: Build and Test
id: build
run: cd build/ && ./assemble_api.sh && ./run_all_tests.sh
- name: Codecov
uses: codecov/codecov-action@v2
id: codecov0
uses: codecov/codecov-action@v4
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
- name: Codecov Retry
id: codecov1
if: steps.codecov0.outcome=='failure'
uses: codecov/codecov-action@v4
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Codecov Retry 2
id: codecov2
if: steps.codecov1.outcome=='failure'
uses: codecov/codecov-action@v4
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Codecov Retry 3
id: codecov3
if: steps.codecov2.outcome=='failure'
uses: codecov/codecov-action@v4
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: set the status # set the workflow status if command failed
if: steps.build.outcome=='success'
run: |
if ${{ steps.codecov0.outcome=='success' || steps.codecov1.outcome=='success' || steps.codecov2.outcome=='success' || steps.codecov3.outcome=='success' }}; then
echo "Codecov completed successfully"
else
echo "Codecov failed after three retries"
exit 1
fi

13
.gitignore vendored
View File

@ -34,5 +34,16 @@ project/metals.sbt
quickstart/data
**/.virtualenv
**/.venv*
**/*cache*
**/artifacts/
**/.env.overrides
**/node_modules
.run
**/*.zip
**/*.log
modules/portal/public/custom/
modules/portal/public/gentelella/
modules/portal/public/javascripts/
modules/portal/public/project/
modules/portal/public/stylesheets/
**/.env.*
modules/portal/package-lock.json

View File

@ -21,12 +21,14 @@ in any way, but do not see your name here, please open a PR to add yourself (in
- Joe Crowe
- Jearvon Dharrie
- Andrew Dunn
- Josh Edwards
- Ryan Emerle
- David Grizzanti
- Alejandro Guirao
- Daniel Jin
- Harry Kauffman
- Krista Khare
- Sokitha Krishnan
- Patrick Lee
- Sheree Liu
- Michael Ly
@ -36,14 +38,18 @@ in any way, but do not see your name here, please open a PR to add yourself (in
- Joshulyne Park
- Nathan Pierce
- Michael Pilquist
- Aravindh Raju
- Sriram Ramakrishnan
- Khalid Reid
- Timo Schmid
- Trent Schmidt
- Arpit Shah
- Ghafar Shah
- Nick Spadaccino
- Rebecca Star
- Jess Stodola
- Juan Valencia
- Jayaraj Velkumar
- Anastasia Vishnyakova
- Jim Wakemen
- Fei Wan

View File

@ -10,6 +10,7 @@
* [Portal](#portal)
* [Documentation](#documentation)
- [Running VinylDNS Locally](#running-vinyldns-locally)
* [Support for M1 Macs](#support-for-m1-macs)
* [Starting the API Server](#starting-the-api-server)
* [Starting the Portal](#starting-the-portal)
- [Testing](#testing)
@ -27,18 +28,19 @@
## Developer Requirements (Local)
- Java 8+
- Java (version: >= 8, <= 11)
- Scala 2.12
- sbt 1.4+
- curl
- docker
- docker-compose
- GNU Make 3.82+
- grunt
- npm
- Node.js/npm v12+
- Python 3.5+
- [coreutils](https://www.gnu.org/software/coreutils/)
- Linux: `apt install coreutils` or `yum install coreutils`
- macOS: [`brew install coreutils`](https://formulae.brew.sh/formula/coreutils)
## Developer Requirements (Docker)
@ -174,6 +176,7 @@ Once the prerequisites are running, you can start up sbt by running `sbt` from t
- `project api` to change the sbt project to the API
- `reStart` to start up the API server
- To enable interactive debugging, you can run `set Revolver.enableDebugging(port = 5020, suspend = true)` before running `reStart`
- Wait until you see the message `VINYLDNS SERVER STARTED SUCCESSFULLY` before working with the server
- To stop the VinylDNS server, run `reStop` from the api project
- To stop the dependent Docker containers: `utils/clean-vinyldns-containers.sh`
@ -187,7 +190,7 @@ To run the portal locally, you _first_ have to start up the VinylDNS API Server.
instructions for [Staring the API Server](#starting-the-api-server) or by using the QuickStart:
```shell
quickstart/quickstart-vinyldns.sh --api-only
quickstart/quickstart-vinyldns.sh --api
```
Once that is done, in the same `sbt` session or a different one, go to `project portal` and then
@ -265,7 +268,7 @@ Additionally, you can pass `--interactive` to `make run` or `make run-local` to
From there you can run tests with the `/functional_test/run.sh` command. This allows for finer-grained control over the
test execution process as well as easier inspection of logs.
You can run a specific test by name by running `make run -- -k <name of test function>`. Any arguments after
You can run a specific test by name by running `make build` and `make run -- -k <name of test function>`. Any arguments after
`make run --` will be passed to the test runner [`test/api/functional/run.sh`](test/api/functional/run.sh).
Finally, you can execute `make run-deps-bg` to all of the dependencies for the functional test, but not run the tests.

View File

@ -1,6 +1,6 @@
[![VinylDNS Release](https://img.shields.io/github/v/release/vinyldns/vinyldns?label=latest%20release&logo=github)](https://github.com/vinyldns/vinyldns/releases/latest)
[![VinylDNS API Docker Image](https://img.shields.io/docker/v/vinyldns/api?color=brightgreen&label=API%20Image&logo=docker&logoColor=white)](https://hub.docker.com/r/vinyldns/api/tags?page=1&ordering=last_updated)
[![VinylDNS Portal Docker Image](https://img.shields.io/docker/v/vinyldns/portal?color=brightgreen&label=Portal%20Image&logo=docker&logoColor=white)](https://hub.docker.com/r/vinyldns/portal/tags?page=1&ordering=last_updated)
[![VinylDNS API Docker Image](https://img.shields.io/github/v/release/vinyldns/vinyldns?color=brightgreen&label=API%20Image&logo=docker&logoColor=white&cacheSeconds=300)](https://hub.docker.com/r/vinyldns/api/tags?page=1&ordering=last_updated)
[![VinylDNS Portal Docker Image](https://img.shields.io/github/v/release/vinyldns/vinyldns?color=brightgreen&label=Portal%20Image&logo=docker&logoColor=white&cacheSeconds=300)](https://hub.docker.com/r/vinyldns/portal/tags?page=1&ordering=last_updated)
<p align="left">
<a href="https://www.vinyldns.io/">
@ -145,9 +145,9 @@ See the [Contributing Guide](CONTRIBUTING.md).
The current maintainers (people who can merge pull requests) are:
- Ryan Emerle ([@remerle](https://github.com/remerle))
- Sriram Ramakrishnan ([@sramakr](https://github.com/sramakr))
- Jim Wakemen ([@jwakemen](https://github.com/jwakemen))
- Arpit Shah ([@arpit4ever](https://github.com/arpit4ever))
- Nick Spadaccino ([@nspadaccino](https://github.com/nspadaccino))
- Jayaraj Velkumar ([@Jay07GIT](https://github.com/Jay07GIT))
See [AUTHORS.md](AUTHORS.md) for the full list of contributors to VinylDNS.

View File

@ -2,10 +2,9 @@ import CompilerOptions._
import Dependencies._
import microsites._
import org.scalafmt.sbt.ScalafmtPlugin._
import scoverage.ScoverageKeys.{coverageFailOnMinimum, coverageMinimum}
import scoverage.ScoverageKeys.{coverageMinimum, coverageFailOnMinimum}
import scala.language.postfixOps
import scala.sys.env
import scala.util.Try
lazy val IntegrationTest = config("it").extend(Test)
@ -38,7 +37,7 @@ lazy val sharedSettings = Seq(
// coverage options
coverageMinimum := 85,
coverageFailOnMinimum := true,
coverageHighlighting := true,
coverageHighlighting := true
)
lazy val testSettings = Seq(
@ -73,6 +72,10 @@ lazy val apiAssemblySettings = Seq(
MergeStrategy.discard
case PathList("scala", "tools", "nsc", "doc", "html", "resource", "lib", "template.js") =>
MergeStrategy.discard
case "simulacrum/op.class" | "simulacrum/op$.class" | "simulacrum/typeclass$.class"
| "simulacrum/typeclass.class" | "simulacrum/noop.class" =>
MergeStrategy.discard
case x if x.endsWith("module-info.class") => MergeStrategy.discard
case x =>
val oldStrategy = (assemblyMergeStrategy in assembly).value
oldStrategy(x)
@ -113,8 +116,8 @@ lazy val coreBuildSettings = Seq(
// do not use unused params as NoOpCrypto ignores its constructor, we should provide a way
// to write a crypto plugin so that we fall back to a noarg constructor
scalacOptions ++= scalacOptionsByV(scalaVersion.value).filterNot(_ == "-Ywarn-unused:params"),
PB.targets in Compile := Seq(PB.gens.java("2.6.1") -> (sourceManaged in Compile).value),
PB.protocVersion := "-v261"
PB.targets in Compile := Seq(PB.gens.java("3.21.7") -> (sourceManaged in Compile).value),
PB.protocVersion := "3.21.7"
)
lazy val corePublishSettings = Seq(
@ -205,9 +208,11 @@ lazy val portalSettings = Seq(
routesGenerator := InjectedRoutesGenerator,
coverageExcludedPackages := "<empty>;views.html.*;router.*;controllers\\.javascript.*;.*Reverse.*",
javaOptions in Test += "-Dconfig.file=conf/application-test.conf",
// ads the version when working locally with sbt run
// Adds the version when working locally with sbt run
PlayKeys.devSettings += "vinyldns.base-version" -> (version in ThisBuild).value,
// adds an extra classpath to the portal loading so we can externalize jars, make sure to create the lib_extra
// Automatically run the prepare portal script before `run`
PlayKeys.playRunHooks += PreparePortalHook(baseDirectory.value),
// Adds an extra classpath to the portal loading so we can externalize jars, make sure to create the lib_extra
// directory and lay down any dependencies that are required when deploying
scriptClasspath in bashScriptDefines ~= (cp => cp :+ "lib_extra/*"),
mainClass in reStart := None,
@ -240,7 +245,7 @@ lazy val portal = (project in file("modules/portal"))
.settings(testSettings)
.settings(portalSettings)
.settings(
name := "portal",
name := "portal"
)
.dependsOn(mysql)
@ -252,6 +257,8 @@ lazy val docSettings = Seq(
micrositeDescription := "DNS Automation and Governance",
micrositeAuthor := "VinylDNS",
micrositeHomepage := "https://vinyldns.io",
micrositeTwitter := "@vinyldns_oss",
micrositeTwitterCreator := "@vinyldns_oss",
micrositeDocumentationUrl := "/api",
micrositeDocumentationLabelDescription := "API Documentation",
micrositeHighlightLanguages ++= Seq("json", "yaml", "bnf", "plaintext"),

42
build/assemble_artifacts.sh Executable file
View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
#
# This script will build the API and Portal artifacts from the current
# workspace code
#
set -euo pipefail
DIR=$(
cd "$(dirname "$0")"
pwd -P
)
usage() {
echo "USAGE: assemble_artifacts.sh [options]"
echo -e "\t-c, --clean removes all files from the ./artifacts directory"
}
clean(){
echo "Cleaning artifacts"
if [ -d "${DIR}/../artifacts/" ] && [ -f "${DIR}/../artifacts/vinyldns-api.jar" ]; then
rm "${DIR}/../artifacts/vinyldns-api.jar"
fi
if [ -d "${DIR}/../artifacts/" ] && [ -f "${DIR}/../artifacts/vinyldns-portal.zip" ]; then
rm "${DIR}/../artifacts/vinyldns-portal.zip"
fi
}
while [[ $# -gt 0 ]]; do
case "$1" in
--clean | -c)
clean
exit 0
;;
*)
usage
exit 1
;;
esac
done
clean
"${DIR}/assemble_api.sh" && "${DIR}/assemble_portal.sh"

View File

@ -17,3 +17,4 @@ JDBC_URL=jdbc:mariadb://vinyldns-integration:19002/vinyldns?user=root&password=p
JDBC_MIGRATION_URL=jdbc:mariadb://vinyldns-integration:19002/?user=root&password=pass
JDBC_USER=root
JDBC_PASSWORD=pass
FLYWAY_OUT_OF_ORDER=false

View File

@ -31,8 +31,9 @@ VOLUME ["/opt/vinyldns/lib_extra/", "/opt/vinyldns/conf/"]
EXPOSE 9000
ENV JVM_OPTS=""
ENTRYPOINT ["/bin/bash", "-c", "java ${JVM_OPTS} -Dconfig.file=/opt/vinyldns/conf/application.conf \
ENV INIT_SCRIPT="/bin/true"
ENTRYPOINT ["/bin/bash", "-c", "${INIT_SCRIPT} && java ${JVM_OPTS} -Dconfig.file=/opt/vinyldns/conf/application.conf \
-Dlogback.configurationFile=/opt/vinyldns/conf/logback.xml \
-Dvinyldns.version=$(cat /opt/vinyldns/version) \
-cp /opt/vinyldns/lib_extra/* \
-jar /opt/vinyldns/vinyldns-api.jar" ]
-cp /opt/vinyldns/lib_extra/*:/opt/vinyldns/vinyldns-api.jar \
vinyldns.api.Boot" ]

View File

@ -28,6 +28,16 @@ vinyldns {
multi-record-batch-change-enabled = true
multi-record-batch-change-enabled = ${?MULTI_RECORD_BATCH_CHANGE_ENABLED}
# Server settings
use-recordset-cache = true
use-recordset-cache = ${?USE_RECORDSET_CACHE}
load-test-data = false
load-test-data = ${?LOAD_TEST_DATA}
# should be true while running locally or when we have only one api server/instance, for zone sync scheduler to work
is-zone-sync-schedule-allowed = true
# should be set to true only on a single server/instance else automated sync will be performed at every server/instance
is-zone-sync-schedule-allowed = ${?IS_ZONE_SYNC_SCHEDULE_ALLOWED}
# configured backend providers
backend {
# Use "default" when dns backend legacy = true
@ -151,6 +161,17 @@ vinyldns {
port=${?API_SERVICE_PORT}
}
api {
limits {
batchchange-routing-max-items-limit = 100
membership-routing-default-max-items = 100
membership-routing-max-items-limit = 1000
membership-routing-max-groups-list-limit = 3000
recordset-routing-default-max-items= 100
zone-routing-default-max-items = 100
zone-routing-max-items-limit = 100
}
}
approved-name-servers = [
"172.17.42.1.",
@ -178,14 +199,22 @@ vinyldns {
name = ${?DATABASE_NAME}
driver = "org.mariadb.jdbc.Driver"
driver = ${?JDBC_DRIVER}
migration-url = "jdbc:mariadb://localhost:19002/?user=root&password=pass"
migration-url = "jdbc:mariadb://localhost:19002/?user=root&password=pass&socketTimeout=20000"
migration-url = ${?JDBC_MIGRATION_URL}
url = "jdbc:mariadb://localhost:19002/vinyldns?user=root&password=pass"
url = "jdbc:mariadb://localhost:19002/vinyldns?user=root&password=pass&socketTimeout=20000"
url = ${?JDBC_URL}
user = "root"
user = ${?JDBC_USER}
password = "pass"
password = ${?JDBC_PASSWORD}
flyway-out-of-order = false
flyway-out-of-order = ${?FLYWAY_OUT_OF_ORDER}
max-lifetime = 300000
connection-timeout-millis = 30000
idle-timeout = 150000
maximum-pool-size = 20
minimum-idle = 5
}
# TODO: Remove the need for these useless configuration blocks
@ -198,6 +227,8 @@ vinyldns {
}
record-set {
}
record-set-cache {
}
zone-change {
}
record-change {
@ -320,13 +351,17 @@ akka.http {
# Set to `infinite` to disable.
bind-timeout = 5s
# A default request timeout is applied globally to all routes and can be configured using the
# akka.http.server.request-timeout setting (which defaults to 20 seconds).
# request-timeout = 60s
# Show verbose error messages back to the client
verbose-error-messages = on
}
parsing {
# Spray doesn't like the AWS4 headers
illegal-header-warnings = on
# Don't complain about the / in the AWS SigV4 auth header
ignore-illegal-header-for = ["authorization"]
}
}

View File

@ -1,32 +1,28 @@
<configuration>
<variable name="VINYLDNS_LOG_LEVEL" value="${VINYLDNS_LOG_LEVEL:-INFO}" />
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>
<logger name="org.flywaydb" level="DEBUG">
<logger name="vinyldns.core.route.Monitor" level="OFF"/>
<logger name="scalikejdbc.StatementExecutor$$anon$1" level="OFF"/>
<logger name="com.zaxxer.hikari" level="ERROR">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="org.flywaydb.core.internal.dbsupport.SqlScript" level="DEBUG">
<appender-ref ref="CONSOLE"/>
<logger name="BANNER_LOGGER" level="INFO" additivity="false">
<appender-ref ref="BANNER_APPENDER"/>
</logger>
<logger name="org.flywaydb.core.internal.command.DbMigrate" level="DEBUG">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="vinyldns.core.route.Monitor" level="OFF">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="scalikejdbc.StatementExecutor$$anon$1" level="OFF">
<appender-ref ref="CONSOLE"/>
</logger>
<root level="INFO">
<root level="${VINYLDNS_LOG_LEVEL}">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>

View File

@ -39,7 +39,8 @@ VOLUME ["/opt/vinyldns/lib_extra/", "/opt/vinyldns/conf/"]
EXPOSE 9001
ENV JVM_OPTS=""
ENTRYPOINT ["/bin/bash","-c", "java ${JVM_OPTS} -Dvinyldns.version=$(cat /opt/vinyldns/version) \
ENV INIT_SCRIPT="/bin/true"
ENTRYPOINT ["/bin/bash","-c", "${INIT_SCRIPT} && java ${JVM_OPTS} -Dvinyldns.version=$(cat /opt/vinyldns/version) \
-Dlogback.configurationFile=/opt/vinyldns/conf/logback.xml \
-Dconfig.file=/opt/vinyldns/conf/application.conf \
-cp /opt/vinyldns/conf:/opt/vinyldns/lib/*:/opt/vinyldns/lib_extra/* \

View File

@ -37,6 +37,38 @@ LDAP {
}
}
mysql {
class-name = "vinyldns.mysql.repository.MySqlDataStoreProvider"
endpoint = "localhost:19002"
endpoint = ${?MYSQL_ENDPOINT}
settings {
# JDBC Settings, these are all values in scalikejdbc-config, not our own
# these must be overridden to use MYSQL for production use
# assumes a docker or mysql instance running locally
name = "vinyldns"
name = ${?DATABASE_NAME}
driver = "org.mariadb.jdbc.Driver"
driver = ${?JDBC_DRIVER}
migration-url = "jdbc:mariadb://"${mysql.endpoint}"/?user=root&password=pass&socketTimeout=20000"
migration-url = ${?JDBC_MIGRATION_URL}
url = "jdbc:mariadb://"${mysql.endpoint}"/vinyldns?user=root&password=pass&socketTimeout=20000"
url = ${?JDBC_URL}
user = "root"
user = ${?JDBC_USER}
password = "pass"
password = ${?JDBC_PASSWORD}
flyway-out-of-order = false
flyway-out-of-order = ${?FLYWAY_OUT_OF_ORDER}
max-lifetime = 300000
connection-timeout-millis = 30000
idle-timeout = 150000
maximum-pool-size = 20
minimum-idle = 5
}
}
# Note: This MUST match the API or strange errors will ensue, NoOpCrypto should not be used for production
crypto {
type = "vinyldns.core.crypto.NoOpCrypto"
@ -44,6 +76,18 @@ crypto {
secret = ${?CRYPTO_SECRET}
}
api {
limits {
batchchange-routing-max-items-limit = 100
membership-routing-default-max-items = 100
membership-routing-max-items-limit = 1000
membership-routing-max-groups-list-limit = 3000
recordset-routing-default-max-items= 100
zone-routing-default-max-items = 100
zone-routing-max-items-limit = 100
}
}
http.port = 9001
http.port = ${?PORTAL_PORT}
@ -57,6 +101,12 @@ shared-display-enabled = ${?SHARED_ZONES_ENABLED}
play.http.secret.key = "changeme"
play.http.secret.key = ${?PLAY_HTTP_SECRET_KEY}
# See https://www.playframework.com/documentation/2.8.x/AllowedHostsFilter for more details.
# Note: allowed = ["."] matches all hosts hence would not be recommended in a production environment.
play.filters.hosts {
allowed = ["."]
}
# You can provide configuration overrides via local.conf if you don't want to replace everything in
# this configuration file
include "local.conf"

View File

@ -1,10 +1,11 @@
<configuration>
<variable name="VINYLDNS_LOG_LEVEL" value="${VINYLDNS_LOG_LEVEL:-INFO}" />
<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{"yyyy-MM-dd HH:mm:ss,SSS"} %coloredLevel - %logger - %message%n%xException</pattern>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-portal</serviceName>
<serviceNodeName>vinyldns-portal</serviceNodeName>
</encoder>
</appender>
<!--
@ -14,8 +15,12 @@
<logger name="play" level="INFO" />
<logger name="application" level="DEBUG" />
<root level="INFO">
<appender-ref ref="STDOUT" />
<logger name="com.zaxxer.hikari" level="ERROR">
<appender-ref ref="CONSOLE"/>
</logger>
<root level="${VINYLDNS_LOG_LEVEL}">
<appender-ref ref="CONSOLE" />
</root>
</configuration>

View File

@ -3,5 +3,47 @@ set -euo pipefail
DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
usage() {
echo "USAGE: sbt.sh [options]"
echo -e "\t-d, --debug enable debugging"
echo -e "\t-p, --debug-port the debug port (default: 5021)"
echo -e "\t-e, --expose expose one or more ports using the Docker format for exposing ports"
}
DEBUG_SETTINGS=""
DEBUG_PORT=5021
EXPOSE_PORTS=""
while [[ $# -gt 0 ]]; do
case "$1" in
--debug | -d)
DEBUG_SETTINGS="-e SBT_OPTS=\"-agentlib:jdwp=transport=dt_socket,server=y,suspend=n"
shift
;;
--debug-port | -p)
DEBUG_PORT=$2
shift
shift
;;
--expose | -e)
EXPOSE_PORTS="${EXPOSE_PORTS} -p \"$2\""
shift
shift
;;
*)
usage
exit 1
;;
esac
done
if [ "${DEBUG_SETTINGS}" != "" ]; then
DEBUG_SETTINGS="${DEBUG_SETTINGS},address=${DEBUG_PORT}\""
# If given a debug port outside of the default range, expose the selected port
if [[ "$DEBUG_PORT" -lt 5020 ]] || [[ "$DEBUG_PORT" -gt 5030 ]]; then
DEBUG_SETTINGS="${DEBUG_SETTINGS} -p \"${DEBUG_PORT}:${DEBUG_PORT}\""
fi
fi
cd "$DIR/../test/api/integration"
make build DOCKER_PARAMS="--build-arg SKIP_API_BUILD=true" && make run-local WITH_ARGS="sbt" DOCKER_PARAMS="-e RUN_SERVICES=none --env-file \"$DIR/../test/api/integration/.env.integration\""
make build DOCKER_PARAMS="--build-arg SKIP_API_BUILD=true" && make run-local WITH_ARGS="sbt" DOCKER_PARAMS="-p \"5020-5030:5020-5030\" ${DEBUG_SETTINGS} ${EXPOSE_PORTS} -e RUN_SERVICES=none --env-file \"$DIR/../test/api/integration/.env.integration\""

View File

@ -2,6 +2,11 @@
set -euo pipefail
DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
source "${DIR}/../utils/includes/terminal_colors.sh"
if [ ! -d "${DIR}/../artifacts" ] || [ ! -f "${DIR}/../artifacts/vinyldns-api.jar" ]; then
echo -e "${F_YELLOW}Warning:${F_RESET} you might want to run 'build/assemble_api.sh' first to improve performance"
fi
cd "${DIR}/../test/api/integration"
make build && make run DOCKER_PARAMS="-v \"$(pwd)/../../../target:/build/target\"" WITH_ARGS="bash -c \"sbt ';validate' && sbt ';verify'\""

View File

@ -49,6 +49,7 @@ vinyldns {
key-name = ${?DEFAULT_DNS_KEY_NAME}
key = "nzisn+4G2ldMn0q1CV3vsg=="
key = ${?DEFAULT_DNS_KEY_SECRET}
algorithm = "HMAC-MD5"
primary-server = "127.0.0.1:19001"
primary-server = ${?DEFAULT_DNS_ADDRESS}
}
@ -58,6 +59,7 @@ vinyldns {
key-name = ${?DEFAULT_DNS_KEY_NAME}
key = "nzisn+4G2ldMn0q1CV3vsg=="
key = ${?DEFAULT_DNS_KEY_SECRET}
algorithm = "HMAC-MD5"
primary-server = "127.0.0.1:19001"
primary-server = ${?DEFAULT_DNS_ADDRESS}
},
@ -161,6 +163,28 @@ vinyldns {
"ns1.parent.com4."
]
# approved zones, individual users, users in groups, record types and no.of.dots that are allowed for dotted hosts
dotted-hosts = {
# for local testing
allowed-settings = [
{
zone = "*mmy."
user-list = ["testuser"]
group-list = ["dummy-group"]
record-types = ["AAAA"]
dots-limit = 3
},
{
# for wildcard zones. Settings will be applied to all matching zones
zone = "parent.com."
user-list = ["professor", "testuser"]
group-list = ["testing-group"]
record-types = ["A", "CNAME"]
dots-limit = 3
}
]
}
# Note: This MUST match the Portal or strange errors will ensue, NoOpCrypto should not be used for production
crypto {
type = "vinyldns.core.crypto.NoOpCrypto"
@ -186,6 +210,8 @@ vinyldns {
user = ${?JDBC_USER}
password = "pass"
password = ${?JDBC_PASSWORD}
flyway-out-of-order = false
flyway-out-of-order = ${?FLYWAY_OUT_OF_ORDER}
}
# TODO: Remove the need for these useless configuration blocks
@ -198,6 +224,8 @@ vinyldns {
}
record-set {
}
record-set-cache {
}
zone-change {
}
record-change {
@ -320,13 +348,17 @@ akka.http {
# Set to `infinite` to disable.
bind-timeout = 5s
# A default request timeout is applied globally to all routes and can be configured using the
# akka.http.server.request-timeout setting (which defaults to 20 seconds).
# request-timeout = 60s
# Show verbose error messages back to the client
verbose-error-messages = on
}
parsing {
# Spray doesn't like the AWS4 headers
illegal-header-warnings = on
# Don't complain about the / in the AWS SigV4 auth header
ignore-illegal-header-for = ["authorization"]
}
}

View File

@ -1,7 +1,8 @@
<configuration>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg%n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>

View File

@ -34,6 +34,6 @@ trait MySqlApiIntegrationSpec extends MySqlIntegrationSpec {
def clearGroupRepo(): Unit =
DB.localTx { s =>
s.executeUpdate("DELETE FROM groups")
s.executeUpdate("DELETE FROM `groups`")
}
}

View File

@ -16,19 +16,14 @@
package vinyldns.api.backend.dns
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import vinyldns.api.backend.dns.DnsProtocol.NoError
import vinyldns.core.crypto.NoOpCrypto
import vinyldns.core.domain.record.{
AData,
RecordSet,
RecordSetChange,
RecordSetChangeType,
RecordSetStatus,
RecordType
}
import vinyldns.core.domain.Encrypted
import vinyldns.core.domain.record.{AData, RecordSet, RecordSetChange, RecordSetChangeType, RecordSetStatus, RecordType}
import vinyldns.core.domain.zone.{Algorithm, Zone, ZoneConnection}
class DnsBackendIntegrationSpec extends AnyWordSpec with Matchers {
@ -36,7 +31,7 @@ class DnsBackendIntegrationSpec extends AnyWordSpec with Matchers {
private val testConnection = ZoneConnection(
"vinyldns.",
"vinyldns.",
"nzisn+4G2ldMn0q1CV3vsg==",
Encrypted("nzisn+4G2ldMn0q1CV3vsg=="),
sys.env.getOrElse("DEFAULT_DNS_ADDRESS", "127.0.0.1:19001"),
Algorithm.HMAC_MD5
)
@ -74,7 +69,7 @@ class DnsBackendIntegrationSpec extends AnyWordSpec with Matchers {
RecordType.A,
200,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)

View File

@ -16,7 +16,8 @@
package vinyldns.api.domain.batch
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import vinyldns.api.MySqlApiIntegrationSpec
@ -44,7 +45,7 @@ class BatchChangeRepositoryIntegrationSpec
okUser.id,
okUser.userName,
None,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
List(
SingleAddChange(
Some("some-zone-id"),

View File

@ -19,54 +19,79 @@ package vinyldns.api.domain.record
import cats.effect._
import cats.implicits._
import cats.scalatest.EitherMatchers
import org.joda.time.DateTime
import org.mockito.Matchers.any
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.mockito.Mockito._
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.wordspec.AnyWordSpec
import scalikejdbc.DB
import vinyldns.api._
import vinyldns.api.config.VinylDNSConfig
import vinyldns.api.domain.access.AccessValidations
import vinyldns.api.domain.zone._
import vinyldns.api.engine.TestMessageQueue
import vinyldns.core.TestMembershipData._
import vinyldns.mysql.TransactionProvider
import vinyldns.core.TestZoneData.testConnection
import vinyldns.core.domain.{Fqdn, HighValueDomainError}
import vinyldns.core.domain.{Encrypted, Fqdn, HighValueDomainError}
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.backend.{Backend, BackendResolver}
import vinyldns.core.domain.membership.{Group, GroupRepository, User, UserRepository}
import vinyldns.core.domain.record.RecordType._
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone._
import vinyldns.core.notifier.{AllNotifiers, Notification, Notifier}
import scala.concurrent.ExecutionContext
class RecordSetServiceIntegrationSpec
extends AnyWordSpec
extends AnyWordSpec
with ResultHelpers
with EitherMatchers
with MockitoSugar
with Matchers
with MySqlApiIntegrationSpec
with BeforeAndAfterEach
with BeforeAndAfterAll {
with BeforeAndAfterAll
with TransactionProvider {
private implicit val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
private val vinyldnsConfig = VinylDNSConfig.load().unsafeRunSync()
private val recordSetRepo = recordSetRepository
private val recordSetCacheRepo = recordSetCacheRepository
private val mockNotifier = mock[Notifier]
private val mockNotifiers = AllNotifiers(List(mockNotifier))
private val zoneRepo: ZoneRepository = zoneRepository
private val groupRepo: GroupRepository = groupRepository
private var testRecordSetService: RecordSetServiceAlgebra = _
private val user = User("live-test-user", "key", "secret")
private val user2 = User("shared-record-test-user", "key-shared", "secret-shared")
private val user = User("live-test-user", "key", Encrypted("secret"))
private val testUser = User("testuser", "key", Encrypted("secret"))
private val user2 = User("shared-record-test-user", "key-shared", Encrypted("secret-shared"))
private val group = Group(s"test-group", "test@test.com", adminUserIds = Set(user.id))
private val dummyGroup = Group(s"dummy-group", "test@test.com", adminUserIds = Set(testUser.id))
private val group2 = Group(s"test-group", "test@test.com", adminUserIds = Set(user.id, user2.id))
private val sharedGroup =
Group(s"test-shared-group", "test@test.com", adminUserIds = Set(user.id, user2.id))
private val auth = AuthPrincipal(user, Seq(group.id, sharedGroup.id))
private val auth2 = AuthPrincipal(user2, Seq(sharedGroup.id, group2.id))
val dummyAuth: AuthPrincipal = AuthPrincipal(testUser, Seq(dummyGroup.id))
private val dummyZone = Zone(
s"dummy.",
"test@test.com",
status = ZoneStatus.Active,
connection = testConnection,
adminGroupId = dummyGroup.id
)
private val zone = Zone(
s"live-zone-test.",
"test@test.com",
@ -81,7 +106,7 @@ class RecordSetServiceIntegrationSpec
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
@ -91,17 +116,30 @@ class RecordSetServiceIntegrationSpec
AAAA,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AAAAData("fd69:27cc:fe91::60"))
)
private val dottedTestRecord = RecordSet(
dummyZone.id,
"test.dotted",
AAAA,
38400,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AAAAData("fd69:27cc:fe91::60")),
recordSetGroupChange =
Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.None,
requestedOwnerGroupId = None))
)
private val subTestRecordA = RecordSet(
zone.id,
"a-record",
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
@ -111,7 +149,7 @@ class RecordSetServiceIntegrationSpec
AAAA,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AAAAData("fd69:27cc:fe91::60"))
)
@ -121,7 +159,7 @@ class RecordSetServiceIntegrationSpec
NS,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(NSData(Fqdn("172.17.42.1.")))
)
@ -139,7 +177,7 @@ class RecordSetServiceIntegrationSpec
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
@ -149,7 +187,7 @@ class RecordSetServiceIntegrationSpec
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
@ -168,7 +206,7 @@ class RecordSetServiceIntegrationSpec
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("1.1.1.1"))
)
@ -188,7 +226,7 @@ class RecordSetServiceIntegrationSpec
A,
200,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("1.1.1.1")),
ownerGroupId = Some(sharedGroup.id)
@ -200,19 +238,49 @@ class RecordSetServiceIntegrationSpec
A,
200,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("1.1.1.1")),
ownerGroupId = Some("non-existent")
)
private val sharedTestRecordPendingReviewOwnerShip = RecordSet(
sharedZone.id,
"shared-record-ownerShip-pendingReview",
A,
200,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("1.1.1.1")),
ownerGroupId = Some(sharedGroup.id),
recordSetGroupChange = Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.PendingReview,
requestedOwnerGroupId = Some(group.id)))
)
private val sharedTestRecordCancelledOwnerShip = RecordSet(
sharedZone.id,
"shared-record-ownerShip-cancelled",
A,
200,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("1.1.1.1")),
ownerGroupId = Some(sharedGroup.id),
recordSetGroupChange = Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.Cancelled,
requestedOwnerGroupId = Some(group.id)))
)
private val testOwnerGroupRecordInNormalZone = RecordSet(
zone.id,
"user-in-owner-group-but-zone-not-shared",
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1")),
ownerGroupId = Some(sharedGroup.id)
@ -241,8 +309,16 @@ class RecordSetServiceIntegrationSpec
clearZoneRepo()
clearGroupRepo()
List(group, group2, sharedGroup).traverse(g => groupRepo.save(g).void).unsafeRunSync()
List(zone, zoneTestNameConflicts, zoneTestAddRecords, sharedZone)
def saveGroupData(
groupRepo: GroupRepository,
group: Group
): IO[Group] =
executeWithinTransaction { db: DB =>
groupRepo.save(db, group)
}
List(group, group2, sharedGroup, dummyGroup).traverse(g => saveGroupData(groupRepo, g).void).unsafeRunSync()
List(zone, dummyZone, zoneTestNameConflicts, zoneTestAddRecords, sharedZone)
.traverse(
z => zoneRepo.save(z)
)
@ -251,7 +327,10 @@ class RecordSetServiceIntegrationSpec
// Seeding records in DB
val sharedRecords = List(
sharedTestRecord,
sharedTestRecordBadOwnerGroup
sharedTestRecordBadOwnerGroup,
sharedTestRecordPendingReviewOwnerShip,
sharedTestRecordCancelledOwnerShip
)
val conflictRecords = List(
subTestRecordNameConflict,
@ -260,6 +339,7 @@ class RecordSetServiceIntegrationSpec
val zoneRecords = List(
apexTestRecordA,
apexTestRecordAAAA,
dottedTestRecord,
subTestRecordA,
subTestRecordAAAA,
subTestRecordNS,
@ -271,12 +351,15 @@ class RecordSetServiceIntegrationSpec
conflictRecords.map(makeAddChange(_, zoneTestNameConflicts)) ++
zoneRecords.map(makeAddChange(_, zone))
)
recordSetRepo.apply(changes).unsafeRunSync()
executeWithinTransaction { db: DB =>
recordSetRepo.apply(db, changes)
}.unsafeRunSync()
testRecordSetService = new RecordSetService(
zoneRepo,
groupRepo,
recordSetRepo,
recordSetCacheRepo,
mock[RecordChangeRepository],
mock[UserRepository],
TestMessageQueue,
@ -284,7 +367,10 @@ class RecordSetServiceIntegrationSpec
mockBackendResolver,
false,
vinyldnsConfig.highValueDomainConfig,
vinyldnsConfig.serverConfig.approvedNameServers
vinyldnsConfig.dottedHostsConfig,
vinyldnsConfig.serverConfig.approvedNameServers,
useRecordSetCache = true,
mockNotifiers
)
}
@ -306,7 +392,7 @@ class RecordSetServiceIntegrationSpec
A,
38400,
RecordSetStatus.Active,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
@ -321,6 +407,314 @@ class RecordSetServiceIntegrationSpec
.name shouldBe "zone-test-add-records."
}
"create dotted record fails if it doesn't satisfy dotted hosts config" in {
val newRecord = RecordSet(
zoneTestAddRecords.id,
"test.dot",
A,
38400,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AData("10.1.1.1"))
)
val result =
testRecordSetService
.addRecordSet(newRecord, auth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"create dotted record succeeds if it satisfies all dotted hosts config" in {
val newRecord = RecordSet(
dummyZone.id,
"testing.dotted",
AAAA,
38400,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AAAAData("fd69:27cc:fe91::60"))
)
// succeeds as zone, user and record type is allowed as defined in application.conf
val result =
testRecordSetService
.addRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
rightValue(result)
.asInstanceOf[RecordSetChange]
.recordSet
.name shouldBe "testing.dotted"
}
"fail creating dotted record if it satisfies all dotted hosts config except dots-limit for the zone" in {
val newRecord = RecordSet(
dummyZone.id,
"test.dotted.more.dots.than.allowed",
AAAA,
38400,
RecordSetStatus.Active,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
List(AAAAData("fd69:27cc:fe91::60"))
)
// The number of dots allowed in the record name for this zone as defined in the config is 3.
// Creating with 4 dots results in an error
val result =
testRecordSetService
.addRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"auto-approve ownership transfer request, if user tried to update the ownership" in {
val newRecord = sharedTestRecord.copy(recordSetGroupChange =
Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.AutoApproved,
requestedOwnerGroupId = Some(group.id))))
val result = testRecordSetService
.updateRecordSet(newRecord, auth2)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "shared-record"
change.recordSet.ownerGroupId.get shouldBe group.id
change.recordSet.recordSetGroupChange.get.ownerShipTransferStatus shouldBe OwnerShipTransferStatus.AutoApproved
change.recordSet.recordSetGroupChange.get.requestedOwnerGroupId.get shouldBe group.id
}
"approve ownership transfer request, if user requested for ownership transfer" in {
val newRecord = sharedTestRecordPendingReviewOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyApproved)))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, auth2)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "shared-record-ownerShip-pendingReview"
change.recordSet.ownerGroupId.get shouldBe group.id
change.recordSet.recordSetGroupChange.get.ownerShipTransferStatus shouldBe OwnerShipTransferStatus.ManuallyApproved
change.recordSet.recordSetGroupChange.get.requestedOwnerGroupId.get shouldBe group.id
}
"reject ownership transfer request, if user requested for ownership transfer" in {
val newRecord = sharedTestRecordPendingReviewOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyRejected)))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, auth2)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "shared-record-ownerShip-pendingReview"
change.recordSet.ownerGroupId.get shouldBe sharedGroup.id
change.recordSet.recordSetGroupChange.get.ownerShipTransferStatus shouldBe OwnerShipTransferStatus.ManuallyRejected
change.recordSet.recordSetGroupChange.get.requestedOwnerGroupId.get shouldBe group.id
}
"request ownership transfer, if user not in the owner group and wants to own the record" in {
val newRecord = sharedTestRecord.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.Requested,
requestedOwnerGroupId = Some(dummyGroup.id))))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "shared-record"
change.recordSet.ownerGroupId.get shouldBe sharedGroup.id
change.recordSet.recordSetGroupChange.get.ownerShipTransferStatus shouldBe OwnerShipTransferStatus.PendingReview
change.recordSet.recordSetGroupChange.get.requestedOwnerGroupId.get shouldBe dummyGroup.id
}
"fail requesting ownership transfer if user is not in owner group and tried to update other fields in record set" in {
val newRecord = sharedTestRecord.copy(
ttl = 3000,
recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.Requested,
requestedOwnerGroupId = Some(dummyGroup.id))))
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail updating if user is not in owner group for ownership transfer approval" in {
val newRecord = sharedTestRecordPendingReviewOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyApproved)))
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[NotAuthorizedError]
}
"fail updating if user is not in owner group for ownership transfer reject" in {
val newRecord = sharedTestRecordPendingReviewOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyRejected)))
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[NotAuthorizedError]
}
"cancel the ownership transfer request, if user not require ownership transfer further" in {
val newRecord = sharedTestRecordPendingReviewOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.Cancelled)))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "shared-record-ownerShip-pendingReview"
change.recordSet.ownerGroupId.get shouldBe sharedGroup.id
change.recordSet.recordSetGroupChange.get.ownerShipTransferStatus shouldBe OwnerShipTransferStatus.Cancelled
change.recordSet.recordSetGroupChange.get.requestedOwnerGroupId.get shouldBe group.id
}
"fail approving ownership transfer request, if user is cancelled" in {
val newRecord = sharedTestRecordCancelledOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyApproved)))
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail rejecting ownership transfer request, if user is cancelled" in {
val newRecord = sharedTestRecordCancelledOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyRejected)))
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail auto-approving ownership transfer request, if user is cancelled" in {
val newRecord = sharedTestRecordCancelledOwnerShip.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.AutoApproved
)))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, auth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail auto-approving ownership transfer request, if zone is not shared" in {
val newRecord = dottedTestRecord.copy(recordSetGroupChange =
Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.AutoApproved,
requestedOwnerGroupId = Some(group.id))))
val result = testRecordSetService
.updateRecordSet(newRecord, auth2)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail approving ownership transfer request, if zone is not shared" in {
val newRecord = dottedTestRecord.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyApproved
)))
val result = testRecordSetService
.updateRecordSet(newRecord, auth2)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"fail requesting ownership transfer, if zone is not shared" in {
val newRecord = dottedTestRecord.copy(recordSetGroupChange =
Some(OwnerShipTransfer(
ownerShipTransferStatus = OwnerShipTransferStatus.Requested,
requestedOwnerGroupId = Some(dummyGroup.id)
)))
doReturn(IO.unit).when(mockNotifier).notify(any[Notification[_]])
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
leftValue(result) shouldBe a[InvalidRequest]
}
"update dotted record succeeds if it satisfies all dotted hosts config" in {
val newRecord = dottedTestRecord.copy(ttl = 37000)
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
val change = rightValue(result).asInstanceOf[RecordSetChange]
change.recordSet.name shouldBe "test.dotted"
change.recordSet.ttl shouldBe 37000
}
"update dotted record name fails as updating a record name is not allowed" in {
val newRecord = dottedTestRecord.copy(name = "trial.dotted")
val result = testRecordSetService
.updateRecordSet(newRecord, dummyAuth)
.value
.unsafeRunSync()
// We get an "InvalidRequest: Cannot update RecordSet's name."
leftValue(result) shouldBe a[InvalidRequest]
}
"update apex A record and add trailing dot" in {
val newRecord = apexTestRecordA.copy(ttl = 200)
val result = testRecordSetService
@ -532,26 +926,33 @@ class RecordSetServiceIntegrationSpec
Some(group2.id)
}
"delete dotted host record successfully for user in record owner group" in {
val result = testRecordSetService
.deleteRecordSet(dottedTestRecord.id, dottedTestRecord.zoneId, dummyAuth)
.value
.unsafeRunSync()
result should be(right)
}
"fail deleting for user not in record owner group in shared zone" in {
val result = leftResultOf(
val result =
testRecordSetService
.deleteRecordSet(sharedTestRecord.id, sharedTestRecord.zoneId, dummyAuth)
.value
)
.value.unsafeRunSync().swap.toOption.get
result shouldBe a[NotAuthorizedError]
}
"fail deleting for user in record owner group in non-shared zone" in {
val result = leftResultOf(
val result =
testRecordSetService
.deleteRecordSet(
testOwnerGroupRecordInNormalZone.id,
testOwnerGroupRecordInNormalZone.zoneId,
auth2
)
.value
)
.value.unsafeRunSync().swap.toOption.get
result shouldBe a[NotAuthorizedError]
}

View File

@ -18,7 +18,9 @@ package vinyldns.api.domain.zone
import cats.data.NonEmptyList
import cats.effect._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.mockito.Mockito.doReturn
import org.scalatest._
import org.scalatest.matchers.should.Matchers
@ -26,9 +28,12 @@ import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures}
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.time.{Seconds, Span}
import scalikejdbc.DB
import vinyldns.api.domain.access.AccessValidations
import vinyldns.api.domain.membership.MembershipService
import vinyldns.api.domain.record.RecordSetChangeGenerator
import vinyldns.api.engine.TestMessageQueue
import vinyldns.mysql.TransactionProvider
import vinyldns.api.{MySqlApiIntegrationSpec, ResultHelpers}
import vinyldns.core.TestMembershipData.{okAuth, okUser}
import vinyldns.core.TestZoneData.okZone
@ -52,13 +57,14 @@ class ZoneServiceIntegrationSpec
with ResultHelpers
with MySqlApiIntegrationSpec
with BeforeAndAfterAll
with BeforeAndAfterEach {
with BeforeAndAfterEach
with TransactionProvider {
private val timeout = PatienceConfiguration.Timeout(Span(10, Seconds))
private val recordSetRepo = recordSetRepository
private val zoneRepo: ZoneRepository = zoneRepository
private val mockMembershipService = mock[MembershipService]
private var testZoneService: ZoneServiceAlgebra = _
private val badAuth = AuthPrincipal(okUser, Seq())
@ -69,7 +75,7 @@ class ZoneServiceIntegrationSpec
typ = RecordType.SOA,
ttl = 38400,
status = RecordSetStatus.Active,
created = DateTime.now,
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
records =
List(SOAData(Fqdn("172.17.42.1."), "admin.test.com.", 1439234395, 10800, 3600, 604800, 38400))
)
@ -79,7 +85,7 @@ class ZoneServiceIntegrationSpec
typ = RecordType.NS,
ttl = 38400,
status = RecordSetStatus.Active,
created = DateTime.now,
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
records = List(NSData(Fqdn("172.17.42.1.")))
)
private val testRecordA = RecordSet(
@ -88,7 +94,7 @@ class ZoneServiceIntegrationSpec
typ = RecordType.A,
ttl = 38400,
status = RecordSetStatus.Active,
created = DateTime.now,
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
records = List(AData("10.1.1.1"))
)
@ -104,10 +110,13 @@ class ZoneServiceIntegrationSpec
waitForSuccess(zoneRepo.save(okZone))
// Seeding records in DB
waitForSuccess(recordSetRepo.apply(changeSetSOA))
waitForSuccess(recordSetRepo.apply(changeSetNS))
waitForSuccess(recordSetRepo.apply(changeSetA))
executeWithinTransaction { db: DB =>
IO {
waitForSuccess(recordSetRepo.apply(db, changeSetSOA))
waitForSuccess(recordSetRepo.apply(db, changeSetNS))
waitForSuccess(recordSetRepo.apply(db, changeSetA))
}
}
doReturn(NonEmptyList.one("func-test-backend")).when(mockBackendResolver).ids
testZoneService = new ZoneService(
@ -120,7 +129,8 @@ class ZoneServiceIntegrationSpec
new ZoneValidations(1000),
new AccessValidations(),
mockBackendResolver,
NoOpCrypto.instance
NoOpCrypto.instance,
mockMembershipService
)
}
@ -142,8 +152,11 @@ class ZoneServiceIntegrationSpec
}
"accept a DeleteZone" in {
val removeARecord = ChangeSet(RecordSetChangeGenerator.forDelete(testRecordA, okZone))
waitForSuccess(recordSetRepo.apply(removeARecord))
executeWithinTransaction { db: DB =>
IO {
waitForSuccess(recordSetRepo.apply(db, removeARecord))
}
}
val result =
testZoneService
.deleteZone(okZone.id, okAuth)

View File

@ -19,8 +19,8 @@ package vinyldns.api.domain.zone
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.xbill.DNS.ZoneTransferException
import vinyldns.api.backend.dns.DnsBackend
import vinyldns.api.config.VinylDNSConfig
import vinyldns.core.domain.Encrypted
import vinyldns.core.domain.backend.BackendResolver
import vinyldns.core.domain.zone.{Zone, ZoneConnection}
@ -50,15 +50,13 @@ class ZoneViewLoaderIntegrationSpec extends AnyWordSpec with Matchers {
ZoneConnection(
"vinyldns.",
"vinyldns.",
"nzisn+4G2ldMn0q1CV3vsg==",
Encrypted("nzisn+4G2ldMn0q1CV3vsg=="),
sys.env.getOrElse("DEFAULT_DNS_ADDRESS", "127.0.0.1:19001")
)
),
transferConnection =
Some(ZoneConnection("invalid-connection.", "bad-key", "invalid-key", "10.1.1.1"))
Some(ZoneConnection("invalid-connection.", "bad-key", Encrypted("invalid-key"), "10.1.1.1"))
)
val backend = backendResolver.resolve(zone).asInstanceOf[DnsBackend]
println(s"${backend.id}, ${backend.xfrInfo}, ${backend.resolver.getAddress}")
DnsZoneViewLoader(zone, backendResolver.resolve(zone), 10000)
.load()
.unsafeRunSync()
@ -83,7 +81,7 @@ class ZoneViewLoaderIntegrationSpec extends AnyWordSpec with Matchers {
ZoneConnection(
"vinyldns.",
"vinyldns.",
"nzisn+4G2ldMn0q1CV3vsg==",
Encrypted("nzisn+4G2ldMn0q1CV3vsg=="),
sys.env.getOrElse("DEFAULT_DNS_ADDRESS", "127.0.0.1:19001")
)
),
@ -91,7 +89,7 @@ class ZoneViewLoaderIntegrationSpec extends AnyWordSpec with Matchers {
ZoneConnection(
"vinyldns.",
"vinyldns.",
"nzisn+4G2ldMn0q1CV3vsg==",
Encrypted("nzisn+4G2ldMn0q1CV3vsg=="),
sys.env.getOrElse("DEFAULT_DNS_ADDRESS", "127.0.0.1:19001")
)
)

View File

@ -24,8 +24,9 @@ import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import vinyldns.core.domain.batch._
import vinyldns.core.domain.record.RecordType
import vinyldns.core.domain.record.AData
import org.joda.time.DateTime
import vinyldns.core.domain.record.{AData, OwnerShipTransferStatus, RecordSetChange, RecordSetChangeStatus, RecordSetChangeType, RecordType}
import java.time.Instant
import java.time.temporal.ChronoUnit
import vinyldns.core.TestMembershipData._
import java.nio.file.{Files, Path, Paths}
@ -34,6 +35,8 @@ import cats.effect.{IO, Resource}
import scala.collection.JavaConverters._
import org.scalatest.BeforeAndAfterEach
import cats.implicits._
import vinyldns.core.TestRecordSetData.{ownerShipTransfer, rsOk}
import vinyldns.core.TestZoneData.okZone
class EmailNotifierIntegrationSpec
extends MySqlApiIntegrationSpec
@ -56,12 +59,12 @@ class EmailNotifierIntegrationSpec
"Email Notifier" should {
"send an email" taggedAs (SkipCI) in {
"send an email for batch change" taggedAs (SkipCI) in {
val batchChange = BatchChange(
okUser.id,
okUser.userName,
None,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
List(
SingleAddChange(
Some("some-zone-id"),
@ -83,7 +86,7 @@ class EmailNotifierIntegrationSpec
val program = for {
_ <- userRepository.save(okUser)
notifier <- new EmailNotifierProvider()
.load(NotifierConfig("", emailConfig), userRepository)
.load(NotifierConfig("", emailConfig), userRepository, groupRepository)
_ <- notifier.notify(Notification(batchChange))
emailFiles <- retrieveEmailFiles(targetDirectory)
} yield emailFiles
@ -93,7 +96,29 @@ class EmailNotifierIntegrationSpec
files.length should be(1)
}
"send an email for recordSetChange ownerShip transfer" taggedAs (SkipCI) in {
val recordSetChange = RecordSetChange(
okZone,
rsOk.copy(ownerGroupId= Some(okGroup.id),recordSetGroupChange =
Some(ownerShipTransfer.copy(ownerShipTransferStatus = OwnerShipTransferStatus.PendingReview, requestedOwnerGroupId = Some(dummyGroup.id)))),
"system",
RecordSetChangeType.Create,
RecordSetChangeStatus.Complete
)
val program = for {
_ <- userRepository.save(okUser)
notifier <- new EmailNotifierProvider()
.load(NotifierConfig("", emailConfig), userRepository, groupRepository)
_ <- notifier.notify(Notification(recordSetChange))
emailFiles <- retrieveEmailFiles(targetDirectory)
} yield emailFiles
val files = program.unsafeRunSync()
files.length should be(1)
}
}
def deleteEmailFiles(path: Path): IO[Unit] =

View File

@ -22,7 +22,7 @@ import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.services.sqs.AmazonSQSClientBuilder
import com.typesafe.config.{Config, ConfigFactory}
import org.joda.time.DateTime
import java.time.Instant
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import org.scalatest.matchers.should.Matchers
@ -56,7 +56,7 @@ class SnsNotifierIntegrationSpec
okUser.id,
okUser.userName,
None,
DateTime.parse("2019-07-22T19:38:23Z"),
Instant.parse("2019-07-22T19:38:23Z"),
List(
SingleAddChange(
Some("some-zone-id"),
@ -85,16 +85,17 @@ class SnsNotifierIntegrationSpec
val sns = AmazonSNSClientBuilder.standard
.withEndpointConfiguration(
new EndpointConfiguration(
snsConfig.getString("service-endpoint"),
sys.env.getOrElse("SNS_SERVICE_ENDPOINT", snsConfig.getString("service-endpoint")),
snsConfig.getString("signing-region")
)
)
.withCredentials(credentialsProvider)
.build()
val sqs = AmazonSQSClientBuilder
.standard()
.withEndpointConfiguration(
new EndpointConfiguration(sys.env.getOrElse("SNS_SERVICE_ENDPOINT", "http://127.0.0.1:19003"), "us-east-1")
new EndpointConfiguration(sys.env.getOrElse("SQS_SERVICE_ENDPOINT", "http://127.0.0.1:19003"), "us-east-1")
)
.withCredentials(credentialsProvider)
.build()
@ -110,7 +111,7 @@ class SnsNotifierIntegrationSpec
sns.subscribe(topic, "sqs", queueUrl)
}
notifier <- new SnsNotifierProvider()
.load(NotifierConfig("", snsConfig), userRepository)
.load(NotifierConfig("", snsConfig), userRepository, groupRepository)
_ <- notifier.notify(Notification(batchChange))
_ <- IO.sleep(1.seconds)
messages <- IO {

View File

@ -30,7 +30,7 @@ import vinyldns.api.engine.ZoneSyncHandler
import vinyldns.api.{MySqlApiIntegrationSpec, ResultHelpers}
import vinyldns.core.TestRecordSetData._
import vinyldns.core.domain.backend.{Backend, BackendResolver}
import vinyldns.core.domain.record.{NameSort, RecordType}
import vinyldns.core.domain.record.{NameSort, RecordType, RecordTypeSort}
import vinyldns.core.domain.zone.{Zone, ZoneChange, ZoneChangeType}
import vinyldns.core.health.HealthCheck.HealthCheck
import vinyldns.route53.backend.{Route53Backend, Route53BackendConfig}
@ -57,6 +57,8 @@ class Route53ApiIntegrationSpec
"test",
Some("access"),
Some("secret"),
None,
None,
sys.env.getOrElse("R53_SERVICE_ENDPOINT", "http://localhost:19003"),
"us-east-1"
)
@ -109,6 +111,7 @@ class Route53ApiIntegrationSpec
val syncHandler = ZoneSyncHandler.apply(
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository,
zoneChangeRepository,
zoneRepository,
backendResolver,
@ -119,7 +122,7 @@ class Route53ApiIntegrationSpec
// We should have both the record we created above as well as at least one NS record
val results = recordSetRepository
.listRecordSets(Some(testZone.id), None, None, None, None, None, NameSort.ASC)
.listRecordSets(Some(testZone.id), None, None, None, None, None, NameSort.ASC, RecordTypeSort.ASC)
.unsafeRunSync()
results.recordSets.map(_.typ).distinct should contain theSameElementsAs List(
rsOk.typ,

View File

@ -28,6 +28,16 @@ vinyldns {
multi-record-batch-change-enabled = true
multi-record-batch-change-enabled = ${?MULTI_RECORD_BATCH_CHANGE_ENABLED}
# Server settings
use-recordset-cache = false
use-recordset-cache = ${?USE_RECORDSET_CACHE}
load-test-data = false
load-test-data = ${?LOAD_TEST_DATA}
# should be true while running locally or when we have only one api server/instance, for zone sync scheduler to work
is-zone-sync-schedule-allowed = true
# should be set to true only on a single server/instance else automated sync will be performed at every server/instance
is-zone-sync-schedule-allowed = ${?IS_ZONE_SYNC_SCHEDULE_ALLOWED}
# configured backend providers
backend {
# Use "default" when dns backend legacy = true
@ -127,7 +137,10 @@ vinyldns {
from = ${?EMAIL_FROM}
}
}
valid-email-config{
email-domains = ["test.com","*dummy.com"]
number-of-dots= 2
}
sns {
class-name = "vinyldns.apadi.notifier.sns.SnsNotifierProvider"
class-name = ${?SNS_CLASS_NAME}
@ -151,6 +164,17 @@ vinyldns {
port=${?API_SERVICE_PORT}
}
api {
limits {
batchchange-routing-max-items-limit = 100
membership-routing-default-max-items = 100
membership-routing-max-items-limit = 1000
membership-routing-max-groups-list-limit = 3000
recordset-routing-default-max-items= 100
zone-routing-default-max-items = 100
zone-routing-max-items-limit = 100
}
}
approved-name-servers = [
"172.17.42.1.",
@ -161,6 +185,19 @@ vinyldns {
"ns1.parent.com4."
]
# approved zones, individual users, users in groups, record types and no.of.dots that are allowed for dotted hosts
dotted-hosts = {
allowed-settings = [
{
zone = "zonenamehere."
user-list = []
group-list = []
record-types = []
dots-limit = 0
}
]
}
# Note: This MUST match the Portal or strange errors will ensue, NoOpCrypto should not be used for production
crypto {
type = "vinyldns.core.crypto.NoOpCrypto"
@ -178,14 +215,22 @@ vinyldns {
name = ${?DATABASE_NAME}
driver = "org.mariadb.jdbc.Driver"
driver = ${?JDBC_DRIVER}
migration-url = "jdbc:mariadb://localhost:19002/?user=root&password=pass"
migration-url = "jdbc:mariadb://localhost:19002/?user=root&password=pass&socketTimeout=20000"
migration-url = ${?JDBC_MIGRATION_URL}
url = "jdbc:mariadb://localhost:19002/vinyldns?user=root&password=pass"
url = "jdbc:mariadb://localhost:19002/vinyldns?user=root&password=pass&socketTimeout=20000"
url = ${?JDBC_URL}
user = "root"
user = ${?JDBC_USER}
password = "pass"
password = ${?JDBC_PASSWORD}
flyway-out-of-order = false
flyway-out-of-order = ${?FLYWAY_OUT_OF_ORDER}
max-lifetime = 300000
connection-timeout-millis = 30000
idle-timeout = 150000
maximum-pool-size = 20
minimum-idle = 5
}
# TODO: Remove the need for these useless configuration blocks
@ -198,6 +243,8 @@ vinyldns {
}
record-set {
}
record-set-cache {
}
zone-change {
}
record-change {
@ -320,13 +367,17 @@ akka.http {
# Set to `infinite` to disable.
bind-timeout = 5s
# A default request timeout is applied globally to all routes and can be configured using the
# akka.http.server.request-timeout setting (which defaults to 20 seconds).
# request-timeout = 60s
# Show verbose error messages back to the client
verbose-error-messages = on
}
parsing {
# Spray doesn't like the AWS4 headers
illegal-header-warnings = on
# Don't complain about the / in the AWS SigV4 auth header
ignore-illegal-header-for = ["authorization"]
}
}

View File

@ -1,24 +1,17 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>
<logger name="org.flywaydb" level="DEBUG">
<logger name="com.zaxxer.hikari" level="ERROR">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="org.flywaydb.core.internal.dbsupport.SqlScript" level="DEBUG">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="org.flywaydb.core.internal.command.DbMigrate" level="DEBUG">
<appender-ref ref="CONSOLE"/>
</logger>
<root level="ERROR">
<root level="INFO">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>

View File

@ -90,6 +90,29 @@ vinyldns {
"ns1.parent.com."
]
# approved zones, individual users, users in groups, record types and no.of.dots that are allowed for dotted hosts
dotted-hosts = {
# for local testing
allowed-settings = [
{
# for wildcard zones. Settings will be applied to all matching zones
zone = "*ent.com*."
user-list = ["ok"]
group-list = ["dummy-group"]
record-types = ["CNAME"]
dots-limit = 3
},
{
# for wildcard zones. Settings will be applied to all matching zones
zone = "dummy*."
user-list = ["sharedZoneUser"]
group-list = ["history-group1"]
record-types = ["A"]
dots-limit = 3
}
]
}
# color should be green or blue, used in order to do blue/green deployment
color = "green"
@ -105,7 +128,18 @@ vinyldns {
host = "127.0.0.1"
port = 9000
}
# limits for batchchange routing, membership routing , recordset routing , zone routing
api {
limits {
batchchange-routing-max-items-limit = 100
membership-routing-default-max-items = 100
membership-routing-max-items-limit = 1000
membership-routing-max-groups-list-limit = 3000
recordset-routing-default-max-items= 100
zone-routing-default-max-items = 100
zone-routing-max-items-limit = 100
}
}
mysql {
class-name = "vinyldns.mysql.repository.MySqlDataStoreProvider"
@ -119,6 +153,7 @@ vinyldns {
url = "jdbc:mariadb://localhost:19002/vinyldns?user=root&password=pass"
user = "root"
password = "pass"
flyway-out-of-order = false
}
repositories {
@ -134,7 +169,10 @@ vinyldns {
from = "VinylDNS <do-not-reply@vinyldns.io>"
}
}
valid-email-config{
email-domains = ["test.com","*dummy.com"]
number-of-dots= 2
}
sns {
class-name = "vinyldns.api.notifier.sns.SnsNotifierProvider"
settings {
@ -191,4 +229,15 @@ vinyldns {
default-ttl = 7200
validate-record-lookup-against-dns-backend = false
use-recordset-cache = false
use-recordset-cache = ${?USE_RECORDSET_CACHE}
load-test-data = false
load-test-data = ${?LOAD_TEST_DATA}
# should be true while running locally or when we have only one api server/instance, for zone sync scheduler to work
is-zone-sync-schedule-allowed = true
# should be set to true only on a single server/instance else automated sync will be performed at every server/instance
is-zone-sync-schedule-allowed = ${?IS_ZONE_SYNC_SCHEDULE_ALLOWED}
}

View File

@ -1,24 +1,27 @@
<configuration>
<!-- Test configuration, log to console so we can get the docker logs -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [test] %-5p | \(%logger{4}:%line\) | %msg %n</pattern>
<encoder class="co.elastic.logging.logback.EcsEncoder">
<serviceName>vinyldns-api</serviceName>
<serviceNodeName>vinyldns-api</serviceNodeName>
</encoder>
</appender>
<logger name="org.flywaydb" level="DEBUG">
<appender-ref ref="CONSOLE"/>
<appender name="BANNER_APPENDER" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%msg%n</pattern>
</encoder>
</appender>
<logger name="org.flywaydb" level="DEBUG"/>
<logger name="org.flywaydb.core.internal.dbsupport.SqlScript" level="DEBUG"/>
<logger name="org.flywaydb.core.internal.command.DbMigrate" level="DEBUG"/>
<logger name="BANNER_LOGGER" level="INFO" additivity="false">
<appender-ref ref="BANNER_APPENDER"/>
</logger>
<logger name="org.flywaydb.core.internal.dbsupport.SqlScript" level="DEBUG">
<appender-ref ref="CONSOLE"/>
</logger>
<logger name="org.flywaydb.core.internal.command.DbMigrate" level="DEBUG">
<appender-ref ref="CONSOLE"/>
</logger>
<root level="ERROR">
<root level="INFO">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>

View File

@ -18,8 +18,9 @@ package vinyldns.api
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ActorMaterializer, Materializer}
import cats.effect.{ContextShift, IO, Timer}
import akka.stream.{Materializer, ActorMaterializer}
import cats.effect.{Timer, IO, ContextShift}
import cats.data.NonEmptyList
import com.typesafe.config.ConfigFactory
import fs2.concurrent.SignallingRef
import io.prometheus.client.CollectorRegistry
@ -27,29 +28,35 @@ import io.prometheus.client.dropwizard.DropwizardExports
import io.prometheus.client.hotspot.DefaultExports
import org.slf4j.LoggerFactory
import vinyldns.api.backend.CommandHandler
import vinyldns.api.config.VinylDNSConfig
import vinyldns.api.domain.access.{AccessValidations, GlobalAcls}
import vinyldns.api.config.{LimitsConfig, VinylDNSConfig}
import vinyldns.api.domain.access.{GlobalAcls, AccessValidations}
import vinyldns.api.domain.auth.MembershipAuthPrincipalProvider
import vinyldns.api.domain.batch.{BatchChangeConverter, BatchChangeService, BatchChangeValidations}
import vinyldns.api.domain.batch.{BatchChangeService, BatchChangeConverter, BatchChangeValidations}
import vinyldns.api.domain.membership._
import vinyldns.api.domain.record.RecordSetService
import vinyldns.api.domain.zone._
import vinyldns.api.metrics.APIMetrics
import vinyldns.api.repository.{ApiDataAccessor, ApiDataAccessorProvider, TestDataLoader}
import vinyldns.api.repository.{ApiDataAccessorProvider, ApiDataAccessor, TestDataLoader}
import vinyldns.api.route.VinylDNSService
import vinyldns.core.VinylDNSMetrics
import vinyldns.core.domain.backend.BackendResolver
import vinyldns.core.health.HealthService
import vinyldns.core.queue.{MessageCount, MessageQueueLoader}
import vinyldns.core.repository.DataStoreLoader
import vinyldns.core.queue.{MessageQueueLoader, MessageCount}
import scala.concurrent.{ExecutionContext, Future}
import scala.io.{Codec, Source}
import vinyldns.core.notifier.NotifierLoader
import vinyldns.core.repository.DataStoreLoader
import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}
object Boot extends App {
private val logger = LoggerFactory.getLogger("Boot")
private val bannerLogger = LoggerFactory.getLogger("BANNER_LOGGER")
// Create a ScheduledExecutorService with a new single thread
private val executor: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()
private implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
private implicit val cs: ContextShift[IO] = IO.contextShift(ec)
private implicit val timer: Timer[IO] = IO.timer(ec)
@ -69,45 +76,59 @@ object Boot extends App {
banner <- vinyldnsBanner()
vinyldnsConfig <- VinylDNSConfig.load()
system <- IO(ActorSystem("VinylDNS", ConfigFactory.load()))
loaderResponse <- DataStoreLoader
.loadAll[ApiDataAccessor](
vinyldnsConfig.dataStoreConfigs,
vinyldnsConfig.crypto,
ApiDataAccessorProvider
)
loaderResponse <- DataStoreLoader.loadAll[ApiDataAccessor](
vinyldnsConfig.dataStoreConfigs,
vinyldnsConfig.crypto,
ApiDataAccessorProvider
)
repositories = loaderResponse.accessor
backendResolver <- BackendResolver.apply(vinyldnsConfig.backendConfigs)
_ <- TestDataLoader
.loadTestData(
_ <- if (vinyldnsConfig.serverConfig.loadTestData) {
TestDataLoader.loadTestData(
repositories.userRepository,
repositories.groupRepository,
repositories.zoneRepository,
repositories.membershipRepository
)
repositories.membershipRepository)
} else {
IO.unit
}
messageQueue <- MessageQueueLoader.load(vinyldnsConfig.messageQueueConfig)
processingSignal <- SignallingRef[IO, Boolean](vinyldnsConfig.serverConfig.processingDisabled)
msgsPerPoll <- IO.fromEither(MessageCount(vinyldnsConfig.messageQueueConfig.messagesPerPoll))
notifiers <- NotifierLoader.loadAll(
vinyldnsConfig.notifierConfigs,
repositories.userRepository
repositories.userRepository,
repositories.groupRepository
)
_ <- APIMetrics.initialize(vinyldnsConfig.apiMetricSettings)
_ <- CommandHandler
.run(
messageQueue,
msgsPerPoll,
processingSignal,
vinyldnsConfig.messageQueueConfig.pollingInterval,
repositories.zoneRepository,
repositories.zoneChangeRepository,
repositories.recordSetRepository,
repositories.recordChangeRepository,
repositories.batchChangeRepository,
notifiers,
backendResolver,
vinyldnsConfig.serverConfig.maxZoneSize
)
.start
// Schedule the zone sync task to be executed every 5 seconds
_ <- if (vinyldnsConfig.serverConfig.isZoneSyncScheduleAllowed){ IO(executor.scheduleAtFixedRate(() => {
val zoneChanges = for {
zoneChanges <- ZoneSyncScheduleHandler.zoneSyncScheduler(repositories.zoneRepository)
_ <- if (zoneChanges.nonEmpty) messageQueue.sendBatch(NonEmptyList.fromList(zoneChanges.toList).get) else IO.unit
} yield ()
zoneChanges.unsafeRunAsync {
case Right(_) =>
logger.debug("Zone sync scheduler ran successfully!")
case Left(error) =>
logger.error(s"An error occurred while performing the scheduled zone sync. Error: $error")
}
}, 0, 1, TimeUnit.SECONDS)) } else IO.unit
_ <- CommandHandler.run(
messageQueue,
msgsPerPoll,
processingSignal,
vinyldnsConfig.messageQueueConfig.pollingInterval,
repositories.zoneRepository,
repositories.zoneChangeRepository,
repositories.recordSetRepository,
repositories.recordChangeRepository,
repositories.recordSetCacheRepository,
repositories.batchChangeRepository,
notifiers,
backendResolver,
vinyldnsConfig.serverConfig.maxZoneSize
).start
} yield {
val batchAccessValidations = new AccessValidations(
vinyldnsConfig.globalAcls,
@ -121,9 +142,11 @@ object Boot extends App {
vinyldnsConfig.highValueDomainConfig,
vinyldnsConfig.manualReviewConfig,
vinyldnsConfig.batchChangeConfig,
vinyldnsConfig.scheduledChangesConfig
vinyldnsConfig.scheduledChangesConfig,
vinyldnsConfig.serverConfig.approvedNameServers
)
val membershipService = MembershipService(repositories)
val membershipService = MembershipService(repositories,vinyldnsConfig.validEmailConfig)
val connectionValidator =
new ZoneConnectionValidator(
backendResolver,
@ -138,7 +161,10 @@ object Boot extends App {
backendResolver,
vinyldnsConfig.serverConfig.validateRecordLookupAgainstDnsBackend,
vinyldnsConfig.highValueDomainConfig,
vinyldnsConfig.serverConfig.approvedNameServers
vinyldnsConfig.dottedHostsConfig,
vinyldnsConfig.serverConfig.approvedNameServers,
vinyldnsConfig.serverConfig.useRecordSetCache,
notifiers
)
val zoneService = ZoneService(
repositories,
@ -147,7 +173,18 @@ object Boot extends App {
zoneValidations,
recordAccessValidations,
backendResolver,
vinyldnsConfig.crypto
vinyldnsConfig.crypto,
membershipService
)
//limits configured in reference.conf passing here
val limits = LimitsConfig(
vinyldnsConfig.limitsconfig.BATCHCHANGE_ROUTING_MAX_ITEMS_LIMIT,
vinyldnsConfig.limitsconfig.MEMBERSHIP_ROUTING_DEFAULT_MAX_ITEMS,
vinyldnsConfig.limitsconfig.MEMBERSHIP_ROUTING_MAX_ITEMS_LIMIT,
vinyldnsConfig.limitsconfig.MEMBERSHIP_ROUTING_MAX_GROUPS_LIST_LIMIT,
vinyldnsConfig.limitsconfig.RECORDSET_ROUTING_DEFAULT_MAX_ITEMS,
vinyldnsConfig.limitsconfig.ZONE_ROUTING_DEFAULT_MAX_ITEMS,
vinyldnsConfig.limitsconfig.ZONE_ROUTING_MAX_ITEMS_LIMIT
)
val healthService = new HealthService(
messageQueue.healthCheck :: backendResolver.healthCheck(
@ -176,6 +213,7 @@ object Boot extends App {
val collectorRegistry = CollectorRegistry.defaultRegistry
val vinyldnsService = new VinylDNSService(
membershipService,
limits,
processingSignal,
zoneService,
healthService,
@ -192,7 +230,7 @@ object Boot extends App {
// Need to register a jvm shut down hook to make sure everything is cleaned up, especially important for
// running locally.
sys.ShutdownHookThread {
logger.error("STOPPING VINYLDNS SERVER...")
logger.info("STOPPING VINYLDNS SERVER...")
//shutdown data store provider
loaderResponse.shutdown()
@ -206,10 +244,10 @@ object Boot extends App {
()
}
logger.error(
logger.info(
s"STARTING VINYLDNS SERVER ON ${vinyldnsConfig.httpConfig.host}:${vinyldnsConfig.httpConfig.port}"
)
logger.error(banner)
bannerLogger.info(banner)
// Starts up our http server
implicit val actorSystem: ActorSystem = system
@ -224,11 +262,12 @@ object Boot extends App {
// runApp gives us a Task, we actually have to run it! Running it will yield a Future, which is our app!
runApp().unsafeRunAsync {
case Right(_) =>
logger.error("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
logger.info("VINYLDNS SERVER STARTED SUCCESSFULLY!!")
case Left(startupFailure) =>
logger.error(s"VINYLDNS SERVER UNABLE TO START $startupFailure")
startupFailure.printStackTrace()
// It doesn't do us much good to keep the application running if it failed to start.
sys.exit(1)
}
}

View File

@ -28,12 +28,18 @@ import vinyldns.api.engine.{
}
import vinyldns.core.domain.backend.{Backend, BackendResolver}
import vinyldns.core.domain.batch.{BatchChange, BatchChangeCommand, BatchChangeRepository}
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetChange, RecordSetRepository}
import vinyldns.core.domain.record.{
RecordChangeRepository,
RecordSetChange,
RecordSetCacheRepository,
RecordSetRepository
}
import vinyldns.core.domain.zone._
import vinyldns.core.queue.{CommandMessage, MessageCount, MessageQueue}
import scala.concurrent.duration._
import vinyldns.core.notifier.AllNotifiers
import java.io.{PrintWriter, StringWriter}
object CommandHandler {
@ -89,7 +95,9 @@ object CommandHandler {
)
.parJoin(maxOpen)
.handleErrorWith { error =>
logger.error("Encountered unexpected error in main flow", error)
val errorMessage = new StringWriter
error.printStackTrace(new PrintWriter(errorMessage))
logger.error(s"Encountered unexpected error in main flow. Error: ${errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")}")
// just continue, the flow should never stop unless explicitly told to do so
flow()
@ -118,7 +126,9 @@ object CommandHandler {
.handleErrorWith { error =>
// on error, we make sure we still continue; should only stop when the app stops
// or processing is disabled
logger.error("Encountered error polling message queue", error)
val errorMessage = new StringWriter
error.printStackTrace(new PrintWriter(errorMessage))
logger.error(s"Encountered error polling message queue. Error: ${errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")}")
// just keep going on the stream
pollingStream()
@ -132,7 +142,7 @@ object CommandHandler {
_.evalMap[IO, Any] { message =>
message.command match {
case sync: ZoneChange
if sync.changeType == ZoneChangeType.Sync || sync.changeType == ZoneChangeType.Create =>
if sync.changeType == ZoneChangeType.Sync || sync.changeType == ZoneChangeType.AutomatedSync || sync.changeType == ZoneChangeType.Create =>
logger.info(s"Updating visibility timeout for zone change; changeId=${sync.id}")
mq.changeMessageTimeout(message, 1.hour)
@ -153,7 +163,7 @@ object CommandHandler {
_.evalMap[IO, MessageOutcome] { message =>
message.command match {
case sync: ZoneChange
if sync.changeType == ZoneChangeType.Sync || sync.changeType == ZoneChangeType.Create =>
if sync.changeType == ZoneChangeType.Sync || sync.changeType == ZoneChangeType.AutomatedSync || sync.changeType == ZoneChangeType.Create =>
outcomeOf(message)(zoneSyncProcessor(sync))
case zoneChange: ZoneChange =>
@ -177,7 +187,9 @@ object CommandHandler {
.attempt
.map {
case Left(e) =>
logger.warn(s"Failed processing message need to retry; $message", e)
val errorMessage = new StringWriter
e.printStackTrace(new PrintWriter(errorMessage))
logger.warn(s"Failed processing message need to retry; $message. Error: ${errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")}")
RetryMessage(message)
case Right(ok) => ok
}
@ -194,28 +206,30 @@ object CommandHandler {
}.as(())
def run(
mq: MessageQueue,
msgsPerPoll: MessageCount,
processingSignal: SignallingRef[IO, Boolean],
pollingInterval: FiniteDuration,
zoneRepo: ZoneRepository,
zoneChangeRepo: ZoneChangeRepository,
recordSetRepo: RecordSetRepository,
recordChangeRepo: RecordChangeRepository,
batchChangeRepo: BatchChangeRepository,
notifiers: AllNotifiers,
backendResolver: BackendResolver,
maxZoneSize: Int
mq: MessageQueue,
msgsPerPoll: MessageCount,
processingSignal: SignallingRef[IO, Boolean],
pollingInterval: FiniteDuration,
zoneRepo: ZoneRepository,
zoneChangeRepo: ZoneChangeRepository,
recordSetRepo: RecordSetRepository,
recordChangeRepo: RecordChangeRepository,
recordSetCacheRepo: RecordSetCacheRepository,
batchChangeRepo: BatchChangeRepository,
notifiers: AllNotifiers,
backendResolver: BackendResolver,
maxZoneSize: Int
)(implicit timer: Timer[IO]): IO[Unit] = {
// Handlers for each type of change request
val zoneChangeHandler =
ZoneChangeHandler(zoneRepo, zoneChangeRepo, recordSetRepo)
ZoneChangeHandler(zoneRepo, zoneChangeRepo, recordSetRepo, recordSetCacheRepo)
val recordChangeHandler =
RecordSetChangeHandler(recordSetRepo, recordChangeRepo, batchChangeRepo)
RecordSetChangeHandler(recordSetRepo, recordChangeRepo,recordSetCacheRepo, batchChangeRepo )
val zoneSyncHandler =
ZoneSyncHandler(
recordSetRepo,
recordChangeRepo,
recordSetCacheRepo,
zoneChangeRepo,
zoneRepo,
backendResolver,

View File

@ -29,6 +29,7 @@ import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.{RecordSet, RecordSetChange, RecordSetChangeType, RecordType}
import vinyldns.core.domain.zone.{Algorithm, Zone, ZoneConnection}
import java.io.{PrintWriter, StringWriter}
import scala.collection.JavaConverters._
object DnsProtocol {
@ -165,6 +166,7 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
val dnsName = recordDnsName(name, zoneName)
logger.info(s"Querying for dns dnsRecordName='${dnsName.toString}'; recordType='$typ'")
val lookup = new DNS.Lookup(dnsName, toDnsRecordType(typ))
lookup.setResolver(resolver)
lookup.setSearchPath(List(Name.empty).asJava)
lookup.setCache(null)
@ -213,8 +215,29 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
resp <- toDnsResponse(resp)
} yield resp
val message =
for {
str <- Either.catchNonFatal(s"DNS Resolver: ${resolver.toString}, " +
s"Resolver Address=${resolver.getAddress.getAddress}, Resolver Host=${resolver.getAddress.getHostName}, " +
s"Resolver Port=${resolver.getPort}, Timeout=${resolver.getTimeout.toString}"
)
} yield str
val resolver_debug_message = message match {
case Right(value) => value
case Left(_) => s"DNS Resolver: ${resolver.toString}"
}
val receivedResponse = result match {
case Right(value) => value.toString.replaceAll("\n",";").replaceAll("\t"," ")
case Left(e) =>
val errorMessage = new StringWriter
e.printStackTrace(new PrintWriter(errorMessage))
errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")
}
logger.info(
s"DnsConnection.send - Sending DNS Message ${obscuredDnsMessage(msg).toString}\n...received response $result"
s"DnsConnection.send - Sending DNS Message ${obscuredDnsMessage(msg).toString.replaceAll("\n",";").replaceAll("\t"," ")}. Received response: $receivedResponse. DNS Resolver Info: $resolver_debug_message"
)
result
@ -234,10 +257,10 @@ class DnsBackend(val id: String, val resolver: DNS.SimpleResolver, val xfrInfo:
// so if we can parse the error into an rcode, then we need to handle it properly; otherwise, we can try again
// The DNS.Rcode.value function will return -1 if the error cannot be parsed into an integer
if (DNS.Rcode.value(query.error) >= 0) {
logger.info(s"Received TRY_AGAIN from DNS lookup; converting error: ${query.error}")
logger.warn(s"Received TRY_AGAIN from DNS lookup; converting error: ${query.error.replaceAll("\n",";")}")
fromDnsRcodeToError(DNS.Rcode.value(query.error), query.error)
} else {
logger.info(s"Unparseable error code returned from DNS: ${query.error}")
logger.warn(s"Unparseable error code returned from DNS: ${query.error.replaceAll("\n",";")}")
Left(TryAgain(query.error))
}
@ -293,7 +316,7 @@ object DnsBackend {
new DNS.TSIG(
parseAlgorithm(conn.algorithm),
decryptedConnection.keyName,
decryptedConnection.key
decryptedConnection.key.value
)
}

View File

@ -19,7 +19,8 @@ package vinyldns.api.backend.dns
import java.net.InetAddress
import cats.syntax.either._
import org.apache.commons.codec.binary.Hex
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.xbill.DNS
import scodec.bits.ByteVector
import vinyldns.api.backend.dns.DnsProtocol._
@ -203,8 +204,9 @@ trait DnsConversions {
typ = fromDnsRecordType(r.getType),
ttl = r.getTTL,
status = RecordSetStatus.Active,
created = DateTime.now,
records = f(r)
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
records = f(r),
recordSetGroupChange = Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.None))
)
// if we do not know the record type, then we cannot parse the records, but we should be able to get everything else
@ -215,8 +217,9 @@ trait DnsConversions {
typ = fromDnsRecordType(r.getType),
ttl = r.getTTL,
status = RecordSetStatus.Active,
created = DateTime.now,
records = Nil
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
records = Nil,
recordSetGroupChange = Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.None))
)
def fromARecord(r: DNS.ARecord, zoneName: DNS.Name, zoneId: String): RecordSet =
@ -278,7 +281,7 @@ trait DnsConversions {
def fromSPFRecord(r: DNS.SPFRecord, zoneName: DNS.Name, zoneId: String): RecordSet =
fromDnsRecord(r, zoneName, zoneId) { data =>
List(SPFData(data.getStrings.asScala.mkString(",")))
List(SPFData(data.getStrings.asScala.mkString))
}
def fromSRVRecord(r: DNS.SRVRecord, zoneName: DNS.Name, zoneId: String): RecordSet =
@ -393,7 +396,8 @@ trait DnsConversions {
new DNS.SSHFPRecord(recordName, DNS.DClass.IN, ttl, algorithm, typ, Hex.decodeHex(fingerprint.toCharArray()))
case SPFData(text) =>
new DNS.SPFRecord(recordName, DNS.DClass.IN, ttl, text)
val texts = text.grouped(255).toList
new DNS.SPFRecord(recordName, DNS.DClass.IN, ttl, texts.asJava)
case TXTData(text) =>
val texts = text.grouped(255).toList

View File

@ -0,0 +1,31 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api.config
import pureconfig.ConfigReader
import pureconfig.generic.auto._
final case class ZoneAuthConfigs(zone: String, userList: List[String], groupList: List[String], recordTypes: List[String], dotsLimit: Int)
final case class DottedHostsConfig(zoneAuthConfigs: List[ZoneAuthConfigs])
object DottedHostsConfig {
implicit val configReader: ConfigReader[DottedHostsConfig] =
ConfigReader.forProduct1[DottedHostsConfig, List[ZoneAuthConfigs]](
"allowed-settings",
)(zoneAuthConfigs =>
DottedHostsConfig(zoneAuthConfigs))
}

View File

@ -31,6 +31,6 @@ object HighValueDomainConfig {
"ip-list"
) {
case (regexList, ipList) =>
HighValueDomainConfig(toCaseIgnoredRegexList(regexList), ipList.flatMap(IpAddress(_)))
HighValueDomainConfig(toCaseIgnoredRegexList(regexList), ipList.flatMap(IpAddress.fromString(_)))
}
}

View File

@ -0,0 +1,61 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api.config
import pureconfig.ConfigReader
final case class LimitsConfig(
BATCHCHANGE_ROUTING_MAX_ITEMS_LIMIT: Int,
MEMBERSHIP_ROUTING_DEFAULT_MAX_ITEMS: Int,
MEMBERSHIP_ROUTING_MAX_ITEMS_LIMIT: Int,
MEMBERSHIP_ROUTING_MAX_GROUPS_LIST_LIMIT: Int,
RECORDSET_ROUTING_DEFAULT_MAX_ITEMS: Int,
ZONE_ROUTING_DEFAULT_MAX_ITEMS: Int,
ZONE_ROUTING_MAX_ITEMS_LIMIT: Int
)
object LimitsConfig {
implicit val configReader: ConfigReader[LimitsConfig] =
ConfigReader.forProduct7[LimitsConfig, Int, Int, Int, Int, Int, Int, Int](
"batchchange-routing-max-items-limit",
"membership-routing-default-max-items",
"membership-routing-max-items-limit",
"membership-routing-max-groups-list-limit",
"recordset-routing-default-max-items",
"zone-routing-default-max-items",
"zone-routing-max-items-limit"
) {
case (
batchchange_routing_max_items_limit,
membership_routing_default_max_items,
membership_routing_max_items_limit,
membership_routing_max_groups_list_limit,
recordset_routing_default_max_items,
zone_routing_default_max_items,
zone_routing_max_items_limit
) =>
LimitsConfig(
batchchange_routing_max_items_limit,
membership_routing_default_max_items,
membership_routing_max_items_limit,
membership_routing_max_groups_list_limit,
recordset_routing_default_max_items,
zone_routing_default_max_items,
zone_routing_max_items_limit
)
}
}

View File

@ -41,7 +41,7 @@ object ManualReviewConfig {
ManualReviewConfig(
enabled,
toCaseIgnoredRegexList(domainsConfig.getStringList("domain-list").asScala.toList),
domainsConfig.getStringList("ip-list").asScala.toList.flatMap(IpAddress(_)),
domainsConfig.getStringList("ip-list").asScala.toList.flatMap(IpAddress.fromString(_)),
domainsConfig.getStringList("zone-name-list").asScala.toSet
)
}

View File

@ -23,20 +23,25 @@ import vinyldns.api.domain.zone.ZoneRecordValidations
import scala.util.matching.Regex
final case class ServerConfig(
healthCheckTimeout: Int,
defaultTtl: Int,
maxZoneSize: Int,
syncDelay: Int,
validateRecordLookupAgainstDnsBackend: Boolean,
approvedNameServers: List[Regex],
color: String,
version: String,
keyName: String,
processingDisabled: Boolean
)
healthCheckTimeout: Int,
defaultTtl: Int,
maxZoneSize: Int,
syncDelay: Int,
validateRecordLookupAgainstDnsBackend: Boolean,
approvedNameServers: List[Regex],
color: String,
version: String,
keyName: String,
processingDisabled: Boolean,
useRecordSetCache: Boolean,
loadTestData: Boolean,
isZoneSyncScheduleAllowed: Boolean,
)
object ServerConfig {
import ZoneRecordValidations.toCaseIgnoredRegexList
implicit val configReader: ConfigReader[ServerConfig] = ConfigReader.forProduct10[
implicit val configReader: ConfigReader[ServerConfig] = ConfigReader.forProduct13[
ServerConfig,
Int,
Int,
@ -47,6 +52,9 @@ object ServerConfig {
String,
String,
Config,
Boolean,
Boolean,
Boolean,
Boolean
](
"health-check-timeout",
@ -58,20 +66,25 @@ object ServerConfig {
"color",
"version",
"defaultZoneConnection",
"processing-disabled"
"processing-disabled",
"use-recordset-cache",
"load-test-data",
"is-zone-sync-schedule-allowed"
) {
case (
timeout,
ttl,
maxZone,
syncDelay,
validateDnsBackend,
approvedNameServers,
color,
version,
zoneConnConfig,
processingDisabled
) =>
timeout,
ttl,
maxZone,
syncDelay,
validateDnsBackend,
approvedNameServers,
color,
version,
zoneConnConfig,
processingDisabled,
useRecordSetCache,
loadTestData,
isZoneSyncScheduleAllowed) =>
ServerConfig(
timeout,
ttl,
@ -82,7 +95,10 @@ object ServerConfig {
color,
version,
zoneConnConfig.getString("keyName"),
processingDisabled
processingDisabled,
useRecordSetCache,
loadTestData,
isZoneSyncScheduleAllowed
)
}
}

View File

@ -0,0 +1,42 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api.config
import pureconfig.ConfigReader
case class ValidEmailConfig(
valid_domains : List[String],
number_of_dots : Int)
object ValidEmailConfig {
implicit val configReader: ConfigReader[ValidEmailConfig] =
ConfigReader.forProduct2[ValidEmailConfig,List[String],Int](
"email-domains",
"number-of-dots"
)
{
case (
valid_domains,
number_of_dots,
) =>
ValidEmailConfig(
valid_domains,
number_of_dots,
)
}
}

View File

@ -37,6 +37,8 @@ import scala.reflect.ClassTag
final case class VinylDNSConfig(
serverConfig: ServerConfig,
limitsconfig: LimitsConfig,
validEmailConfig: ValidEmailConfig,
httpConfig: HttpConfig,
highValueDomainConfig: HighValueDomainConfig,
manualReviewConfig: ManualReviewConfig,
@ -46,6 +48,7 @@ final case class VinylDNSConfig(
notifierConfigs: List[NotifierConfig],
dataStoreConfigs: List[DataStoreConfig],
backendConfigs: BackendConfigs,
dottedHostsConfig: DottedHostsConfig,
configuredDnsConnections: ConfiguredDnsConnections,
apiMetricSettings: APIMetricsSettings,
crypto: CryptoAlgebra,
@ -80,9 +83,12 @@ object VinylDNSConfig {
for {
config <- IO.delay(ConfigFactory.load())
limitsconfig <- loadIO[LimitsConfig](config, "vinyldns.api.limits") //Added Limitsconfig to fetch data from the reference.config and pass to LimitsConfig.config
validEmailConfig <- loadIO[ValidEmailConfig](config, path="vinyldns.valid-email-config")
serverConfig <- loadIO[ServerConfig](config, "vinyldns")
batchChangeConfig <- loadIO[BatchChangeConfig](config, "vinyldns")
backendConfigs <- loadIO[BackendConfigs](config, "vinyldns.backend")
dottedHostsConfig <- loadIO[DottedHostsConfig](config, "vinyldns.dotted-hosts")
httpConfig <- loadIO[HttpConfig](config, "vinyldns.rest")
hvdConfig <- loadIO[HighValueDomainConfig](config, "vinyldns.high-value-domains")
scheduledChangesConfig <- loadIO[ScheduledChangesConfig](config, "vinyldns")
@ -98,6 +104,8 @@ object VinylDNSConfig {
.map(GlobalAcls.apply)
} yield VinylDNSConfig(
serverConfig,
limitsconfig,
validEmailConfig,
httpConfig,
hvdConfig,
manualReviewConfig,
@ -107,6 +115,7 @@ object VinylDNSConfig {
notifierConfigs,
dataStoreConfigs,
backendConfigs,
dottedHostsConfig,
connections,
metricSettings,
crypto,

View File

@ -27,8 +27,13 @@ import scala.util.matching.Regex
Object to house common domain validations
*/
object DomainValidations {
val validReverseZoneFQDNRegex: Regex =
"""^(?:([0-9a-zA-Z\-\/_]{1,63}|[0-9a-zA-Z\-\/_]{1}[0-9a-zA-Z\-\/_]{0,61}[0-9a-zA-Z\-\/_]{1}|[*.]{2}[0-9a-zA-Z\-\/_]{0,60}[0-9a-zA-Z\-\/_]{1})\.)*$""".r
val validForwardZoneFQDNRegex: Regex =
"""^(?:([0-9a-zA-Z_]{1,63}|[0-9a-zA-Z_]{1}[0-9a-zA-Z\-_]{0,61}[0-9a-zA-Z_]{1}|[*.]{2}[0-9a-zA-Z\-_]{0,60}[0-9a-zA-Z_]{1})\.)*$""".r
val validFQDNRegex: Regex =
"""^(?:([0-9a-zA-Z_]{1,63}|[0-9a-zA-Z_]{1}[0-9a-zA-Z\-\/_]{0,61}[0-9a-zA-Z_]{1})\.)*$""".r
"""^(?:([0-9a-zA-Z_]{1,63}|[0-9a-zA-Z_]{1}[0-9a-zA-Z\-\/_]{0,61}[0-9a-zA-Z_]{1}|[*.]{2}[0-9a-zA-Z\-\/_]{0,60}[0-9a-zA-Z_]{1})\.)*$""".r
val validIpv4Regex: Regex =
"""^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$""".r
val validIpv6Regex: Regex =
@ -54,16 +59,47 @@ object DomainValidations {
val TTL_MIN_LENGTH: Int = 30
val TXT_TEXT_MIN_LENGTH: Int = 1
val TXT_TEXT_MAX_LENGTH: Int = 64764
val MX_PREFERENCE_MIN_VALUE: Int = 0
val MX_PREFERENCE_MAX_VALUE: Int = 65535
val INTEGER_MIN_VALUE: Int = 0
val INTEGER_MAX_VALUE: Int = 65535
// Cname check - Cname should not be IP address
def validateCname(name: Fqdn, isReverse: Boolean): ValidatedNel[DomainValidationError, Fqdn] =
validateIpv4Address(name.fqdn.dropRight(1)).isValid match {
case true => InvalidIPv4CName(name.toString).invalidNel
case false => validateIsReverseCname(name, isReverse)
}
def validateHostName(name: Fqdn): ValidatedNel[DomainValidationError, Fqdn] =
validateHostName(name.fqdn).map(_ => name)
def validateIsReverseCname(name: Fqdn, isReverse: Boolean): ValidatedNel[DomainValidationError, Fqdn] =
validateIsReverseCname(name.fqdn, isReverse).map(_ => name)
def validateIsReverseCname(name: String, isReverse: Boolean): ValidatedNel[DomainValidationError, String] = {
isReverse match {
case true =>
val checkRegex = validReverseZoneFQDNRegex
.findFirstIn(name)
.map(_.validNel)
.getOrElse(InvalidCname(name,isReverse).invalidNel)
val checkLength = validateStringLength(name, Some(HOST_MIN_LENGTH), HOST_MAX_LENGTH)
checkRegex.combine(checkLength).map(_ => name)
case false =>
val checkRegex = validForwardZoneFQDNRegex
.findFirstIn(name)
.map(_.validNel)
.getOrElse(InvalidCname(name,isReverse).invalidNel)
val checkLength = validateStringLength(name, Some(HOST_MIN_LENGTH), HOST_MAX_LENGTH)
checkRegex.combine(checkLength).map(_ => name)
}
}
def validateHostName(name: String): ValidatedNel[DomainValidationError, String] = {
/*
Label rules are as follows (from RFC 952; detailed in RFC 1034):
- Starts with a letter, OR digit, or underscore (as of RFC 1123)
- Starts with a letter, or digit, or underscore or asterisk (as of RFC 1123)
- Interior contains letter, digit or hyphen, or underscore
- Ends with a letter or digit, or underscore
All possible labels permutations:
@ -71,6 +107,8 @@ object DomainValidations {
- A combination of 1-63 letters/digits: [0-9a-zA-Z]{1,63}
- A single letter/digit followed by up to 61 letters, digits, hyphens or slashes
and ending with a letter/digit:[0-9a-zA-Z]{1}[0-9a-zA-Z\-]{0,61}[0-9a-zA-Z]{1}
- A wildcard and dot character (*.) followed by up to 60 letters, digits, hyphens or slashes
and ending with a letter/digit:[*.]{2}[0-9a-zA-Z\-\/_]{0,60}[0-9a-zA-Z_]{1}
A valid domain name is a series of one or more <label>s,
joined by dots/slashes and terminating on a zero-length <label> (ie. dot)
*/
@ -83,6 +121,8 @@ object DomainValidations {
checkRegex.combine(checkLength).map(_ => name)
}
def validateIpv4Address(address: String): ValidatedNel[DomainValidationError, String] =
validIpv4Regex
.findFirstIn(address)
@ -120,7 +160,15 @@ object DomainValidations {
def validateTxtTextLength(value: String): ValidatedNel[DomainValidationError, String] =
validateStringLength(value, Some(TXT_TEXT_MIN_LENGTH), TXT_TEXT_MAX_LENGTH)
def validateMxPreference(pref: Int): ValidatedNel[DomainValidationError, Int] =
if (pref >= MX_PREFERENCE_MIN_VALUE && pref <= MX_PREFERENCE_MAX_VALUE) pref.validNel
else InvalidMxPreference(pref, MX_PREFERENCE_MIN_VALUE, MX_PREFERENCE_MAX_VALUE).invalidNel[Int]
def validateMX_NAPTR_SRVData(number: Int, recordDataType: String, recordType: String): ValidatedNel[DomainValidationError, Int] =
if (number >= INTEGER_MIN_VALUE && number <= INTEGER_MAX_VALUE) number.validNel
else InvalidMX_NAPTR_SRVData(number, INTEGER_MIN_VALUE, INTEGER_MAX_VALUE, recordDataType, recordType).invalidNel[Int]
def validateNaptrFlag(value: String): ValidatedNel[DomainValidationError, String] =
if (value == "U" || value == "S" || value == "A" || value == "P") value.validNel
else InvalidNaptrFlag(value).invalidNel[String]
def validateNaptrRegexp(value: String): ValidatedNel[DomainValidationError, String] =
if ((value.startsWith("!") && value.endsWith("!")) || value == "") value.validNel
else InvalidNaptrRegexp(value).invalidNel[String]
}

View File

@ -17,7 +17,7 @@
package vinyldns.api.domain
import cats.implicits._
import com.aaronbedra.orchard.CIDR
import com.comcast.ip4s.{Cidr, Ipv4Address, Ipv6Address}
import vinyldns.api.domain.zone.InvalidRequest
import vinyldns.core.domain.zone.Zone
import vinyldns.api.backend.dns.DnsConversions._
@ -30,8 +30,9 @@ object ReverseZoneHelpers {
if (zone.isIPv4) {
recordsetIsWithinCidrMaskIpv4(mask: String, zone: Zone, recordName: String)
} else {
val ipAddr = convertPTRtoIPv6(zone, recordName)
Try(CIDR.valueOf(mask).contains(ipAddr)).getOrElse(false)
val ipAddr = Ipv6Address.fromString(convertPTRtoIPv6(zone, recordName))
Try(Cidr(Cidr.fromString6(mask).get.address,Cidr.fromString6(mask).get.prefixBits).contains(ipAddr.get))
.getOrElse(false)
}
// NOTE: this will not work for zones with less than 3 octets
@ -62,7 +63,7 @@ object ReverseZoneHelpers {
}
def convertPTRtoIPv4(zone: Zone, recordName: String): String = {
val zoneName = zone.name.split("in-addr.arpa.")(0)
val zoneName = zone.name.dropRight("in-addr.arpa.".length)
val zoneOctets = ipv4ReverseSplitByOctets(zoneName)
val recordOctets = ipv4ReverseSplitByOctets(recordName)
@ -74,7 +75,7 @@ object ReverseZoneHelpers {
}
def convertPTRtoIPv6(zone: Zone, recordName: String): String = {
val zoneName = zone.name.split("ip6.arpa.")(0)
val zoneName = zone.name.dropRight("ip6.arpa.".length)
val zoneNameNibblesReversed = zoneName.split('.').reverse.toList
val recordSetNibblesReversed = recordName.split('.').reverse.toList
val allUnseparated = (zoneNameNibblesReversed ++ recordSetNibblesReversed).mkString("")
@ -86,11 +87,12 @@ object ReverseZoneHelpers {
zone: Zone,
recordName: String
): Boolean = {
val recordIpAddr = convertPTRtoIPv4(zone, recordName)
val recordIpAddr = Ipv4Address.fromString(convertPTRtoIPv4(zone, recordName))
Try {
// make sure mask contains 4 octets, expand if not
val ipMaskOctets = CIDR.parseBlock(mask).head.split('.').toList
val ipMaskOctets = Cidr.fromString4(mask).get.address.toString.split('.').toList
val fullIp = ipMaskOctets.length match {
case 1 => (ipMaskOctets ++ List("0", "0", "0")).mkString(".")
@ -99,9 +101,8 @@ object ReverseZoneHelpers {
case 4 => ipMaskOctets.mkString(".")
}
val updatedMask = fullIp + "/" + CIDR.valueOf(mask).getMask
CIDR.valueOf(updatedMask).contains(recordIpAddr)
val updatedMask = Cidr(recordIpAddr.get,Cidr.fromString4(mask).get.prefixBits)
updatedMask.contains(Ipv4Address.fromString(fullIp).get)
}.getOrElse(false)
}
@ -109,7 +110,7 @@ object ReverseZoneHelpers {
string.split('.').filter(!_.isEmpty).reverse.toList
private def getZoneAsCIDRString(zone: Zone): Either[Throwable, String] = {
val zoneName = zone.name.split("in-addr.arpa.")(0)
val zoneName = zone.name.dropRight("in-addr.arpa.".length)
val zoneOctets = ipv4ReverseSplitByOctets(zoneName)
val zoneString = zoneOctets.mkString(".")
@ -147,7 +148,7 @@ object ReverseZoneHelpers {
zone: Zone,
recordName: String
): Either[Throwable, Unit] = {
val v6Regex = "([0-9a-f][.]){32}ip6.arpa.".r
val v6Regex = "(?i)([0-9a-f][.]){32}ip6.arpa.".r
s"$recordName.${zone.name}" match {
case v6Regex(_*) => ().asRight

View File

@ -34,10 +34,17 @@ class AccessValidations(
ensuring(
NotAuthorizedError(s"User ${auth.signedInUser.userName} cannot access zone '${zone.name}'")
)(
auth.isSystemAdmin || auth
auth.isSystemAdmin || zone.shared || auth
.isGroupMember(zone.adminGroupId) || userHasAclRules(auth, zone)
)
def canSeeZoneChange(auth: AuthPrincipal, zone: Zone): Either[Throwable, Unit] =
ensuring(
NotAuthorizedError(s"User ${auth.signedInUser.userName} cannot access zone '${zone.name}' changes")
)(
auth.isSystemAdmin || zone.shared || auth.isGroupMember(zone.adminGroupId)
)
def canChangeZone(
auth: AuthPrincipal,
zoneName: String,
@ -73,6 +80,7 @@ class AccessValidations(
recordType: RecordType,
zone: Zone,
recordOwnerGroupId: Option[String],
superUserCanUpdateOwnerGroup: Boolean = false,
newRecordData: List[RecordData] = List.empty
): Either[Throwable, Unit] = {
val accessLevel =
@ -82,7 +90,7 @@ class AccessValidations(
s"User ${auth.signedInUser.userName} does not have access to update " +
s"$recordName.${zone.name}"
)
)(accessLevel == AccessLevel.Delete || accessLevel == AccessLevel.Write)
)(accessLevel == AccessLevel.Delete || accessLevel == AccessLevel.Write || superUserCanUpdateOwnerGroup)
}
def canDeleteRecordSet(
@ -222,7 +230,9 @@ class AccessValidations(
AccessLevel.Delete
case support if support.isSystemAdmin =>
val aclAccess = getAccessFromAcl(auth, recordName, recordType, zone)
if (aclAccess == AccessLevel.NoAccess) AccessLevel.Read else aclAccess
if (aclAccess == AccessLevel.NoAccess)
AccessLevel.Read
else aclAccess
case globalAclUser
if globalAcls.isAuthorized(globalAclUser, recordName, recordType, zone, recordData) =>
AccessLevel.Delete

View File

@ -27,6 +27,8 @@ trait AccessValidationsAlgebra {
def canSeeZone(auth: AuthPrincipal, zone: Zone): Either[Throwable, Unit]
def canSeeZoneChange(auth: AuthPrincipal, zone: Zone): Either[Throwable, Unit]
def canChangeZone(
auth: AuthPrincipal,
zoneName: String,
@ -47,6 +49,7 @@ trait AccessValidationsAlgebra {
recordType: RecordType,
zone: Zone,
recordOwnerGroupId: Option[String],
superUserCanUpdateOwnerGroup: Boolean = false,
newRecordData: List[RecordData] = List.empty
): Either[Throwable, Unit]

View File

@ -18,7 +18,8 @@ package vinyldns.api.domain.batch
import cats.data.NonEmptyList
import cats.syntax.list._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.slf4j.LoggerFactory
import vinyldns.api.domain.batch.BatchChangeInterfaces._
import vinyldns.api.domain.batch.BatchTransformations._
@ -27,13 +28,14 @@ import vinyldns.api.domain.record.RecordSetChangeGenerator
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone.Zone
import vinyldns.core.domain.batch._
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.RecordType.{RecordType, UNKNOWN}
import vinyldns.core.queue.MessageQueue
class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue: MessageQueue)
extends BatchChangeConverterAlgebra {
private val logger = LoggerFactory.getLogger("BatchChangeConverter")
private val failedMessage: String = "Error queueing RecordSetChange for processing"
private val logger = LoggerFactory.getLogger(classOf[BatchChangeConverter])
def sendBatchForProcessing(
batchChange: BatchChange,
@ -67,15 +69,20 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
recordSetChanges: List[RecordSetChange]
): BatchResult[Unit] = {
val convertedIds = recordSetChanges.flatMap(_.singleBatchChangeIds).toSet
singleChanges.find(ch => !convertedIds.contains(ch.id)) match {
case Some(change) => BatchConversionError(change).toLeftBatchResult
case None =>
// Each single change has a corresponding recordset id
// If they're not equal, then there's a delete request for a record that doesn't exist. So we allow this to process
case Some(_) if singleChanges.map(_.id).length != recordSetChanges.map(_.id).length && !singleChanges.map(_.typ).contains(UNKNOWN) =>
logger.info(s"Successfully converted SingleChanges [${singleChanges
.map(_.id)}] to RecordSetChanges [${recordSetChanges.map(_.id)}]")
().toRightBatchResult
case Some(change) => BatchConversionError(change).toLeftBatchResult
case None =>
logger.info(s"Successfully converted SingleChanges [${singleChanges
.map(_.id)}] to RecordSetChanges [${recordSetChanges.map(_.id)}]")
().toRightBatchResult
}
}
}
def putChangesOnQueue(
recordSetChanges: List[RecordSetChange],
@ -104,7 +111,6 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
val idsMap = recordSetChanges.flatMap { rsChange =>
rsChange.singleBatchChangeIds.map(batchId => (batchId, rsChange.id))
}.toMap
val withStatus = batchChange.changes.map { change =>
idsMap
.get(change.id)
@ -113,19 +119,27 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
change
}
.getOrElse {
// failure here means there was a message queue issue for this change
change.withFailureMessage("Error queueing RecordSetChange for processing")
// Match and check if it's a delete change for a record that doesn't exists.
change match {
case _: SingleDeleteRRSetChange if change.recordSetId.isEmpty =>
// Mark as Complete since we don't want to throw it as an error
change.withDoesNotExistMessage
case _ =>
// Failure here means there was a message queue issue for this change
change.withFailureMessage(failedMessage)
}
}
}
batchChange.copy(changes = withStatus)
}
def storeQueuingFailures(batchChange: BatchChange): BatchResult[Unit] = {
val failedChanges = batchChange.changes.collect {
// Update if Single change is Failed or if a record that does not exist is deleted
val failedAndNotExistsChanges = batchChange.changes.collect {
case change if change.status == SingleChangeStatus.Failed => change
}
batchChangeRepo.updateSingleChanges(failedChanges).as(())
val storeChanges = batchChangeRepo.updateSingleChanges(failedAndNotExistsChanges).as(())
storeChanges
}.toBatchResult
def createRecordSetChangesForBatch(
@ -200,7 +214,7 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
}
}
// New record set for add/update or single delete
// New record set for add/update/full deletes
lazy val newRecordSet = {
val firstAddChange = singleChangeNel.collect {
case sac: SingleAddChange => sac
@ -219,7 +233,34 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
recordType,
ttl,
RecordSetStatus.Pending,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
proposedRecordData.toList,
ownerGroupId = setOwnerGroupId,
recordSetGroupChange = Some(OwnerShipTransfer(ownerShipTransferStatus = OwnerShipTransferStatus.None))
)
}
}
// New record set for single delete which exists in dns backend but not in vinyl
lazy val newDeleteRecordSet = {
val firstDeleteChange = singleChangeNel.collect {
case sad: SingleDeleteRRSetChange => sad
}.headOption
val newTtlRecordNameTuple = firstDeleteChange
.map(del => del.recordName)
.orElse(existingRecordSet.map(rs => Some(rs.name)))
newTtlRecordNameTuple.collect{
case Some(recordName) =>
RecordSet(
zone.id,
recordName,
recordType,
7200L,
RecordSetStatus.Pending,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
None,
proposedRecordData.toList,
ownerGroupId = setOwnerGroupId
@ -242,7 +283,12 @@ class BatchChangeConverter(batchChangeRepo: BatchChangeRepository, messageQueue:
existingRs <- existingRecordSet
newRs <- newRecordSet
} yield RecordSetChangeGenerator.forUpdate(existingRs, newRs, zone, userId, singleChangeIds)
case _ => None // This case should never happen
case OutOfSync =>
newDeleteRecordSet.map { newDelRs =>
RecordSetChangeGenerator.forOutOfSync(newDelRs, zone, userId, singleChangeIds)
}
case _ =>
None // This case should never happen
}
}
}

View File

@ -16,7 +16,7 @@
package vinyldns.api.domain.batch
import org.joda.time.DateTime
import java.time.Instant
import vinyldns.api.domain.batch.BatchChangeInterfaces.ValidatedBatch
import vinyldns.api.domain.batch.BatchTransformations.ChangeForValidation
import vinyldns.core.domain.DomainValidationError
@ -69,7 +69,7 @@ case object ScheduledTimeMustBeInFuture extends BatchChangeErrorResponse {
val message: String = "Scheduled time must be in the future."
}
final case class ScheduledChangeNotDue(scheduledTime: DateTime) extends BatchChangeErrorResponse {
final case class ScheduledChangeNotDue(scheduledTime: Instant) extends BatchChangeErrorResponse {
val message: String =
s"Cannot process scheduled change as it is not past the scheduled date of $scheduledTime"
}

View File

@ -17,7 +17,7 @@
package vinyldns.api.domain.batch
import cats.data.NonEmptyList
import org.joda.time.DateTime
import java.time.Instant
import vinyldns.core.domain.{DomainValidationError, SingleChangeError}
import vinyldns.core.domain.DomainHelpers.ensureTrailingDot
import vinyldns.core.domain.batch._
@ -28,7 +28,7 @@ final case class BatchChangeInput(
comments: Option[String],
changes: List[ChangeInput],
ownerGroupId: Option[String] = None,
scheduledTime: Option[DateTime] = None
scheduledTime: Option[Instant] = None
)
object BatchChangeInput {
@ -44,12 +44,14 @@ object BatchChangeInput {
sealed trait ChangeInput {
val inputName: String
val typ: RecordType
val systemMessage: Option[String]
def asNewStoredChange(errors: NonEmptyList[DomainValidationError], defaultTtl: Long): SingleChange
}
final case class AddChangeInput(
inputName: String,
typ: RecordType,
systemMessage: Option[String],
ttl: Option[Long],
record: RecordData
) extends ChangeInput {
@ -68,7 +70,7 @@ final case class AddChangeInput(
knownTtl,
record,
SingleChangeStatus.NeedsReview,
None,
systemMessage,
None,
None,
errors.toList.map(SingleChangeError(_))
@ -79,6 +81,7 @@ final case class AddChangeInput(
final case class DeleteRRSetChangeInput(
inputName: String,
typ: RecordType,
systemMessage: Option[String],
record: Option[RecordData]
) extends ChangeInput {
def asNewStoredChange(
@ -93,7 +96,7 @@ final case class DeleteRRSetChangeInput(
typ,
record,
SingleChangeStatus.NeedsReview,
None,
systemMessage,
None,
None,
errors.toList.map(SingleChangeError(_))
@ -104,6 +107,7 @@ object AddChangeInput {
def apply(
inputName: String,
typ: RecordType,
systemMessage: Option[String],
ttl: Option[Long],
record: RecordData
): AddChangeInput = {
@ -111,28 +115,29 @@ object AddChangeInput {
case PTR => inputName
case _ => ensureTrailingDot(inputName)
}
new AddChangeInput(transformName, typ, ttl, record)
new AddChangeInput(transformName, typ, systemMessage, ttl, record)
}
def apply(sc: SingleAddChange): AddChangeInput =
AddChangeInput(sc.inputName, sc.typ, Some(sc.ttl), sc.recordData)
AddChangeInput(sc.inputName, sc.typ, sc.systemMessage, Some(sc.ttl), sc.recordData)
}
object DeleteRRSetChangeInput {
def apply(
inputName: String,
typ: RecordType,
systemMessage: Option[String],
record: Option[RecordData] = None
): DeleteRRSetChangeInput = {
val transformName = typ match {
case PTR => inputName
case _ => ensureTrailingDot(inputName)
}
new DeleteRRSetChangeInput(transformName, typ, record)
new DeleteRRSetChangeInput(transformName, typ, systemMessage, record)
}
def apply(sc: SingleDeleteRRSetChange): DeleteRRSetChangeInput =
DeleteRRSetChangeInput(sc.inputName, sc.typ, sc.recordData)
DeleteRRSetChangeInput(sc.inputName, sc.typ, sc.systemMessage, sc.recordData)
}
object ChangeInputType extends Enumeration {

View File

@ -16,10 +16,11 @@
package vinyldns.api.domain.batch
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
final case class BatchChangeReviewInfo(
reviewerId: String,
reviewComment: Option[String],
reviewTimestamp: DateTime = DateTime.now()
reviewTimestamp: Instant = Instant.now.truncatedTo(ChronoUnit.MILLIS)
)

View File

@ -20,7 +20,9 @@ import cats.data.Validated.{Invalid, Valid}
import cats.data._
import cats.effect._
import cats.implicits._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.domain.DomainValidations._
import vinyldns.api.domain.auth.AuthPrincipalProvider
@ -32,19 +34,9 @@ import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.batch.BatchChangeApprovalStatus.BatchChangeApprovalStatus
import vinyldns.core.domain.batch._
import vinyldns.core.domain.batch.BatchChangeApprovalStatus._
import vinyldns.core.domain.{
CnameAtZoneApexError,
SingleChangeError,
UserIsNotAuthorizedError,
ZoneDiscoveryError
}
import vinyldns.core.domain.membership.{
Group,
GroupRepository,
ListUsersResults,
User,
UserRepository
}
import vinyldns.core.domain.batch.BatchChangeStatus.BatchChangeStatus
import vinyldns.core.domain.{CnameAtZoneApexError, SingleChangeError, UserIsNotAuthorizedError, ZoneDiscoveryError}
import vinyldns.core.domain.membership.{Group, GroupRepository, ListUsersResults, User, UserRepository}
import vinyldns.core.domain.record.RecordType._
import vinyldns.core.domain.record.RecordSetRepository
import vinyldns.core.domain.zone.ZoneRepository
@ -350,7 +342,7 @@ class BatchChangeService(
): ValidatedBatch[ChangeForValidation] =
changes.mapValid { change =>
change.typ match {
case A | AAAA | CNAME | MX | TXT => forwardZoneDiscovery(change, zoneMap)
case A | AAAA | CNAME | MX | TXT | NS | NAPTR | SRV => forwardZoneDiscovery(change, zoneMap)
case PTR if validateIpv4Address(change.inputName).isValid =>
ptrIpv4ZoneDiscovery(change, zoneMap)
case PTR if validateIpv6Address(change.inputName).isValid =>
@ -447,7 +439,7 @@ class BatchChangeService(
auth.userId,
auth.signedInUser.userName,
batchChangeInput.comments,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
changes,
batchChangeInput.ownerGroupId,
BatchChangeApprovalStatus.PendingReview,
@ -462,7 +454,7 @@ class BatchChangeService(
auth.userId,
auth.signedInUser.userName,
batchChangeInput.comments,
DateTime.now,
Instant.now.truncatedTo(ChronoUnit.MILLIS),
changes,
batchChangeInput.ownerGroupId,
BatchChangeApprovalStatus.AutoApproved,
@ -480,14 +472,15 @@ class BatchChangeService(
val hardErrorsPresent = allErrors.exists(_.isFatal)
val noErrors = allErrors.isEmpty
val isScheduled = batchChangeInput.scheduledTime.isDefined && this.scheduledChangesEnabled
val isNSRecordsPresent = batchChangeInput.changes.exists(_.typ == NS)
if (hardErrorsPresent) {
// Always error out
errorResponse
} else if (noErrors && !isScheduled) {
} else if (noErrors && !isScheduled && !isNSRecordsPresent) {
// There are no errors and this is not scheduled, so process immediately
processNowResponse
} else if (this.manualReviewEnabled && allowManualReview) {
} else if (this.manualReviewEnabled && allowManualReview || isNSRecordsPresent) {
if ((noErrors && isScheduled) || batchChangeInput.ownerGroupId.isDefined) {
// There are no errors and this is scheduled
// or we have soft errors and owner group is defined
@ -589,15 +582,22 @@ class BatchChangeService(
def listBatchChangeSummaries(
auth: AuthPrincipal,
userName: Option[String] = None,
dateTimeStartRange: Option[String] = None,
dateTimeEndRange: Option[String] = None,
startFrom: Option[Int] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false,
batchStatus: Option[BatchChangeStatus] = None,
approvalStatus: Option[BatchChangeApprovalStatus] = None
): BatchResult[BatchChangeSummaryList] = {
val userId = if (ignoreAccess && auth.isSystemAdmin) None else Some(auth.userId)
val submitterUserName = if(userName.isDefined && userName.get.isEmpty) None else userName
val startDateTime = if(dateTimeStartRange.isDefined && dateTimeStartRange.get.isEmpty) None else dateTimeStartRange
val endDateTime = if(dateTimeEndRange.isDefined && dateTimeEndRange.get.isEmpty) None else dateTimeEndRange
for {
listResults <- batchChangeRepo
.getBatchChangeSummaries(userId, startFrom, maxItems, approvalStatus)
.getBatchChangeSummaries(userId, submitterUserName, startDateTime, endDateTime, startFrom, maxItems, batchStatus, approvalStatus)
.toBatchResult
rsOwnerGroupIds = listResults.batchChanges.flatMap(_.ownerGroupId).toSet
rsOwnerGroups <- groupRepository.getGroups(rsOwnerGroupIds).toBatchResult
@ -614,7 +614,10 @@ class BatchChangeService(
listWithGroupNames = listResults.copy(
batchChanges = summariesWithReviewerUserNames,
ignoreAccess = ignoreAccess,
approvalStatus = approvalStatus
approvalStatus = approvalStatus,
userName = userName,
dateTimeStartRange = dateTimeStartRange,
dateTimeEndRange = dateTimeEndRange
)
} yield listWithGroupNames
}
@ -631,7 +634,7 @@ class BatchChangeService(
approvalStatus = BatchChangeApprovalStatus.ManuallyRejected,
reviewerId = Some(reviewerId),
reviewComment = reviewComment,
reviewTimestamp = Some(DateTime.now),
reviewTimestamp = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)),
changes = rejectedSingleChanges
)
@ -644,7 +647,7 @@ class BatchChangeService(
// Update rejection attributes and single changes for batch change
val cancelledBatch = batchChange.copy(
approvalStatus = BatchChangeApprovalStatus.Cancelled,
cancelledTimestamp = Some(DateTime.now),
cancelledTimestamp = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)),
changes = cancelledSingleChanges
)

View File

@ -19,6 +19,7 @@ package vinyldns.api.domain.batch
import vinyldns.api.domain.batch.BatchChangeInterfaces.BatchResult
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.batch.BatchChangeApprovalStatus.BatchChangeApprovalStatus
import vinyldns.core.domain.batch.BatchChangeStatus.BatchChangeStatus
import vinyldns.core.domain.batch.{BatchChange, BatchChangeInfo, BatchChangeSummaryList}
// $COVERAGE-OFF$
@ -33,9 +34,13 @@ trait BatchChangeServiceAlgebra {
def listBatchChangeSummaries(
auth: AuthPrincipal,
userName: Option[String] = None,
dateTimeStartRange: Option[String] = None,
dateTimeEndRange: Option[String] = None,
startFrom: Option[Int],
maxItems: Int,
ignoreAccess: Boolean,
batchStatus: Option[BatchChangeStatus],
approvalStatus: Option[BatchChangeApprovalStatus]
): BatchResult[BatchChangeSummaryList]

View File

@ -16,26 +16,26 @@
package vinyldns.api.domain.batch
import java.net.InetAddress
import java.time.Instant
import java.time.temporal.ChronoUnit
import cats.data._
import cats.implicits._
import vinyldns.api.config.{
BatchChangeConfig,
HighValueDomainConfig,
ManualReviewConfig,
ScheduledChangesConfig
}
import vinyldns.api.config.{BatchChangeConfig, HighValueDomainConfig, ManualReviewConfig, ScheduledChangesConfig}
import vinyldns.api.domain.DomainValidations._
import vinyldns.api.domain.access.AccessValidationsAlgebra
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.api.domain.batch.BatchChangeInterfaces._
import vinyldns.api.domain.batch.BatchTransformations._
import vinyldns.api.domain.zone.ZoneRecordValidations.isStringInRegexList
import vinyldns.api.domain.zone.ZoneRecordValidations
import vinyldns.core.Messages.{nonExistentRecordDataDeleteMessage, nonExistentRecordDeleteMessage}
import vinyldns.core.domain.DomainHelpers.omitTrailingDot
import vinyldns.core.domain.record._
import vinyldns.core.domain._
import vinyldns.core.domain.batch.{BatchChange, BatchChangeApprovalStatus, OwnerType, RecordKey}
import vinyldns.core.domain.batch.{BatchChange, BatchChangeApprovalStatus, OwnerType, RecordKey, RecordKeyData}
import vinyldns.core.domain.membership.Group
import vinyldns.core.domain.zone.Zone
import scala.util.matching.Regex
trait BatchChangeValidationsAlgebra {
@ -51,10 +51,10 @@ trait BatchChangeValidationsAlgebra {
): ValidatedBatch[ChangeInput]
def validateChangesWithContext(
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
batchOwnerGroupId: Option[String]
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
batchOwnerGroupId: Option[String]
): ValidatedBatch[ChangeForValidation]
def canGetBatchChange(
@ -85,7 +85,8 @@ class BatchChangeValidations(
highValueDomainConfig: HighValueDomainConfig,
manualReviewConfig: ManualReviewConfig,
batchChangeConfig: BatchChangeConfig,
scheduledChangesConfig: ScheduledChangesConfig
scheduledChangesConfig: ScheduledChangesConfig,
approvedNameServers: List[Regex]
) extends BatchChangeValidationsAlgebra {
import RecordType._
@ -182,7 +183,7 @@ class BatchChangeValidations(
def validateScheduledApproval(batchChange: BatchChange): Either[BatchChangeErrorResponse, Unit] =
batchChange.scheduledTime match {
case Some(dt) if dt.isAfterNow => Left(ScheduledChangeNotDue(dt))
case Some(dt) if dt.isAfter(Instant.now.truncatedTo(ChronoUnit.MILLIS)) => Left(ScheduledChangeNotDue(dt))
case _ => Right(())
}
@ -211,18 +212,18 @@ class BatchChangeValidations(
isApproved: Boolean
): SingleValidation[Unit] = {
val validTTL = addChangeInput.ttl.map(validateTTL(_).asUnit).getOrElse(().valid)
val validRecord = validateRecordData(addChangeInput.record)
val validRecord = validateRecordData(addChangeInput.record, addChangeInput)
val validInput = validateInputName(addChangeInput, isApproved)
validTTL |+| validRecord |+| validInput
}
def validateDeleteRRSetChangeInput(
deleteRRSetChangeInput: DeleteRRSetChangeInput,
isApproved: Boolean
deleteRRSetChangeInput: DeleteRRSetChangeInput,
isApproved: Boolean
): SingleValidation[Unit] = {
val validRecord = deleteRRSetChangeInput.record match {
case Some(recordData) => validateRecordData(recordData)
case Some(recordData) => validateRecordData(recordData, deleteRRSetChangeInput)
case None => ().validNel
}
val validInput = validateInputName(deleteRRSetChangeInput, isApproved)
@ -230,22 +231,32 @@ class BatchChangeValidations(
validRecord |+| validInput
}
def validateRecordData(record: RecordData): SingleValidation[Unit] =
def validateRecordData(record: RecordData,change: ChangeInput): SingleValidation[Unit] =
record match {
case a: AData => validateIpv4Address(a.address).asUnit
case aaaa: AAAAData => validateIpv6Address(aaaa.address).asUnit
case cname: CNAMEData => validateHostName(cname.cname).asUnit
case cname: CNAMEData =>
/*
To validate the zone is reverse
*/
val isIPv4: Boolean = change.inputName.toLowerCase.endsWith("in-addr.arpa.")
val isIPv6: Boolean = change.inputName.toLowerCase.endsWith("ip6.arpa.")
val isReverse: Boolean = isIPv4 || isIPv6
validateCname(cname.cname,isReverse).asUnit
case ptr: PTRData => validateHostName(ptr.ptrdname).asUnit
case txt: TXTData => validateTxtTextLength(txt.text).asUnit
case mx: MXData =>
validateMxPreference(mx.preference).asUnit |+| validateHostName(mx.exchange).asUnit
validateMX_NAPTR_SRVData(mx.preference, "preference", "MX").asUnit |+| validateHostName(mx.exchange).asUnit
case ns: NSData => validateHostName(ns.nsdname).asUnit
case naptr: NAPTRData => validateMX_NAPTR_SRVData(naptr.preference, "preference", "NAPTR").asUnit |+| validateMX_NAPTR_SRVData(naptr.order, "order", "NAPTR").asUnit |+| validateHostName(naptr.replacement).asUnit |+| validateNaptrFlag(naptr.flags).asUnit |+| validateNaptrRegexp(naptr.regexp).asUnit
case srv: SRVData => validateMX_NAPTR_SRVData(srv.priority, "priority", "SRV").asUnit |+| validateMX_NAPTR_SRVData(srv.port, "port", "SRV").asUnit |+| validateMX_NAPTR_SRVData(srv.weight, "weight", "SRV").asUnit |+| validateHostName(srv.target).asUnit
case other =>
InvalidBatchRecordType(other.toString, SupportedBatchChangeRecordTypes.get).invalidNel[Unit]
}
def validateInputName(change: ChangeInput, isApproved: Boolean): SingleValidation[Unit] = {
val typedChecks = change.typ match {
case A | AAAA | MX =>
case A | AAAA | MX | NS | NAPTR | SRV =>
validateHostName(change.inputName).asUnit |+| notInReverseZone(change)
case CNAME | TXT =>
validateHostName(change.inputName).asUnit
@ -272,17 +283,17 @@ class BatchChangeValidations(
/* context validations */
def validateChangesWithContext(
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
batchOwnerGroupId: Option[String]
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
batchOwnerGroupId: Option[String]
): ValidatedBatch[ChangeForValidation] =
// Updates are a combination of an add and delete for a record with the same name and type in a zone.
// Updates are a combination of an add and delete for a record with the same name and type in a zone.
groupedChanges.changes.mapValid {
case add: AddChangeForValidation
if groupedChanges
.getLogicalChangeType(add.recordKey)
.contains(LogicalChangeType.Add) =>
if groupedChanges
.getLogicalChangeType(add.recordKey)
.contains(LogicalChangeType.Add) =>
validateAddWithContext(add, groupedChanges, auth, isApproved, batchOwnerGroupId)
case addUpdate: AddChangeForValidation =>
validateAddUpdateWithContext(addUpdate, groupedChanges, auth, isApproved, batchOwnerGroupId)
@ -304,35 +315,34 @@ class BatchChangeValidations(
else
().validNel
def matchRecordData(existingRecordSetData: List[RecordData], recordData: RecordData): Boolean =
existingRecordSetData.exists { rd =>
(rd, recordData) match {
case (AAAAData(rdAddress), AAAAData(proposedAddress)) =>
InetAddress.getByName(proposedAddress).getHostName == InetAddress
.getByName(rdAddress)
.getHostName
case _ => rd == recordData
}
def matchRecordData(existingRecordSetData: List[RecordData], recordData: RecordData): Boolean = {
existingRecordSetData.par.exists { rd =>
rd == recordData
}
}
def ensureRecordExists(
change: ChangeForValidation,
groupedChanges: ChangeForValidationMap
): SingleValidation[Unit] =
change: ChangeForValidation,
groupedChanges: ChangeForValidationMap
): Boolean = {
change match {
// For DeleteRecord inputs, need to verify that the record data actually exists
case DeleteRRSetChangeForValidation(
_,
_,
DeleteRRSetChangeInput(inputName, _, Some(recordData))
)
if !groupedChanges
.getExistingRecordSet(change.recordKey)
.exists(rs => matchRecordData(rs.records, recordData)) =>
DeleteRecordDataDoesNotExist(inputName, recordData).invalidNel
case DeleteRRSetChangeForValidation(_, _, DeleteRRSetChangeInput(_, _, _, Some(recordData)))
if !groupedChanges
.getExistingRecordSet(change.recordKey)
.exists(rs => matchRecordData(rs.records, recordData)) =>
false
case _ =>
().validNel
true
}
}
def updateSystemMessage(changeInput: ChangeInput, systemMessage: String): ChangeInput = {
changeInput match {
case dci: DeleteRRSetChangeInput => dci.copy(systemMessage = Some(systemMessage))
case _ => changeInput
}
}
def validateDeleteWithContext(
change: ChangeForValidation,
@ -341,15 +351,34 @@ class BatchChangeValidations(
isApproved: Boolean
): SingleValidation[ChangeForValidation] = {
val validations =
groupedChanges.getExistingRecordSet(change.recordKey) match {
case Some(rs) =>
userCanDeleteRecordSet(change, auth, rs.ownerGroupId, rs.records) |+|
zoneDoesNotRequireManualReview(change, isApproved) |+|
ensureRecordExists(change, groupedChanges)
case None => RecordDoesNotExist(change.inputChange.inputName).invalidNel
}
validations.map(_ => change)
val recordData = change match {
case AddChangeForValidation(_, _, inputChange, _, _) => inputChange.record.toString
case DeleteRRSetChangeForValidation(_, _, inputChange) => inputChange.record.map(_.toString).getOrElse("")
}
val addInBatch = groupedChanges.getProposedAdds(change.recordKey)
val isSameRecordUpdateInBatch = recordData.nonEmpty && addInBatch.contains(RecordData.fromString(recordData, change.inputChange.typ).get)
// Perform the system message update based on the condition
val updatedChange = if (groupedChanges.getExistingRecordSet(change.recordKey).isEmpty && !isSameRecordUpdateInBatch) {
val updatedChangeInput = updateSystemMessage(change.inputChange, nonExistentRecordDeleteMessage)
change.withUpdatedInputChange(updatedChangeInput)
} else if (!ensureRecordExists(change, groupedChanges)) {
val updatedChangeInput = updateSystemMessage(change.inputChange, nonExistentRecordDataDeleteMessage)
change.withUpdatedInputChange(updatedChangeInput)
} else {
change
}
val validations = groupedChanges.getExistingRecordSet(updatedChange.recordKey) match {
case Some(rs) =>
userCanDeleteRecordSet(updatedChange, auth, rs.ownerGroupId, rs.records) |+|
zoneDoesNotRequireManualReview(updatedChange, isApproved)
case None =>
if (isSameRecordUpdateInBatch) InvalidUpdateRequest(updatedChange.inputChange.inputName).invalidNel else ().validNel
}
validations.map(_ => updatedChange)
}
def validateAddUpdateWithContext(
@ -378,7 +407,7 @@ class BatchChangeValidations(
) |+|
zoneDoesNotRequireManualReview(change, isApproved)
case None =>
RecordDoesNotExist(change.inputChange.inputName).invalidNel
InvalidUpdateRequest(change.inputChange.inputName).invalidNel
}
}
@ -393,30 +422,52 @@ class BatchChangeValidations(
auth: AuthPrincipal,
isApproved: Boolean
): SingleValidation[ChangeForValidation] = {
// To handle add and delete for the record with same record data is present in the batch
val recordData = change match {
case AddChangeForValidation(_, _, inputChange, _, _) => inputChange.record.toString
case DeleteRRSetChangeForValidation(_, _, inputChange) => inputChange.record.map(_.toString).getOrElse("")
}
val addInBatch = groupedChanges.getProposedAdds(change.recordKey)
val isSameRecordUpdateInBatch = recordData.nonEmpty && addInBatch.contains(RecordData.fromString(recordData, change.inputChange.typ).get)
// Perform the system message update based on the condition
val updatedChange = if (groupedChanges.getExistingRecordSet(change.recordKey).isEmpty && !isSameRecordUpdateInBatch) {
val updatedChangeInput = updateSystemMessage(change.inputChange, nonExistentRecordDeleteMessage)
change.withUpdatedInputChange(updatedChangeInput)
} else if (!ensureRecordExists(change, groupedChanges)) {
val updatedChangeInput = updateSystemMessage(change.inputChange, nonExistentRecordDataDeleteMessage)
change.withUpdatedInputChange(updatedChangeInput)
} else {
change
}
val validations =
groupedChanges.getExistingRecordSet(change.recordKey) match {
groupedChanges.getExistingRecordSet(updatedChange.recordKey) match {
case Some(rs) =>
val adds = groupedChanges.getProposedAdds(change.recordKey).toList
userCanUpdateRecordSet(change, auth, rs.ownerGroupId, adds) |+|
zoneDoesNotRequireManualReview(change, isApproved) |+|
ensureRecordExists(change, groupedChanges)
val adds = groupedChanges.getProposedAdds(updatedChange.recordKey).toList
userCanUpdateRecordSet(updatedChange, auth, rs.ownerGroupId, adds) |+|
zoneDoesNotRequireManualReview(updatedChange, isApproved)
case None =>
RecordDoesNotExist(change.inputChange.inputName).invalidNel
if(isSameRecordUpdateInBatch) InvalidUpdateRequest(updatedChange.inputChange.inputName).invalidNel else ().validNel
}
validations.map(_ => change)
validations.map(_ => updatedChange)
}
def validateAddWithContext(
change: AddChangeForValidation,
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
ownerGroupId: Option[String]
change: AddChangeForValidation,
groupedChanges: ChangeForValidationMap,
auth: AuthPrincipal,
isApproved: Boolean,
ownerGroupId: Option[String]
): SingleValidation[ChangeForValidation] = {
val typedValidations = change.inputChange.typ match {
case A | AAAA | MX =>
case A | AAAA | MX | SRV | NAPTR =>
newRecordSetIsNotDotted(change)
case NS =>
newRecordSetIsNotDotted(change) |+| nsValidations(change.inputChange.record, change.recordName, change.zone, approvedNameServers)
case CNAME =>
cnameHasUniqueNameInBatch(change, groupedChanges) |+|
newRecordSetIsNotDotted(change)
@ -426,8 +477,29 @@ class BatchChangeValidations(
InvalidBatchRecordType(other.toString, SupportedBatchChangeRecordTypes.get).invalidNel
}
// To handle add and delete for the record with same record data is present in the batch
val recordData = change match {
case AddChangeForValidation(_, _, inputChange, _, _) => inputChange.record.toString
}
val deletes = groupedChanges.getProposedDeletes(change.recordKey)
val isDeleteExists = deletes.nonEmpty
val isSameRecordUpdateInBatch = if(recordData.nonEmpty){
if(deletes.contains(RecordData.fromString(recordData, change.inputChange.typ).get)) true else false
} else false
val commonValidations: SingleValidation[Unit] = {
groupedChanges.getExistingRecordSet(change.recordKey) match {
case Some(_) =>
().validNel
case None =>
if(isSameRecordUpdateInBatch) InvalidUpdateRequest(change.inputChange.inputName).invalidNel else ().validNel
}
}
val validations =
typedValidations |+|
commonValidations |+|
noIncompatibleRecordExists(change, groupedChanges) |+|
userCanAddRecordSet(change, auth) |+|
recordDoesNotExist(
@ -435,11 +507,12 @@ class BatchChangeValidations(
change.recordName,
change.inputChange.inputName,
change.inputChange.typ,
groupedChanges
change.inputChange.record,
groupedChanges,
isDeleteExists
) |+|
ownerGroupProvidedIfNeeded(change, None, ownerGroupId) |+|
zoneDoesNotRequireManualReview(change, isApproved)
validations.map(_ => change)
}
@ -477,11 +550,17 @@ class BatchChangeValidations(
recordName: String,
inputName: String,
typ: RecordType,
groupedChanges: ChangeForValidationMap
): SingleValidation[Unit] =
groupedChanges.getExistingRecordSet(RecordKey(zoneId, recordName, typ)) match {
case Some(_) => RecordAlreadyExists(inputName).invalidNel
case None => ().validNel
recordData: RecordData,
groupedChanges: ChangeForValidationMap,
isDeleteExist: Boolean
): SingleValidation[Unit] = {
val record = groupedChanges.getExistingRecordSetData(RecordKeyData(zoneId, recordName, typ, recordData))
if(record.isDefined) {
record.get.records.contains(recordData) match {
case true => ().validNel
case false => if(isDeleteExist) ().validNel else RecordAlreadyExists(inputName).invalidNel
}
} else ().validNel
}
def noIncompatibleRecordExists(
@ -552,6 +631,7 @@ class BatchChangeValidations(
input.inputChange.typ,
input.zone,
ownerGroupId,
false,
addRecords
)
result
@ -670,7 +750,7 @@ class BatchChangeValidations(
): Either[BatchChangeErrorResponse, Unit] =
(scheduledChangesEnabled, input.scheduledTime) match {
case (_, None) => Right(())
case (true, Some(scheduledTime)) if scheduledTime.isAfterNow => Right(())
case (true, Some(scheduledTime)) if scheduledTime.isAfter(Instant.now.truncatedTo(ChronoUnit.MILLIS)) => Right(())
case (true, _) => Left(ScheduledTimeMustBeInFuture)
case (false, _) => Left(ScheduledChangesDisabled)
}
@ -688,4 +768,46 @@ class BatchChangeValidations(
change.inputChange.inputName
)
}
def nsValidations(
newRecordSetData: RecordData,
newRecordSetName: String,
zone: Zone,
approvedNameServers: List[Regex]
): SingleValidation[Unit] = {
isNotOrigin(
newRecordSetName,
zone,
s"Record with name $newRecordSetName is an NS record at apex and cannot be added"
)
containsApprovedNameServers(newRecordSetData, approvedNameServers)
}
def isNotOrigin(recordSet: String, zone: Zone, err: String): SingleValidation[Unit] =
if(!isOriginRecord(recordSet, omitTrailingDot(zone.name))) ().validNel else InvalidBatchRequest(err).invalidNel
def isOriginRecord(recordSetName: String, zoneName: String): Boolean =
recordSetName == "@" || omitTrailingDot(recordSetName) == omitTrailingDot(zoneName)
def containsApprovedNameServers(
nsRecordSet: RecordData,
approvedNameServers: List[Regex]
): SingleValidation[Unit] = {
val nsData = nsRecordSet match {
case ns: NSData => ns
case _ => ??? // this would never be the case
}
isApprovedNameServer(approvedNameServers, nsData)
}
def isApprovedNameServer(
approvedServerList: List[Regex],
nsData: NSData
): SingleValidation[Unit] =
if (isStringInRegexList(approvedServerList, nsData.nsdname.fqdn)) {
().validNel
} else {
NotApprovedNSError(nsData.nsdname.fqdn).invalidNel
}
}

View File

@ -16,7 +16,6 @@
package vinyldns.api.domain.batch
import java.net.InetAddress
import java.util.UUID
import vinyldns.api.domain.ReverseZoneHelpers
@ -24,13 +23,13 @@ import vinyldns.api.domain.batch.BatchChangeInterfaces.ValidatedBatch
import vinyldns.api.domain.batch.BatchTransformations.LogicalChangeType.LogicalChangeType
import vinyldns.api.backend.dns.DnsConversions.getIPv6FullReverseName
import vinyldns.core.domain.batch._
import vinyldns.core.domain.record.{AAAAData, RecordData, RecordSet, RecordSetChange}
import vinyldns.core.domain.record.{RecordData, RecordSet, RecordSetChange}
import vinyldns.core.domain.record.RecordType._
import vinyldns.core.domain.zone.Zone
import vinyldns.core.domain.record.RecordType.RecordType
object SupportedBatchChangeRecordTypes {
val supportedTypes = Set(A, AAAA, CNAME, PTR, TXT, MX)
val supportedTypes = Set(A, AAAA, CNAME, PTR, TXT, MX, NS, SRV, NAPTR)
def get: Set[RecordType] = supportedTypes
}
@ -68,6 +67,9 @@ object BatchTransformations {
def get(recordKey: RecordKey): Option[RecordSet] =
get(recordKey.zoneId, recordKey.recordName, recordKey.recordType)
def get(recordKeyData: RecordKeyData): Option[RecordSet] =
get(recordKeyData.zoneId, recordKeyData.recordName, recordKeyData.recordType)
def getRecordSetMatch(zoneId: String, name: String): List[RecordSet] =
recordSetMap.getOrElse((zoneId, name.toLowerCase), List())
}
@ -79,6 +81,7 @@ object BatchTransformations {
val recordKey = RecordKey(zone.id, recordName, inputChange.typ)
def asStoredChange(changeId: Option[String] = None): SingleChange
def isAddChangeForValidation: Boolean
def withUpdatedInputChange(inputChange: ChangeInput): ChangeForValidation
}
object ChangeForValidation {
@ -115,7 +118,7 @@ object BatchTransformations {
ttl,
inputChange.record,
SingleChangeStatus.Pending,
None,
inputChange.systemMessage,
None,
None,
List.empty,
@ -124,6 +127,10 @@ object BatchTransformations {
}
def isAddChangeForValidation: Boolean = true
def withUpdatedInputChange(inputChange: ChangeInput): ChangeForValidation = {
this.copy(inputChange = inputChange.asInstanceOf[AddChangeInput])
}
}
final case class DeleteRRSetChangeForValidation(
@ -140,7 +147,7 @@ object BatchTransformations {
inputChange.typ,
inputChange.record,
SingleChangeStatus.Pending,
None,
inputChange.systemMessage,
None,
None,
List.empty,
@ -148,6 +155,10 @@ object BatchTransformations {
)
def isAddChangeForValidation: Boolean = false
def withUpdatedInputChange(inputChange: ChangeInput): ChangeForValidation = {
this.copy(inputChange = inputChange.asInstanceOf[DeleteRRSetChangeInput])
}
}
final case class BatchConversionOutput(
@ -171,9 +182,15 @@ object BatchTransformations {
def getExistingRecordSet(recordKey: RecordKey): Option[RecordSet] =
existingRecordSets.get(recordKey)
def getExistingRecordSetData(recordKeyData: RecordKeyData): Option[RecordSet] =
existingRecordSets.get(recordKeyData)
def getProposedAdds(recordKey: RecordKey): Set[RecordData] =
innerMap.get(recordKey).map(_.proposedAdds).toSet.flatten
def getProposedDeletes(recordKey: RecordKey): Set[RecordData] =
innerMap.get(recordKey).map(_.proposedDeletes).toSet.flatten
// The new, net record data factoring in existing records, deletes and adds
// If record is not edited in batch, will fallback to look up record in existing
// records
@ -188,13 +205,6 @@ object BatchTransformations {
}
object ValidationChanges {
def matchRecordData(existingRecord: RecordData, recordData: String): Boolean =
existingRecord match {
case AAAAData(address) =>
InetAddress.getByName(address).getHostName ==
InetAddress.getByName(recordData).getHostName
case _ => false
}
def apply(
changes: List[ChangeForValidation],
@ -214,16 +224,11 @@ object BatchTransformations {
case DeleteRRSetChangeForValidation(
_,
_,
DeleteRRSetChangeInput(_, AAAA, Some(AAAAData(address)))
) =>
existingRecords.filter(r => matchRecordData(r, address))
case DeleteRRSetChangeForValidation(
_,
_,
DeleteRRSetChangeInput(_, _, Some(recordData))
DeleteRRSetChangeInput(_, _, _, Some(recordData))
) =>
Set(recordData)
case _: DeleteRRSetChangeForValidation => existingRecords
case _: DeleteRRSetChangeForValidation =>
existingRecords
}
.toSet
.flatten
@ -231,26 +236,45 @@ object BatchTransformations {
// New proposed record data (assuming all validations pass)
val proposedRecordData = existingRecords -- deleteChangeSet ++ addChangeRecordDataSet
// Note: "Update" where an Add and DeleteRecordSet is provided for a DNS record that does not exist will be
// treated as a logical Add since the delete validation will fail (on record does not exist)
val logicalChangeType = (addChangeRecordDataSet.nonEmpty, deleteChangeSet.nonEmpty) match {
case (true, true) => LogicalChangeType.Update
case (true, false) => LogicalChangeType.Add
case (false, true) =>
if ((existingRecords -- deleteChangeSet).isEmpty) {
LogicalChangeType.FullDelete
case (true, true) =>
if (existingRecords.isEmpty) {
// Note: "Add" where an Add and DeleteRecordSet is provided for a DNS record that does not exist.
// Adds the record if it doesn't exist and ignores the delete.
LogicalChangeType.Add
} else {
// Note: "Update" where an Add and DeleteRecordSet is provided for a DNS record that exist, but record data for DeleteRecordSet does not exist.
// Updates the record and ignores the delete.
LogicalChangeType.Update
}
case (false, false) => LogicalChangeType.NotEditedInBatch
case (true, false) => LogicalChangeType.Add
case (false, true) =>
if (existingRecords == deleteChangeSet) {
LogicalChangeType.FullDelete
} else if (existingRecords.nonEmpty) {
LogicalChangeType.Update
} else {
LogicalChangeType.OutOfSync
}
case (false, false) =>
if(changes.exists {
case _: DeleteRRSetChangeForValidation => true
case _ => false
}
){
LogicalChangeType.OutOfSync
} else {
LogicalChangeType.NotEditedInBatch
}
}
new ValidationChanges(addChangeRecordDataSet, proposedRecordData, logicalChangeType)
new ValidationChanges(addChangeRecordDataSet, deleteChangeSet, proposedRecordData, logicalChangeType)
}
}
final case class ValidationChanges(
proposedAdds: Set[RecordData],
proposedDeletes: Set[RecordData],
proposedRecordData: Set[RecordData],
logicalChangeType: LogicalChangeType
)
@ -263,6 +287,6 @@ object BatchTransformations {
object LogicalChangeType extends Enumeration {
type LogicalChangeType = Value
val Add, FullDelete, Update, NotEditedInBatch = Value
val Add, FullDelete, Update, NotEditedInBatch, OutOfSync = Value
}
}

View File

@ -17,8 +17,9 @@
package vinyldns.api.domain.membership
import java.util.UUID
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.membership.GroupChangeType.GroupChangeType
import vinyldns.core.domain.membership.GroupStatus.GroupStatus
import vinyldns.core.domain.membership.LockStatus.LockStatus
@ -30,21 +31,26 @@ final case class GroupInfo(
name: String,
email: String,
description: Option[String] = None,
created: DateTime = DateTime.now,
created: Instant = Instant.now.truncatedTo(ChronoUnit.MILLIS),
status: GroupStatus = GroupStatus.Active,
members: Set[UserId] = Set.empty,
admins: Set[UserId] = Set.empty
)
object GroupInfo {
def apply(group: Group): GroupInfo = GroupInfo(
def apply(group: Group): GroupInfo = fromGroup(group, abridged = false, None)
def fromGroup(group: Group, abridged: Boolean = false,
authPrincipal: Option[AuthPrincipal]): GroupInfo = GroupInfo(
id = group.id,
name = group.name,
email = group.email,
description = group.description,
created = group.created,
status = group.status,
members = group.memberIds.map(UserId),
admins = group.adminUserIds.map(UserId)
created = if (abridged) null else group.created,
status = if (abridged) null else group.status,
members = (if (abridged && authPrincipal.isDefined) group.memberIds.filter(x => authPrincipal.get.userId == x && authPrincipal.get.isGroupMember(group.id))
else group.memberIds).map(UserId),
admins = (if (abridged && authPrincipal.isDefined) group.adminUserIds.filter(x => authPrincipal.get.userId == x && authPrincipal.get.isGroupAdmin(group))
else group.adminUserIds).map(UserId)
)
}
@ -54,7 +60,9 @@ final case class GroupChangeInfo(
userId: String,
oldGroup: Option[GroupInfo] = None,
id: String = UUID.randomUUID().toString,
created: String = DateTime.now.getMillis.toString
created: Instant = Instant.now.truncatedTo(ChronoUnit.MILLIS),
userName: String,
groupChangeMessage: String
)
object GroupChangeInfo {
@ -64,7 +72,9 @@ object GroupChangeInfo {
userId = groupChange.userId,
oldGroup = groupChange.oldGroup.map(GroupInfo.apply),
id = groupChange.id,
created = groupChange.created.getMillis.toString
created = groupChange.created,
userName = groupChange.userName.getOrElse("unknown user"),
groupChangeMessage = groupChange.groupChangeMessage.getOrElse("")
)
}
@ -76,7 +86,7 @@ case class UserInfo(
firstName: Option[String] = None,
lastName: Option[String] = None,
email: Option[String] = None,
created: Option[DateTime] = None,
created: Option[Instant] = None,
lockStatus: LockStatus
)
object UserInfo {
@ -92,13 +102,28 @@ object UserInfo {
)
}
case class UserResponseInfo(
id: String,
userName: Option[String] = None,
groupId: Set[String] = Set.empty
)
object UserResponseInfo {
def apply(user: User , group: Group): UserResponseInfo =
UserResponseInfo(
id = user.id,
userName = Some(user.userName),
groupId = Set(group.id)
)
}
case class MemberInfo(
id: String,
userName: Option[String] = None,
firstName: Option[String] = None,
lastName: Option[String] = None,
email: Option[String] = None,
created: Option[DateTime] = None,
created: Option[Instant] = None,
isAdmin: Boolean = false,
lockStatus: LockStatus
)
@ -135,8 +160,8 @@ final case class ListAdminsResponse(admins: Seq[UserInfo])
final case class ListGroupChangesResponse(
changes: Seq[GroupChangeInfo],
startFrom: Option[String] = None,
nextId: Option[String] = None,
startFrom: Option[Int] = None,
nextId: Option[Int] = None,
maxItems: Int
)
@ -153,6 +178,10 @@ final case class GroupNotFoundError(msg: String) extends Throwable(msg)
final case class GroupAlreadyExistsError(msg: String) extends Throwable(msg)
final case class GroupValidationError(msg: String) extends Throwable(msg)
final case class EmailValidationError(msg: String) extends Throwable(msg)
final case class UserNotFoundError(msg: String) extends Throwable(msg)
final case class InvalidGroupError(msg: String) extends Throwable(msg)

View File

@ -16,8 +16,11 @@
package vinyldns.api.domain.membership
import cats.effect.IO
import cats.implicits._
import scalikejdbc.DB
import vinyldns.api.Interfaces._
import vinyldns.api.config.ValidEmailConfig
import vinyldns.api.repository.ApiDataAccessor
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.membership.LockStatus.LockStatus
@ -25,16 +28,18 @@ import vinyldns.core.domain.zone.ZoneRepository
import vinyldns.core.domain.membership._
import vinyldns.core.domain.record.RecordSetRepository
import vinyldns.core.Messages._
import vinyldns.mysql.TransactionProvider
object MembershipService {
def apply(dataAccessor: ApiDataAccessor): MembershipService =
def apply(dataAccessor: ApiDataAccessor,emailConfig:ValidEmailConfig): MembershipService =
new MembershipService(
dataAccessor.groupRepository,
dataAccessor.userRepository,
dataAccessor.membershipRepository,
dataAccessor.zoneRepository,
dataAccessor.groupChangeRepository,
dataAccessor.recordSetRepository
dataAccessor.recordSetRepository,
emailConfig
)
}
@ -44,8 +49,9 @@ class MembershipService(
membershipRepo: MembershipRepository,
zoneRepo: ZoneRepository,
groupChangeRepo: GroupChangeRepository,
recordSetRepo: RecordSetRepository
) extends MembershipServiceAlgebra {
recordSetRepo: RecordSetRepository,
validDomains: ValidEmailConfig
) extends MembershipServiceAlgebra with TransactionProvider {
import MembershipValidations._
@ -54,21 +60,20 @@ class MembershipService(
val adminMembers = inputGroup.adminUserIds
val nonAdminMembers = inputGroup.memberIds.diff(adminMembers)
for {
_ <- groupValidation(newGroup)
_ <- emailValidation(newGroup.email)
_ <- hasMembersAndAdmins(newGroup).toResult
_ <- groupWithSameNameDoesNotExist(newGroup.name)
_ <- usersExist(newGroup.memberIds)
_ <- groupChangeRepo.save(GroupChange.forAdd(newGroup, authPrincipal)).toResult[GroupChange]
_ <- groupRepo.save(newGroup).toResult[Group]
// save admin and non-admin members separately
_ <- membershipRepo
.saveMembers(newGroup.id, adminMembers, isAdmin = true)
.toResult[Set[String]]
_ <- membershipRepo
.saveMembers(newGroup.id, nonAdminMembers, isAdmin = false)
.toResult[Set[String]]
_ <- createGroupData(GroupChange.forAdd(newGroup, authPrincipal), newGroup, adminMembers, nonAdminMembers).toResult[Unit]
} yield newGroup
}
def listEmailDomains(authPrincipal: AuthPrincipal): Result[List[String]] = {
val validEmailDomains = validDomains.valid_domains
IO(validEmailDomains).toResult
}
def updateGroup(
groupId: String,
name: String,
@ -81,6 +86,8 @@ class MembershipService(
for {
existingGroup <- getExistingGroup(groupId)
newGroup = existingGroup.withUpdates(name, email, description, memberIds, adminUserIds)
_ <- groupValidation(newGroup)
_ <- emailValidation(newGroup.email)
_ <- canEditGroup(existingGroup, authPrincipal).toResult
addedAdmins = newGroup.adminUserIds.diff(existingGroup.adminUserIds)
// new non-admin members ++ admins converted to non-admins
@ -90,18 +97,7 @@ class MembershipService(
_ <- hasMembersAndAdmins(newGroup).toResult
_ <- usersExist(addedNonAdmins)
_ <- differentGroupWithSameNameDoesNotExist(newGroup.name, existingGroup.id)
_ <- groupChangeRepo
.save(GroupChange.forUpdate(newGroup, existingGroup, authPrincipal))
.toResult[GroupChange]
_ <- groupRepo.save(newGroup).toResult[Group]
// save admin and non-admin members separately
_ <- membershipRepo
.saveMembers(existingGroup.id, addedAdmins, isAdmin = true)
.toResult[Set[String]]
_ <- membershipRepo
.saveMembers(existingGroup.id, addedNonAdmins, isAdmin = false)
.toResult[Set[String]]
_ <- membershipRepo.removeMembers(existingGroup.id, removedMembers).toResult[Set[String]]
_ <- updateGroupData(GroupChange.forUpdate(newGroup, existingGroup, authPrincipal), newGroup, existingGroup, addedAdmins, addedNonAdmins, removedMembers).toResult[Unit]
} yield newGroup
def deleteGroup(groupId: String, authPrincipal: AuthPrincipal): Result[Group] =
@ -111,16 +107,64 @@ class MembershipService(
_ <- isNotZoneAdmin(existingGroup)
_ <- isNotRecordOwnerGroup(existingGroup)
_ <- isNotInZoneAclRule(existingGroup)
_ <- groupChangeRepo
.save(GroupChange.forDelete(existingGroup, authPrincipal))
.toResult[GroupChange]
_ <- membershipRepo
.removeMembers(existingGroup.id, existingGroup.memberIds)
.toResult[Set[String]]
deletedGroup = existingGroup.copy(status = GroupStatus.Deleted)
_ <- groupRepo.delete(deletedGroup).toResult[Group]
deletedGroup <- deleteGroupData(GroupChange.forDelete(existingGroup, authPrincipal), existingGroup).toResult[Group]
} yield deletedGroup
def createGroupData(
groupChangeData: GroupChange,
newGroup: Group,
adminMembers: Set[String],
nonAdminMembers: Set[String]
): IO[Unit] =
executeWithinTransaction { db: DB =>
for {
_ <- groupChangeRepo.save(db, groupChangeData)
_ <- groupRepo.save(db, newGroup)
// save admin and non-admin members separately
_ <- membershipRepo
.saveMembers(db, newGroup.id, adminMembers, isAdmin = true)
_ <- membershipRepo
.saveMembers(db, newGroup.id, nonAdminMembers, isAdmin = false)
} yield ()
}
def updateGroupData(
groupChangeData: GroupChange,
newGroup: Group,
existingGroup: Group,
addedAdmins: Set[String],
addedNonAdmins: Set[String],
removedMembers: Set[String]
): IO[Unit] =
executeWithinTransaction { db: DB =>
for {
_ <- groupChangeRepo
.save(db, groupChangeData)
_ <- groupRepo.save(db, newGroup)
// save admin and non-admin members separately
_ <- membershipRepo
.saveMembers(db, existingGroup.id, addedAdmins, isAdmin = true)
_ <- membershipRepo
.saveMembers(db, existingGroup.id, addedNonAdmins, isAdmin = false)
_ <- membershipRepo.removeMembers(db, existingGroup.id, removedMembers)
} yield ()
}
def deleteGroupData(
groupChangeData: GroupChange,
existingGroup: Group,
): IO[Group] =
executeWithinTransaction { db: DB =>
for {
_ <- groupChangeRepo
.save(db, groupChangeData)
_ <- membershipRepo
.removeMembers(db, existingGroup.id, existingGroup.memberIds)
deletedGroup = existingGroup.copy(status = GroupStatus.Deleted)
_ <- groupRepo.delete(deletedGroup)
} yield deletedGroup
}
def getGroup(id: String, authPrincipal: AuthPrincipal): Result[Group] =
for {
group <- getExistingGroup(id)
@ -156,7 +200,8 @@ class MembershipService(
startFrom: Option[String],
maxItems: Int,
authPrincipal: AuthPrincipal,
ignoreAccess: Boolean
ignoreAccess: Boolean,
abridged: Boolean = false
): Result[ListMyGroupsResponse] = {
val groupsCall =
if (authPrincipal.isSystemAdmin || ignoreAccess) {
@ -166,7 +211,7 @@ class MembershipService(
}
groupsCall.map { grp =>
pageListGroupsResponse(grp.toList, groupNameFilter, startFrom, maxItems, ignoreAccess)
pageListGroupsResponse(grp.toList, groupNameFilter, startFrom, maxItems, ignoreAccess, abridged, authPrincipal)
}
}.toResult
@ -175,16 +220,24 @@ class MembershipService(
groupNameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
ignoreAccess: Boolean
): ListMyGroupsResponse = {
ignoreAccess: Boolean,
abridged: Boolean = false,
authPrincipal: AuthPrincipal
): ListMyGroupsResponse = {
val allMyGroups = allGroups
.filter(_.status == GroupStatus.Active)
.sortBy(_.id)
.map(GroupInfo.apply)
.sortBy(_.name.toLowerCase)
.map(x => GroupInfo.fromGroup(x, abridged, Some(authPrincipal)))
val filtered = allMyGroups
.filter(grp => groupNameFilter.forall(grp.name.contains(_)))
.filter(grp => startFrom.forall(grp.id > _))
val filtered = if(startFrom.isDefined){
val prevPageGroup = allMyGroups.filter(_.id == startFrom.get).head.name
allMyGroups
.filter(grp => groupNameFilter.map(_.toLowerCase).forall(grp.name.toLowerCase.contains(_)))
.filter(grp => grp.name.toLowerCase > prevPageGroup.toLowerCase)
} else {
allMyGroups
.filter(grp => groupNameFilter.map(_.toLowerCase).forall(grp.name.toLowerCase.contains(_)))
}
val nextId = if (filtered.length > maxItems) Some(filtered(maxItems - 1).id) else None
val groups = filtered.take(maxItems)
@ -192,24 +245,131 @@ class MembershipService(
ListMyGroupsResponse(groups, groupNameFilter, startFrom, nextId, maxItems, ignoreAccess)
}
def getGroupChange(
groupChangeId: String,
authPrincipal: AuthPrincipal
): Result[GroupChangeInfo] =
for {
result <- groupChangeRepo
.getGroupChange(groupChangeId)
.toResult[Option[GroupChange]]
_ <- isGroupChangePresent(result).toResult
_ <- canSeeGroupChange(result.get.newGroup.id, authPrincipal).toResult
allUserIds = getGroupUserIds(Seq(result.get))
allUserMap <- getUsers(allUserIds).map(_.users.map(x => x.id -> x.userName).toMap.withDefaultValue("unknown user"))
groupChangeMessage <- determineGroupDifference(Seq(result.get), allUserMap)
groupChanges = (groupChangeMessage, Seq(result.get)).zipped.map{ (a, b) => b.copy(groupChangeMessage = Some(a)) }
userIds = Seq(result.get).map(_.userId).toSet
users <- getUsers(userIds).map(_.users)
userMap = users.map(u => (u.id, u.userName)).toMap
} yield groupChanges.map(change => GroupChangeInfo.apply(change.copy(userName = userMap.get(change.userId)))).head
def getGroupActivity(
groupId: String,
startFrom: Option[String],
startFrom: Option[Int],
maxItems: Int,
authPrincipal: AuthPrincipal
): Result[ListGroupChangesResponse] =
for {
_ <- canSeeGroup(groupId, authPrincipal).toResult
_ <- canSeeGroupChange(groupId, authPrincipal).toResult
result <- groupChangeRepo
.getGroupChanges(groupId, startFrom, maxItems)
.toResult[ListGroupChangesResults]
allUserIds = getGroupUserIds(result.changes)
allUserMap <- getUsers(allUserIds).map(_.users.map(x => x.id -> x.userName).toMap.withDefaultValue("unknown user"))
groupChangeMessage <- determineGroupDifference(result.changes, allUserMap)
groupChanges = (groupChangeMessage, result.changes).zipped.map{ (a, b) => b.copy(groupChangeMessage = Some(a)) }
userIds = result.changes.map(_.userId).toSet
users <- getUsers(userIds).map(_.users)
userMap = users.map(u => (u.id, u.userName)).toMap
} yield ListGroupChangesResponse(
result.changes.map(GroupChangeInfo.apply),
groupChanges.map(change => GroupChangeInfo.apply(change.copy(userName = userMap.get(change.userId)))),
startFrom,
result.lastEvaluatedTimeStamp,
result.nextId,
maxItems
)
// Collects every user id that was added to or removed from (as admin or member)
// any group change in the given sequence. Changes without an oldGroup
// (Create/Delete) contribute nothing, mirroring the message-building logic.
//
// @param groupChange the changes to scan
// @return the set of user ids that appear in any admin/member diff
def getGroupUserIds(groupChange: Seq[GroupChange]): Set[String] =
  groupChange.flatMap { change =>
    change.oldGroup match {
      case Some(old) =>
        // Symmetric difference of admin and member sets between the versions.
        change.newGroup.adminUserIds.diff(old.adminUserIds) ++
          old.adminUserIds.diff(change.newGroup.adminUserIds) ++
          change.newGroup.memberIds.diff(old.memberIds) ++
          old.memberIds.diff(change.newGroup.memberIds)
      case None =>
        Set.empty[String]
    }
  }.toSet
// Builds one human-readable summary message per group change, in input order.
// For updates (oldGroup present) the message lists each changed attribute plus
// added/removed admins and members, resolving user ids to names via allUserMap.
// For changes without an oldGroup, a fixed "Group Created."/"Group Deleted."
// message is produced based on the change type.
//
// @param groupChange the changes to describe
// @param allUserMap  user id -> user name, expected to default to "unknown user" for missing ids
// @return one message per input change, same order as the input
def determineGroupDifference(groupChange: Seq[GroupChange], allUserMap: Map[String, String]): Result[Seq[String]] = {
  var groupChangeMessage: Seq[String] = Seq.empty[String]
  for (change <- groupChange) {
    val sb = new StringBuilder
    if (change.oldGroup.isDefined) {
      if (change.oldGroup.get.name != change.newGroup.name) {
        sb.append(s"Group name changed to '${change.newGroup.name}'. ")
      }
      if (change.oldGroup.get.email != change.newGroup.email) {
        sb.append(s"Group email changed to '${change.newGroup.email}'. ")
      }
      if (change.oldGroup.get.description != change.newGroup.description) {
        // A cleared description is reported as a change to the empty string.
        val description = if(change.newGroup.description.isEmpty) "" else change.newGroup.description.get
        sb.append(s"Group description changed to '$description'. ")
      }
      // Ids present only in the new group were added; only in the old, removed.
      val adminAddDifference = change.newGroup.adminUserIds.diff(change.oldGroup.get.adminUserIds)
      if (adminAddDifference.nonEmpty) {
        sb.append(s"Group admin/s with user name/s '${adminAddDifference.map(x => allUserMap(x)).mkString("','")}' added. ")
      }
      val adminRemoveDifference = change.oldGroup.get.adminUserIds.diff(change.newGroup.adminUserIds)
      if (adminRemoveDifference.nonEmpty) {
        sb.append(s"Group admin/s with user name/s '${adminRemoveDifference.map(x => allUserMap(x)).mkString("','")}' removed. ")
      }
      val memberAddDifference = change.newGroup.memberIds.diff(change.oldGroup.get.memberIds)
      if (memberAddDifference.nonEmpty) {
        sb.append(s"Group member/s with user name/s '${memberAddDifference.map(x => allUserMap(x)).mkString("','")}' added. ")
      }
      val memberRemoveDifference = change.oldGroup.get.memberIds.diff(change.newGroup.memberIds)
      if (memberRemoveDifference.nonEmpty) {
        sb.append(s"Group member/s with user name/s '${memberRemoveDifference.map(x => allUserMap(x)).mkString("','")}' removed. ")
      }
      groupChangeMessage = groupChangeMessage :+ sb.toString().trim
    }
    // It'll be in else statement if the group was created or deleted
    else {
      if (change.changeType == GroupChangeType.Create) {
        sb.append("Group Created.")
      }
      else if (change.changeType == GroupChangeType.Delete){
        sb.append("Group Deleted.")
      }
      // Note: any other changeType without an oldGroup yields an empty message.
      groupChangeMessage = groupChangeMessage :+ sb.toString()
    }
  }
  groupChangeMessage
}.toResult
/**
  * Retrieves the requested User from the given userIdentifier, which can be a userId or username.
  *
  * @param userIdentifier the userId or username to look up
  * @param authPrincipal the authenticated principal (unused here; no authorization check is performed in this method)
  * @return the found User, or fails with UserNotFoundError when no user matches
  */
def getUser(userIdentifier: String, authPrincipal: AuthPrincipal): Result[User] =
  userRepo
    .getUserByIdOrName(userIdentifier)
    .orFail(UserNotFoundError(s"User $userIdentifier was not found"))
    .toResult[User]
// Fetches a user (by id or username) together with the ids of the groups the
// user belongs to, wrapped as a UserResponseInfo.
//
// @param userIdentifier the userId or username to look up
// @param authPrincipal  the authenticated principal (forwarded to getUser)
def getUserDetails(userIdentifier: String, authPrincipal: AuthPrincipal): Result[UserResponseInfo] =
  for{
    user <- getUser(userIdentifier,authPrincipal)
    group <- membershipRepo.getGroupsForUser(user.id).toResult[Set[String]]
  } yield UserResponseInfo(user.id, Some(user.userName), group)
def getUsers(
userIds: Set[String],
startFrom: Option[String] = None,
@ -231,6 +391,47 @@ class MembershipService(
.orFail(GroupNotFoundError(s"Group with ID $groupId was not found"))
.toResult[Group]
// Validate group details. Group name and email cannot be empty or whitespace-only.
// A null group reference falls through to the success case, as before.
def groupValidation(group: Group): Result[Unit] = {
  // Blank means null, empty, or whitespace-only.
  def isBlank(s: String): Boolean = Option(s).forall(_.trim.isEmpty)
  Option(group) match {
    case Some(g) if isBlank(g.name) || isBlank(g.email) =>
      GroupValidationError(GroupValidationErrorMsg).asLeft
    case _ =>
      ().asRight
  }
}.toResult
// Validate email details. Email domain details are fetched from the config file.
// Checks, in order: basic syntax via regex, that the domain part contains at
// least one dot, that the domain is allowed by config (exact match, wildcard
// suffix match, or an empty allow-list meaning "allow all"), and that the
// domain does not exceed the configured number of dots.
//
// Fixes over the previous version: a null email no longer throws an NPE
// (`email.indexOf` used to run before the null check); the domain part is a
// proper String instead of an `Any` produced by an `if` without `else`; the
// deprecated `replaceAllLiterally` and `!= None` idioms are replaced.
def emailValidation(email: String): Result[Unit] = {
  val emailDomains = validDomains.valid_domains
  val numberOfDots = validDomains.number_of_dots
  val emailRegex =
    """^(?!\.)(?!.*\.$)(?!.*\.\.)[a-zA-Z0-9._+!&-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$""".r
  // Wildcard entries like "*.example.com" are compared as plain suffixes.
  val wildcardEmailDomains =
    if (emailDomains.exists(_.contains("*"))) emailDomains.map(_.replace("*", ""))
    else emailDomains
  Option(email) match {
    case Some(value) =>
      // Domain portion after '@'; empty when '@' is absent (the regex rejects
      // that case as well, so the error path is unchanged).
      val atIndex = value.indexOf('@')
      val domainPart = if (atIndex != -1) value.substring(atIndex + 1) else ""
      val dotCount = domainPart.count(_ == '.')
      if (emailRegex.findFirstIn(value).isDefined && dotCount > 0) {
        val domainAllowed =
          emailDomains.isEmpty || emailDomains.contains(domainPart) ||
            wildcardEmailDomains.exists(domainPart.endsWith)
        if (domainAllowed && dotCount <= numberOfDots)
          ().asRight
        else if (dotCount > numberOfDots)
          EmailValidationError(DotsValidationErrorMsg + " " + numberOfDots).asLeft
        else
          EmailValidationError(EmailValidationErrorMsg + " " + wildcardEmailDomains.mkString(",")).asLeft
      } else
        EmailValidationError(InvalidEmailValidationErrorMsg).asLeft
    case None =>
      EmailValidationError(InvalidEmailValidationErrorMsg).asLeft
  }
}.toResult
def groupWithSameNameDoesNotExist(name: String): Result[Unit] =
groupRepo
.getGroupByName(name)
@ -257,7 +458,7 @@ class MembershipService(
.getGroupByName(name)
.map {
case Some(existingGroup)
if existingGroup.status != GroupStatus.Deleted && existingGroup.id != groupId =>
if existingGroup.status != GroupStatus.Deleted && existingGroup.id != groupId =>
GroupAlreadyExistsError(GroupAlreadyExistsErrorMsg.format(name, existingGroup.email)).asLeft
case _ =>
().asRight

View File

@ -25,6 +25,8 @@ trait MembershipServiceAlgebra {
def createGroup(inputGroup: Group, authPrincipal: AuthPrincipal): Result[Group]
def listEmailDomains(authPrincipal: AuthPrincipal):Result[List[String]]
def updateGroup(
groupId: String,
name: String,
@ -39,12 +41,15 @@ trait MembershipServiceAlgebra {
def getGroup(id: String, authPrincipal: AuthPrincipal): Result[Group]
def getGroupChange(id: String, authPrincipal: AuthPrincipal): Result[GroupChangeInfo]
def listMyGroups(
groupNameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
authPrincipal: AuthPrincipal,
ignoreAccess: Boolean
ignoreAccess: Boolean,
abridged: Boolean = false
): Result[ListMyGroupsResponse]
def listMembers(
@ -58,7 +63,7 @@ trait MembershipServiceAlgebra {
def getGroupActivity(
groupId: String,
startFrom: Option[String],
startFrom: Option[Int],
maxItems: Int,
authPrincipal: AuthPrincipal
): Result[ListGroupChangesResponse]
@ -68,4 +73,14 @@ trait MembershipServiceAlgebra {
lockStatus: LockStatus,
authPrincipal: AuthPrincipal
): Result[User]
def getUser(
userIdentifier: String,
authPrincipal: AuthPrincipal
): Result[User]
def getUserDetails(
userIdentifier: String,
authPrincipal: AuthPrincipal
): Result[UserResponseInfo]
}

View File

@ -19,10 +19,12 @@ package vinyldns.api.domain.membership
import vinyldns.api.Interfaces.ensuring
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.api.domain.zone.NotAuthorizedError
import vinyldns.core.domain.membership.Group
import vinyldns.core.domain.membership.{Group, GroupChange}
object MembershipValidations {
private val canViewGroupDetails = true
def hasMembersAndAdmins(group: Group): Either[Throwable, Unit] =
ensuring(InvalidGroupError("Group must have at least one member and one admin")) {
group.memberIds.nonEmpty && group.adminUserIds.nonEmpty
@ -39,7 +41,17 @@ object MembershipValidations {
}
// Authorization check for viewing a group.
// NOTE(review): `canViewGroupDetails` is a constant `true` in this object, so
// this currently authorizes every principal; the membership/system-admin
// checks are effectively short-circuited — confirm this is intended.
def canSeeGroup(groupId: String, authPrincipal: AuthPrincipal): Either[Throwable, Unit] =
  ensuring(NotAuthorizedError("Not authorized")) {
    authPrincipal.isGroupMember(groupId) || authPrincipal.isSystemAdmin || canViewGroupDetails
  }
// Only group members or system admins may view a group's change history;
// anyone else gets NotAuthorizedError.
def canSeeGroupChange(groupId: String, authPrincipal: AuthPrincipal): Either[Throwable, Unit] =
  ensuring(NotAuthorizedError("Not authorized")) {
    authPrincipal.isGroupMember(groupId) || authPrincipal.isSystemAdmin
  }
// Fails with InvalidGroupRequestError when a looked-up group change was not
// found (i.e. the Option is empty).
def isGroupChangePresent(groupChange: Option[GroupChange]): Either[Throwable, Unit] =
  ensuring(InvalidGroupRequestError("Invalid Group Change ID")) {
    groupChange.isDefined
  }
}

View File

@ -17,13 +17,13 @@
package vinyldns.api.domain.record
import vinyldns.api.domain.zone.RecordSetChangeInfo
import vinyldns.core.domain.record.ListRecordSetChangesResults
import vinyldns.core.domain.record.{ListFailedRecordSetChangesResults, ListRecordSetChangesResults, RecordSetChange}
// Paginated response for listing a zone's recordset changes.
// NOTE(review): the scraped diff showed both the old `Option[String]` and new
// `Option[Int]` cursor fields for nextId/startFrom; the Int-based pagination
// cursors (the new side of the diff) are kept here.
case class ListRecordSetChangesResponse(
  zoneId: String,
  recordSetChanges: List[RecordSetChangeInfo] = Nil,
  nextId: Option[Int],
  startFrom: Option[Int],
  maxItems: Int
)
@ -41,3 +41,43 @@ object ListRecordSetChangesResponse {
listResults.maxItems
)
}
// Paginated response for a recordset's change history. zoneId is optional —
// presumably history can be requested without zone context; confirm with callers.
case class ListRecordSetHistoryResponse(
  zoneId: Option[String],
  recordSetChanges: List[RecordSetChangeInfo] = Nil,
  nextId: Option[Int],
  startFrom: Option[Int],
  maxItems: Int
)

object ListRecordSetHistoryResponse {
  // Builds the response from repository list results plus pre-resolved change info;
  // pagination fields are copied straight from the repository results.
  def apply(
      zoneId: Option[String],
      listResults: ListRecordSetChangesResults,
      info: List[RecordSetChangeInfo]
  ): ListRecordSetHistoryResponse =
    ListRecordSetHistoryResponse(
      zoneId,
      info,
      listResults.nextId,
      listResults.startFrom,
      listResults.maxItems
    )
}
// Paginated response listing recordset changes that failed to apply.
case class ListFailedRecordSetChangesResponse(
  failedRecordSetChanges: List[RecordSetChange] = Nil,
  nextId: Int,
  startFrom: Int,
  maxItems: Int
)

object ListFailedRecordSetChangesResponse {
  // Builds the response directly from the repository's failed-changes results.
  // NOTE(review): the parameter name is capitalized (non-conventional); kept
  // as-is for source compatibility with named-argument call sites.
  def apply(
      ListFailedRecordSetChanges: ListFailedRecordSetChangesResults
  ): ListFailedRecordSetChangesResponse =
    ListFailedRecordSetChangesResponse(
      ListFailedRecordSetChanges.items,
      ListFailedRecordSetChanges.nextId,
      ListFailedRecordSetChanges.startFrom,
      ListFailedRecordSetChanges.maxItems)
}

View File

@ -0,0 +1,47 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api.domain.record
import cats.effect.IO
import org.slf4j.LoggerFactory
import scalikejdbc.DB
import vinyldns.core.domain.record.{ListRecordSetResults, NameSort, RecordSetCacheRepository, RecordSetRepository, RecordTypeSort}
import vinyldns.mysql.TransactionProvider
class RecordSetCacheService(recordSetRepository: RecordSetRepository,
                            recordSetCacheRepository: RecordSetCacheRepository) extends TransactionProvider {
  private val logger = LoggerFactory.getLogger(classOf[RecordSetCacheService])

  /**
    * Walks the recordset repository in pages of 1000 and rebuilds the cache
    * entry for every record found.
    *
    * Fix: the previous version recursed unconditionally with `result.nextId`,
    * so once the last page returned `nextId = None` the traversal restarted
    * from the beginning and never terminated. Recursion now stops when the
    * repository reports no further page.
    *
    * @param nextId pagination cursor; None starts from the beginning
    * @return the list results of the final page processed
    */
  final def populateRecordSetCache(nextId: Option[String] = None): IO[ListRecordSetResults] = {
    logger.info(s"Populating recordset data. Starting at $nextId")
    for {
      result <- recordSetRepository.listRecordSets(None, nextId, Some(1000), None, None, None, NameSort.ASC, RecordTypeSort.ASC)
      _ <- executeWithinTransaction { db: DB =>
        IO {
          // Parallel per-record cache refresh within a single transaction.
          result.recordSets.par.foreach(recordSet => {
            recordSetCacheRepository.updateRecordDataList(db, recordSet.id, recordSet.records, recordSet.typ, recordSet.zoneId, recordSet.fqdn.get)
          })
        }
      }
      // Only continue while the repository reports another page.
      last <- result.nextId match {
        case Some(_) => populateRecordSetCache(result.nextId)
        case None => IO.pure(result)
      }
    } yield last
  }
}

View File

@ -17,8 +17,8 @@
package vinyldns.api.domain.record
import java.util.UUID
import org.joda.time.DateTime
import java.time.temporal.ChronoUnit
import java.time.Instant
import vinyldns.api.backend.dns.DnsConversions
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.zone.Zone
@ -37,7 +37,7 @@ object RecordSetChangeGenerator extends DnsConversions {
recordSet = recordSet.copy(
name = relativize(recordSet.name, zone.name),
id = UUID.randomUUID().toString, // TODO once user cant specify ID, no need to refresh it here
created = DateTime.now,
created = Instant.now.truncatedTo(ChronoUnit.MILLIS),
status = RecordSetStatus.Pending
),
userId = userId,
@ -69,7 +69,7 @@ object RecordSetChangeGenerator extends DnsConversions {
id = replacing.id,
name = relativize(newRecordSet.name, zone.name),
status = RecordSetStatus.PendingUpdate,
updated = Some(DateTime.now)
updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS))
),
userId = userId,
changeType = RecordSetChangeType.Update,
@ -105,7 +105,7 @@ object RecordSetChangeGenerator extends DnsConversions {
recordSet = recordSet.copy(
name = relativize(recordSet.name, zone.name),
status = RecordSetStatus.PendingDelete,
updated = Some(DateTime.now)
updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS))
),
userId = userId,
changeType = RecordSetChangeType.Delete,
@ -113,6 +113,25 @@ object RecordSetChangeGenerator extends DnsConversions {
singleBatchChangeIds = singleBatchChangeIds
)
// Builds a RecordSetChange of type Sync for a recordset found to be out of
// sync with the DNS backend.
// NOTE(review): the recordset status is set to PendingDelete while the change
// type is Sync — confirm this pairing is intended.
def forOutOfSync(
    recordSet: RecordSet,
    zone: Zone,
    userId: String,
    singleBatchChangeIds: List[String]
): RecordSetChange =
  RecordSetChange(
    zone = zone,
    recordSet = recordSet.copy(
      // Store the record name relative to the zone origin.
      name = relativize(recordSet.name, zone.name),
      status = RecordSetStatus.PendingDelete,
      updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS))
    ),
    userId = userId,
    changeType = RecordSetChangeType.Sync,
    // Keep the pre-change recordset for reference.
    updates = Some(recordSet),
    singleBatchChangeIds = singleBatchChangeIds
  )
def forDelete(
recordSet: RecordSet,
zone: Zone,
@ -150,7 +169,7 @@ object RecordSetChangeGenerator extends DnsConversions {
id = replacing.id,
name = relativize(newRecordSet.name, zone.name),
status = RecordSetStatus.Active,
updated = Some(DateTime.now),
updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)),
ownerGroupId = replacing.ownerGroupId
),
userId = "system",

View File

@ -27,31 +27,38 @@ import vinyldns.core.domain.zone.{Zone, ZoneCommandResult, ZoneRepository}
import vinyldns.core.queue.MessageQueue
import cats.data._
import cats.effect.IO
import org.slf4j.{Logger, LoggerFactory}
import org.xbill.DNS.ReverseMap
import vinyldns.api.config.HighValueDomainConfig
import vinyldns.api.config.{ZoneAuthConfigs, DottedHostsConfig, HighValueDomainConfig}
import vinyldns.api.domain.DomainValidations.{validateIpv4Address, validateIpv6Address}
import vinyldns.api.domain.access.AccessValidationsAlgebra
import vinyldns.core.domain.record.NameSort.NameSort
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.DomainHelpers.ensureTrailingDot
import vinyldns.core.domain.backend.{Backend, BackendResolver}
import vinyldns.core.domain.record.RecordTypeSort.RecordTypeSort
import vinyldns.core.notifier.{AllNotifiers, Notification}
import scala.util.matching.Regex
object RecordSetService {
def apply(
dataAccessor: ApiDataAccessor,
messageQueue: MessageQueue,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
validateRecordLookupAgainstDnsBackend: Boolean,
highValueDomainConfig: HighValueDomainConfig,
approvedNameServers: List[Regex]
): RecordSetService =
dataAccessor: ApiDataAccessor,
messageQueue: MessageQueue,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
validateRecordLookupAgainstDnsBackend: Boolean,
highValueDomainConfig: HighValueDomainConfig,
dottedHostsConfig: DottedHostsConfig,
approvedNameServers: List[Regex],
useRecordSetCache: Boolean,
notifiers: AllNotifiers
): RecordSetService =
new RecordSetService(
dataAccessor.zoneRepository,
dataAccessor.groupRepository,
dataAccessor.recordSetRepository,
dataAccessor.recordSetCacheRepository,
dataAccessor.recordChangeRepository,
dataAccessor.userRepository,
messageQueue,
@ -59,30 +66,44 @@ object RecordSetService {
backendResolver,
validateRecordLookupAgainstDnsBackend,
highValueDomainConfig,
approvedNameServers
dottedHostsConfig,
approvedNameServers,
useRecordSetCache,
notifiers
)
}
class RecordSetService(
zoneRepository: ZoneRepository,
groupRepository: GroupRepository,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
userRepository: UserRepository,
messageQueue: MessageQueue,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
validateRecordLookupAgainstDnsBackend: Boolean,
highValueDomainConfig: HighValueDomainConfig,
approvedNameServers: List[Regex]
) extends RecordSetServiceAlgebra {
zoneRepository: ZoneRepository,
groupRepository: GroupRepository,
recordSetRepository: RecordSetRepository,
recordSetCacheRepository: RecordSetCacheRepository,
recordChangeRepository: RecordChangeRepository,
userRepository: UserRepository,
messageQueue: MessageQueue,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
validateRecordLookupAgainstDnsBackend: Boolean,
highValueDomainConfig: HighValueDomainConfig,
dottedHostsConfig: DottedHostsConfig,
approvedNameServers: List[Regex],
useRecordSetCache: Boolean,
notifiers: AllNotifiers
) extends RecordSetServiceAlgebra {
import RecordSetValidations._
import accessValidation._
val logger: Logger = LoggerFactory.getLogger(classOf[RecordSetService])
val approverOwnerShipTransferStatus = List(OwnerShipTransferStatus.ManuallyApproved , OwnerShipTransferStatus.AutoApproved, OwnerShipTransferStatus.ManuallyRejected)
val requestorOwnerShipTransferStatus = List(OwnerShipTransferStatus.Cancelled , OwnerShipTransferStatus.Requested, OwnerShipTransferStatus.PendingReview)
def addRecordSet(recordSet: RecordSet, auth: AuthPrincipal): Result[ZoneCommandResult] =
for {
zone <- getZone(recordSet.zoneId)
authZones = dottedHostsConfig.zoneAuthConfigs.map(x => x.zone)
change <- RecordSetChangeGenerator.forAdd(recordSet, zone, Some(auth)).toResult
// because changes happen to the RS in forAdd itself, converting 1st and validating on that
rsForValidations = change.recordSet
@ -102,13 +123,27 @@ class RecordSetService(
ownerGroup <- getGroupIfProvided(rsForValidations.ownerGroupId)
_ <- canUseOwnerGroup(rsForValidations.ownerGroupId, ownerGroup, auth).toResult
_ <- noCnameWithNewName(rsForValidations, existingRecordsWithName, zone).toResult
allowedZoneList <- getAllowedZones(authZones).toResult[Set[String]]
isInAllowedUsers = checkIfInAllowedUsers(zone, dottedHostsConfig, auth)
isUserInAllowedGroups <- checkIfInAllowedGroups(zone, dottedHostsConfig, auth).toResult[Boolean]
isAllowedUser = isInAllowedUsers || isUserInAllowedGroups
isRecordTypeAllowed = checkIfInAllowedRecordType(zone, dottedHostsConfig, rsForValidations)
isRecordTypeAndUserAllowed = isAllowedUser && isRecordTypeAllowed
allowedDotsLimit = getAllowedDotsLimit(zone, dottedHostsConfig)
recordFqdnDoesNotAlreadyExist <- recordFQDNDoesNotExist(rsForValidations, zone).toResult[Boolean]
_ <- typeSpecificValidations(
rsForValidations,
existingRecordsWithName,
zone,
None,
approvedNameServers
approvedNameServers,
recordFqdnDoesNotAlreadyExist,
allowedZoneList,
isRecordTypeAndUserAllowed,
allowedDotsLimit
).toResult
_ <- if(allowedZoneList.contains(zone.name)) checkAllowedDots(allowedDotsLimit, rsForValidations, zone).toResult else ().toResult
_ <- if(allowedZoneList.contains(zone.name)) isNotApexEndsWithDot(rsForValidations, zone).toResult else ().toResult
_ <- messageQueue.send(change).toResult[Unit]
} yield change
@ -119,13 +154,31 @@ class RecordSetService(
_ <- unchangedRecordName(existing, recordSet, zone).toResult
_ <- unchangedRecordType(existing, recordSet).toResult
_ <- unchangedZoneId(existing, recordSet).toResult
_ <- if(requestorOwnerShipTransferStatus.contains(recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>"))
&& !auth.isSuper && !auth.isGroupMember(existing.ownerGroupId.getOrElse("None")))
unchangedRecordSet(existing, recordSet).toResult else ().toResult
_ <- if(existing.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") == OwnerShipTransferStatus.Cancelled
&& !auth.isSuper) {
recordSetOwnerShipApproveStatus(recordSet).toResult
} else ().toResult
_ = logger.info(s"updated recordsetgroupchange: ${recordSet.recordSetGroupChange}")
_ = logger.info(s"existing recordsetgroupchange: ${existing.recordSetGroupChange}")
recordSet <- updateRecordSetGroupChangeStatus(recordSet, existing, zone)
change <- RecordSetChangeGenerator.forUpdate(existing, recordSet, zone, Some(auth)).toResult
// because changes happen to the RS in forUpdate itself, converting 1st and validating on that
rsForValidations = change.recordSet
superUserCanUpdateOwnerGroup = canSuperUserUpdateOwnerGroup(existing, recordSet, zone, auth)
_ <- isNotHighValueDomain(recordSet, zone, highValueDomainConfig).toResult
_ <- canUpdateRecordSet(auth, existing.name, existing.typ, zone, existing.ownerGroupId).toResult
_ <- if(requestorOwnerShipTransferStatus.contains(recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>"))
&& !auth.isSuper && !auth.isGroupMember(existing.ownerGroupId.getOrElse("None"))) ().toResult
else canUpdateRecordSet(auth, existing.name, existing.typ, zone, existing.ownerGroupId, superUserCanUpdateOwnerGroup).toResult
ownerGroup <- getGroupIfProvided(rsForValidations.ownerGroupId)
_ <- canUseOwnerGroup(rsForValidations.ownerGroupId, ownerGroup, auth).toResult
_ <- if(requestorOwnerShipTransferStatus.contains(recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>"))
&& !auth.isSuper && !auth.isGroupMember(existing.ownerGroupId.getOrElse("None")))
canUseOwnerGroup(rsForValidations.recordSetGroupChange.map(_.requestedOwnerGroupId).get, ownerGroup, auth).toResult
else if(approverOwnerShipTransferStatus.contains(recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>"))
&& !auth.isSuper) canUseOwnerGroup(existing.ownerGroupId, ownerGroup, auth).toResult
else canUseOwnerGroup(rsForValidations.ownerGroupId, ownerGroup, auth).toResult
_ <- notPending(existing).toResult
existingRecordsWithName <- recordSetRepository
.getRecordSetsByName(zone.id, rsForValidations.name)
@ -138,21 +191,40 @@ class RecordSetService(
validateRecordLookupAgainstDnsBackend
)
_ <- noCnameWithNewName(rsForValidations, existingRecordsWithName, zone).toResult
authZones = dottedHostsConfig.zoneAuthConfigs.map(x => x.zone)
allowedZoneList <- getAllowedZones(authZones).toResult[Set[String]]
isInAllowedUsers = checkIfInAllowedUsers(zone, dottedHostsConfig, auth)
isUserInAllowedGroups <- checkIfInAllowedGroups(zone, dottedHostsConfig, auth).toResult[Boolean]
isAllowedUser = isInAllowedUsers || isUserInAllowedGroups
isRecordTypeAllowed = checkIfInAllowedRecordType(zone, dottedHostsConfig, rsForValidations)
isRecordTypeAndUserAllowed = isAllowedUser && isRecordTypeAllowed
allowedDotsLimit = getAllowedDotsLimit(zone, dottedHostsConfig)
_ <- typeSpecificValidations(
rsForValidations,
existingRecordsWithName,
zone,
Some(existing),
approvedNameServers
approvedNameServers,
true,
allowedZoneList,
isRecordTypeAndUserAllowed,
allowedDotsLimit
).toResult
_ <- if(existing.name == rsForValidations.name) ().toResult else if(allowedZoneList.contains(zone.name)) checkAllowedDots(allowedDotsLimit, rsForValidations, zone).toResult else ().toResult
_ <- if(allowedZoneList.contains(zone.name)) isNotApexEndsWithDot(rsForValidations, zone).toResult else ().toResult
_ <- messageQueue.send(change).toResult[Unit]
_ <- if(recordSet.recordSetGroupChange != None &&
recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") != OwnerShipTransferStatus.None &&
recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") != OwnerShipTransferStatus.AutoApproved)
notifiers.notify(Notification(change)).toResult
else ().toResult
} yield change
def deleteRecordSet(
recordSetId: String,
zoneId: String,
auth: AuthPrincipal
): Result[ZoneCommandResult] =
recordSetId: String,
zoneId: String,
auth: AuthPrincipal
): Result[ZoneCommandResult] =
for {
zone <- getZone(zoneId)
existing <- getRecordSet(recordSetId)
@ -164,20 +236,259 @@ class RecordSetService(
_ <- messageQueue.send(change).toResult[Unit]
} yield change
// Update ownership transfer when the zone is shared.
// Reconciles the ownership-transfer request carried on the updated recordset
// against the existing recordset's transfer state:
//   - shared zone + approver status (ManuallyApproved / AutoApproved /
//     ManuallyRejected): apply the decision; approval moves ownerGroupId to
//     the requested group
//   - shared zone + other non-None status (requestor side): record the
//     request state (Requested/PendingReview collapse to PendingReview)
//   - non-shared zone with a transfer status: require the status be unchanged,
//     then normalize it to None
//   - no transfer requested at all: normalize the status to None
def updateRecordSetGroupChangeStatus(recordSet: RecordSet, existing: RecordSet, zone: Zone): Result[RecordSet] = {
  // Fall back to an empty transfer (status None) when either side carries no transfer info.
  val existingOwnerShipTransfer = existing.recordSetGroupChange.getOrElse(OwnerShipTransfer.apply(OwnerShipTransferStatus.None, Some("none")))
  val ownerShipTransfer = recordSet.recordSetGroupChange.getOrElse(OwnerShipTransfer.apply(OwnerShipTransferStatus.None, Some("none")))
  if (recordSet.recordSetGroupChange != None &&
    ownerShipTransfer.ownerShipTransferStatus != OwnerShipTransferStatus.None)
    if (zone.shared){
      if (approverOwnerShipTransferStatus.contains(ownerShipTransfer.ownerShipTransferStatus)) {
        val recordSetOwnerApproval =
          ownerShipTransfer.ownerShipTransferStatus match {
            case OwnerShipTransferStatus.ManuallyApproved =>
              // Approval: the previously requested group becomes the owner group.
              recordSet.copy(ownerGroupId = existingOwnerShipTransfer.requestedOwnerGroupId,
                recordSetGroupChange = Some(ownerShipTransfer.copy(ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyApproved,
                  requestedOwnerGroupId = existingOwnerShipTransfer.requestedOwnerGroupId)))
            case OwnerShipTransferStatus.ManuallyRejected =>
              // Rejection: current owner kept; requested group preserved for audit.
              recordSet.copy(
                recordSetGroupChange = Some(ownerShipTransfer.copy(ownerShipTransferStatus = OwnerShipTransferStatus.ManuallyRejected,
                  requestedOwnerGroupId = existingOwnerShipTransfer.requestedOwnerGroupId)))
            case OwnerShipTransferStatus.AutoApproved =>
              // Auto-approval uses the requested group from the incoming update itself.
              recordSet.copy(
                ownerGroupId = ownerShipTransfer.requestedOwnerGroupId,
                recordSetGroupChange = Some(ownerShipTransfer.copy(ownerShipTransferStatus = OwnerShipTransferStatus.AutoApproved,
                  requestedOwnerGroupId = ownerShipTransfer.requestedOwnerGroupId)))
            case _ => recordSet.copy(
              recordSetGroupChange = Some(ownerShipTransfer.copy(
                ownerShipTransferStatus = OwnerShipTransferStatus.None,
                requestedOwnerGroupId = Some("null"))))
          }
        for {
          recordSet <- recordSetOwnerApproval.toResult
        } yield recordSet
      }
      else {
        // Requestor-side statuses.
        // NOTE(review): this match only covers Cancelled and Requested/PendingReview;
        // any other status reaching this branch would throw a MatchError — confirm
        // callers guarantee one of these statuses here.
        val recordSetOwnerRequest =
          ownerShipTransfer.ownerShipTransferStatus match {
            case OwnerShipTransferStatus.Cancelled =>
              recordSet.copy(recordSetGroupChange = Some(ownerShipTransfer.copy(
                ownerShipTransferStatus = OwnerShipTransferStatus.Cancelled,
                requestedOwnerGroupId = existingOwnerShipTransfer.requestedOwnerGroupId)))
            case OwnerShipTransferStatus.Requested | OwnerShipTransferStatus.PendingReview => recordSet.copy(
              recordSetGroupChange = Some(ownerShipTransfer.copy(ownerShipTransferStatus = OwnerShipTransferStatus.PendingReview)))
          }
        for {
          recordSet <- recordSetOwnerRequest.toResult
        } yield recordSet
      }
    } else for {
      // Non-shared zone: the transfer status must not change; it is then normalized to None.
      _ <- unchangedRecordSetOwnershipStatus(recordSet, existing).toResult
    } yield recordSet.copy(
      recordSetGroupChange = Some(ownerShipTransfer.copy(
        ownerShipTransferStatus = OwnerShipTransferStatus.None,
        requestedOwnerGroupId = Some("null"))))
  else recordSet.copy(
    recordSetGroupChange = Some(ownerShipTransfer.copy(
      ownerShipTransferStatus = OwnerShipTransferStatus.None,
      requestedOwnerGroupId = Some("null")))).toResult
}
// For dotted hosts. Check if a record that may conflict with a dotted host exists.
// Builds the record's FQDN and looks it up in the `recordset` mysql table;
// returns true when no record of the same type already exists under that FQDN.
def recordFQDNDoesNotExist(newRecordSet: RecordSet, zone: Zone): IO[Boolean] = {
  // Apex records already carry the zone name; others get it appended.
  val newRecordFqdn = if (newRecordSet.name != zone.name) newRecordSet.name + "." + zone.name else newRecordSet.name
  recordSetRepository
    .getRecordSetsByFQDNs(Set(newRecordFqdn))
    .map(existing => !doesRecordWithSameTypeExist(existing, newRecordSet))
}
// Check if a record with the same type already exists in the 'recordset' mysql table.
// An empty candidate list yields false, as before.
def doesRecordWithSameTypeExist(oldRecord: List[RecordSet], newRecord: RecordSet): Boolean =
  oldRecord.exists(_.typ == newRecord.typ)
// Get zones that are allowed to create dotted hosts using the zones present in
// the dotted hosts config. Exact names and wildcard patterns are resolved by
// separate repository queries and the results merged.
def getAllowedZones(zones: List[String]): IO[Set[String]] =
  if (zones.isEmpty) IO.pure(Set.empty)
  else {
    // Split config entries into wildcard patterns (translated to SQL-style '%'
    // wildcards) and exact zone names.
    val (wildcard, named) = zones.partition(_.contains("*"))
    val wildcardPatterns = wildcard.map(_.replace("*", "%"))
    for {
      byName <- zoneRepository.getZonesByNames(named.toSet)
      byFilter <- zoneRepository.getZonesByFilters(wildcardPatterns.toSet)
    } yield (byName ++ byFilter).map(_.name)
  }
// Look up the dots limit configured for this zone in the dotted hosts config;
// returns 0 (dotted hosts disallowed) when no config entry matches.
// Exact zone entries take precedence over wildcard entries, matching the
// original decision order. (The original comment said "users" — it was copied
// from a sibling method; this method concerns the dots limit.)
// Fixes: `filter(...).head` replaced with `find`, and the wildcard lookup is
// restricted to entries that actually contain '*' so unescaped dots in exact
// entries cannot accidentally regex-match a different zone.
def getAllowedDotsLimit(zone: Zone, config: DottedHostsConfig): Int = {
  // Normalize to a trailing-dot absolute name before comparing with config.
  val zoneName = if (zone.name.takeRight(1) != ".") zone.name + "." else zone.name
  config.zoneAuthConfigs.find(_.zone == zoneName) match {
    case Some(exact) => exact.dotsLimit
    case None =>
      // Wildcards in config entries match any run of alphanumerics and dots.
      config.zoneAuthConfigs
        .find(x => x.zone.contains("*") && zoneName.matches(x.zone.replace("*", "[A-Za-z0-9.]*")))
        .map(_.dotsLimit)
        .getOrElse(0)
  }
}
// Check if the user is allowed to create dotted hosts using the users present
// in the dotted hosts config: the signed-in user's name must appear in the
// user list of a config entry matching the zone (exact-name entries take
// precedence over wildcard entries, as before).
// Fix: the `if (contains) true else false` boilerplate and the duplicated
// exact/wildcard branches are collapsed.
def checkIfInAllowedUsers(zone: Zone, config: DottedHostsConfig, auth: AuthPrincipal): Boolean = {
  // Normalize to a trailing-dot absolute name before comparing with config.
  val zoneName = if (zone.name.takeRight(1) != ".") zone.name + "." else zone.name
  val exactEntries = config.zoneAuthConfigs.filter(_.zone == zoneName)
  val allowedUsers =
    if (exactEntries.nonEmpty)
      exactEntries.flatMap(_.userList)
    else
      // Wildcards in config entries match any run of alphanumerics and dots.
      config.zoneAuthConfigs
        .filter(x => x.zone.contains("*") && zoneName.matches(x.zone.replace("*", "[A-Za-z0-9.]*")))
        .flatMap(_.userList)
  allowedUsers.contains(auth.signedInUser.userName)
}
// Check if the user is allowed to create dotted hosts using the record types
// present in the dotted hosts config: the recordset's type must appear in the
// record-type list of a config entry matching the zone (exact-name entries
// take precedence over wildcard entries, as before).
// Fix: the `if (contains) true else false` boilerplate and the duplicated
// exact/wildcard branches are collapsed.
def checkIfInAllowedRecordType(zone: Zone, config: DottedHostsConfig, rs: RecordSet): Boolean = {
  // Normalize to a trailing-dot absolute name before comparing with config.
  val zoneName = if (zone.name.takeRight(1) != ".") zone.name + "." else zone.name
  val exactEntries = config.zoneAuthConfigs.filter(_.zone == zoneName)
  val allowedTypes =
    if (exactEntries.nonEmpty)
      exactEntries.flatMap(_.recordTypes)
    else
      // Wildcards in config entries match any run of alphanumerics and dots.
      config.zoneAuthConfigs
        .filter(x => x.zone.contains("*") && zoneName.matches(x.zone.replace("*", "[A-Za-z0-9.]*")))
        .flatMap(_.recordTypes)
  allowedTypes.contains(rs.typ.toString)
}
// Check if the user is allowed to create dotted hosts using the groups present
// in the dotted hosts config: the signed-in user must be a member of one of
// the groups named by a config entry matching the zone (exact-name entries
// take precedence over wildcard entries, as before).
// Fix: the redundant `if (usersList.isEmpty) Seq.empty else usersList.map(...)`
// is collapsed (mapping an empty sequence is already empty), and the duplicated
// exact/wildcard branches are merged.
def checkIfInAllowedGroups(zone: Zone, config: DottedHostsConfig, auth: AuthPrincipal): IO[Boolean] = {
  // Normalize to a trailing-dot absolute name before comparing with config.
  val zoneName = if (zone.name.takeRight(1) != ".") zone.name + "." else zone.name
  val exactEntries = config.zoneAuthConfigs.filter(_.zone == zoneName)
  val groups =
    if (exactEntries.nonEmpty)
      exactEntries.flatMap(_.groupList)
    else
      // Wildcards in config entries match any run of alphanumerics and dots.
      config.zoneAuthConfigs
        .filter(x => x.zone.contains("*") && zoneName.matches(x.zone.replace("*", "[A-Za-z0-9.]*")))
        .flatMap(_.groupList)
  for {
    groupsInConfig <- groupRepository.getGroupsByName(groups.toSet)
    members = groupsInConfig.flatMap(_.memberIds)
    // Skip the user lookup entirely when no configured group has members.
    userNames <- if (members.isEmpty) IO.pure(Seq.empty[String])
                 else userRepository.getUsers(members, None, None).map(_.users.map(_.userName))
  } yield userNames.contains(auth.signedInUser.userName)
}
def getRecordSet(
recordSetId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo] =
recordSetId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo] =
for {
recordSet <- getRecordSet(recordSetId)
groupName <- getGroupName(recordSet.ownerGroupId)
} yield RecordSetInfo(recordSet, groupName)
def getRecordSetCount(zoneId: String, authPrincipal: AuthPrincipal): Result[RecordSetCount] = {
for {
zone <- getZone(zoneId)
_ <- canSeeZone(authPrincipal, zone).toResult
count <- recordSetRepository.getRecordSetCount(zoneId).toResult
} yield RecordSetCount(count)
}
def getRecordSetByZone(
recordSetId: String,
zoneId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo] =
recordSetId: String,
zoneId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo] =
for {
zone <- getZone(zoneId)
recordSet <- getRecordSet(recordSetId)
@ -192,14 +503,15 @@ class RecordSetService(
} yield RecordSetInfo(recordSet, groupName)
def listRecordSets(
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal
): Result[ListGlobalRecordSetsResponse] =
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListGlobalRecordSetsResponse] =
for {
_ <- validRecordNameFilterLength(recordNameFilter).toResult
formattedRecordNameFilter <- formatRecordNameFilter(recordNameFilter)
@ -211,7 +523,8 @@ class RecordSetService(
Some(formattedRecordNameFilter),
recordTypeFilter,
recordOwnerGroupFilter,
nameSort
nameSort,
recordTypeSort
)
.toResult[ListRecordSetResults]
rsOwnerGroupIds = recordSetResults.recordSets.flatMap(_.ownerGroupId).toSet
@ -230,16 +543,83 @@ class RecordSetService(
recordSetResults.nameSort
)
/**
* Searches recordsets, optionally using the recordset cache (controlled by the 'use-recordset-cache' setting)
*
* @param startFrom The starting record
* @param maxItems The maximum number of items
* @param recordNameFilter The record name filter
* @param recordTypeFilter The record type filter
* @param recordOwnerGroupId The owner group identifier
* @param nameSort The sort direction
* @param authPrincipal The authenticated principal
* @return A {@link ListGlobalRecordSetsResponse}
*/
def searchRecordSets(
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListGlobalRecordSetsResponse] = {
for {
_ <- validRecordNameFilterLength(recordNameFilter).toResult
formattedRecordNameFilter <- formatRecordNameFilter(recordNameFilter)
recordSetResults <- if (useRecordSetCache) {
// Search the cache
recordSetCacheRepository.listRecordSetData(
None,
startFrom,
maxItems,
Some(formattedRecordNameFilter),
recordTypeFilter,
recordOwnerGroupFilter,
nameSort
).toResult[ListRecordSetResults]
} else {
// Search the record table directly
recordSetRepository.listRecordSets(
None,
startFrom,
maxItems,
Some(formattedRecordNameFilter),
recordTypeFilter,
recordOwnerGroupFilter,
nameSort,
recordTypeSort
).toResult[ListRecordSetResults]
}
rsOwnerGroupIds = recordSetResults.recordSets.flatMap(_.ownerGroupId).toSet
rsZoneIds = recordSetResults.recordSets.map(_.zoneId).toSet
rsGroups <- groupRepository.getGroups(rsOwnerGroupIds).toResult[Set[Group]]
rsZones <- zoneRepository.getZones(rsZoneIds).toResult[Set[Zone]]
setsWithSupplementalInfo = getSupplementalInfo(recordSetResults.recordSets, rsGroups, rsZones)
} yield ListGlobalRecordSetsResponse(
setsWithSupplementalInfo,
recordSetResults.startFrom,
recordSetResults.nextId,
recordSetResults.maxItems,
recordNameFilter,
recordSetResults.recordTypeFilter,
recordSetResults.recordOwnerGroupFilter,
recordSetResults.nameSort
)
}
def listRecordSetsByZone(
zoneId: String,
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: Option[String],
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal
): Result[ListRecordSetsByZoneResponse] =
zoneId: String,
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: Option[String],
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupFilter: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListRecordSetsByZoneResponse] =
for {
zone <- getZone(zoneId)
_ <- canSeeZone(authPrincipal, zone).toResult
@ -251,7 +631,8 @@ class RecordSetService(
recordNameFilter,
recordTypeFilter,
recordOwnerGroupFilter,
nameSort
nameSort,
recordTypeSort
)
.toResult[ListRecordSetResults]
rsOwnerGroupIds = recordSetResults.recordSets.flatMap(_.ownerGroupId).toSet
@ -266,14 +647,15 @@ class RecordSetService(
recordSetResults.recordNameFilter,
recordSetResults.recordTypeFilter,
recordSetResults.recordOwnerGroupFilter,
recordSetResults.nameSort
recordSetResults.nameSort,
recordSetResults.recordTypeSort
)
def getRecordSetChange(
zoneId: String,
changeId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetChange] =
zoneId: String,
changeId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetChange] =
for {
zone <- getZone(zoneId)
change <- recordChangeRepository
@ -294,19 +676,62 @@ class RecordSetService(
} yield change
def listRecordSetChanges(
zoneId: String,
startFrom: Option[String] = None,
maxItems: Int = 100,
authPrincipal: AuthPrincipal
): Result[ListRecordSetChangesResponse] =
zoneId: String,
startFrom: Option[Int] = None,
maxItems: Int = 100,
authPrincipal: AuthPrincipal
): Result[ListRecordSetChangesResponse] =
for {
zone <- getZone(zoneId)
_ <- canSeeZone(authPrincipal, zone).toResult
recordSetChangesResults <- recordChangeRepository
.listRecordSetChanges(Some(zone.id), startFrom, maxItems, None, None)
.toResult[ListRecordSetChangesResults]
recordSetChangesInfo <- buildRecordSetChangeInfo(recordSetChangesResults.items)
} yield ListRecordSetChangesResponse(zoneId, recordSetChangesResults, recordSetChangesInfo)
def listRecordSetChangeHistory(
zoneId: Option[String] = None,
startFrom: Option[Int] = None,
maxItems: Int = 100,
fqdn: Option[String] = None,
recordType: Option[RecordType] = None,
authPrincipal: AuthPrincipal
): Result[ListRecordSetHistoryResponse] =
for {
zone <- getZone(zoneId)
zone <- getZone(zoneId.get)
_ <- canSeeZone(authPrincipal, zone).toResult
recordSetChangesResults <- recordChangeRepository
.listRecordSetChanges(zone.id, startFrom, maxItems)
.listRecordSetChanges(zoneId, startFrom, maxItems, fqdn, recordType)
.toResult[ListRecordSetChangesResults]
recordSetChangesInfo <- buildRecordSetChangeInfo(recordSetChangesResults.items)
} yield ListRecordSetChangesResponse(zoneId, recordSetChangesResults, recordSetChangesInfo)
} yield ListRecordSetHistoryResponse(zoneId, recordSetChangesResults, recordSetChangesInfo)
def listFailedRecordSetChanges(
authPrincipal: AuthPrincipal,
zoneId: Option[String] = None,
startFrom: Int= 0,
maxItems: Int = 100
): Result[ListFailedRecordSetChangesResponse] =
for {
recordSetChangesFailedResults <- recordChangeRepository
.listFailedRecordSetChanges(zoneId, maxItems, startFrom)
.toResult[ListFailedRecordSetChangesResults]
_ <- zoneAccess(recordSetChangesFailedResults.items, authPrincipal).toResult
} yield
ListFailedRecordSetChangesResponse(
recordSetChangesFailedResults.items,
recordSetChangesFailedResults.nextId,
startFrom,
maxItems)
def zoneAccess(
RecordSetCh: List[RecordSetChange],
auth: AuthPrincipal
): List[Result[Unit]] =
RecordSetCh.map { zn =>
canSeeZone(auth, zn.zone).toResult
}
def getZone(zoneId: String): Result[Zone] =
zoneRepository
@ -340,8 +765,8 @@ class RecordSetService(
.toResult
def buildRecordSetChangeInfo(
changes: List[RecordSetChange]
): Result[List[RecordSetChangeInfo]] = {
changes: List[RecordSetChange]
): Result[List[RecordSetChangeInfo]] = {
val userIds = changes.map(_.userId).toSet
for {
users <- userRepository.getUsers(userIds, None, None).map(_.users).toResult[Seq[User]]
@ -360,10 +785,10 @@ class RecordSetService(
}
def getSupplementalInfo(
recordsets: List[RecordSet],
groups: Set[Group],
zones: Set[Zone]
): List[RecordSetGlobalInfo] =
recordsets: List[RecordSet],
groups: Set[Group],
zones: Set[Zone]
): List[RecordSetGlobalInfo] =
recordsets.map { rs =>
val ownerGroupName =
rs.ownerGroupId.flatMap(groupId => groups.find(_.id == groupId).map(_.name))
@ -391,14 +816,14 @@ class RecordSetService(
}
def recordSetDoesNotExist(
backendConnection: Zone => Backend,
zone: Zone,
recordSet: RecordSet,
validateRecordLookupAgainstDnsBackend: Boolean
): Result[Unit] =
backendConnection: Zone => Backend,
zone: Zone,
recordSet: RecordSet,
validateRecordLookupAgainstDnsBackend: Boolean
): Result[Unit] =
recordSetDoesNotExistInDatabase(recordSet, zone).value.flatMap {
case Left(recordSetAlreadyExists: RecordSetAlreadyExists)
if validateRecordLookupAgainstDnsBackend =>
if validateRecordLookupAgainstDnsBackend =>
backendConnection(zone)
.resolve(recordSet.name, zone.name, recordSet.typ)
.attempt
@ -412,16 +837,16 @@ class RecordSetService(
}.toResult
def isUniqueUpdate(
backendConnection: Zone => Backend,
newRecordSet: RecordSet,
existingRecordsWithName: List[RecordSet],
zone: Zone,
validateRecordLookupAgainstDnsBackend: Boolean
): Result[Unit] =
backendConnection: Zone => Backend,
newRecordSet: RecordSet,
existingRecordsWithName: List[RecordSet],
zone: Zone,
validateRecordLookupAgainstDnsBackend: Boolean
): Result[Unit] =
RecordSetValidations
.recordSetDoesNotExist(newRecordSet, existingRecordsWithName, zone) match {
case Left(recordSetAlreadyExists: RecordSetAlreadyExists)
if validateRecordLookupAgainstDnsBackend =>
if validateRecordLookupAgainstDnsBackend =>
backendConnection(zone)
.resolve(newRecordSet.name, zone.name, newRecordSet.typ)
.attempt

View File

@ -17,12 +17,13 @@
package vinyldns.api.domain.record
import vinyldns.api.Interfaces.Result
import vinyldns.api.domain.zone.RecordSetInfo
import vinyldns.api.domain.zone.{RecordSetCount, RecordSetInfo}
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.zone.ZoneCommandResult
import vinyldns.api.route.{ListGlobalRecordSetsResponse, ListRecordSetsByZoneResponse}
import vinyldns.core.domain.record.NameSort.NameSort
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.RecordTypeSort.RecordTypeSort
import vinyldns.core.domain.record.{RecordSet, RecordSetChange}
trait RecordSetServiceAlgebra {
@ -31,54 +32,97 @@ trait RecordSetServiceAlgebra {
def updateRecordSet(recordSet: RecordSet, auth: AuthPrincipal): Result[ZoneCommandResult]
def deleteRecordSet(
recordSetId: String,
zoneId: String,
auth: AuthPrincipal
): Result[ZoneCommandResult]
recordSetId: String,
zoneId: String,
auth: AuthPrincipal
): Result[ZoneCommandResult]
def getRecordSet(
recordSetId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo]
recordSetId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo]
def getRecordSetByZone(
recordSetId: String,
zoneId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo]
recordSetId: String,
zoneId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetInfo]
def listRecordSets(
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupId: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal
): Result[ListGlobalRecordSetsResponse]
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupId: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListGlobalRecordSetsResponse]
/**
* Searches recordsets, optionally using the recordset cache (controlled by the 'use-recordset-cache' setting)
*
* @param startFrom The starting record
* @param maxItems The maximum number of items
* @param recordNameFilter The record name filter
* @param recordTypeFilter The record type filter
* @param recordOwnerGroupId THe owner group identifier
* @param nameSort The sort direction
* @param authPrincipal The authenticated principal
* @return A {@link ListGlobalRecordSetsResponse}
*/
def searchRecordSets(
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupId: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListGlobalRecordSetsResponse]
def listRecordSetsByZone(
zoneId: String,
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: Option[String],
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupId: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal
): Result[ListRecordSetsByZoneResponse]
zoneId: String,
startFrom: Option[String],
maxItems: Option[Int],
recordNameFilter: Option[String],
recordTypeFilter: Option[Set[RecordType]],
recordOwnerGroupId: Option[String],
nameSort: NameSort,
authPrincipal: AuthPrincipal,
recordTypeSort: RecordTypeSort
): Result[ListRecordSetsByZoneResponse]
def getRecordSetChange(
zoneId: String,
changeId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetChange]
zoneId: String,
changeId: String,
authPrincipal: AuthPrincipal
): Result[RecordSetChange]
def listRecordSetChanges(
zoneId: String,
startFrom: Option[String],
maxItems: Int,
authPrincipal: AuthPrincipal
): Result[ListRecordSetChangesResponse]
zoneId: String,
startFrom: Option[Int],
maxItems: Int,
authPrincipal: AuthPrincipal
): Result[ListRecordSetChangesResponse]
def listRecordSetChangeHistory(
zoneId: Option[String],
startFrom: Option[Int],
maxItems: Int,
fqdn: Option[String],
recordType: Option[RecordType],
authPrincipal: AuthPrincipal
): Result[ListRecordSetHistoryResponse]
def listFailedRecordSetChanges(
authPrincipal: AuthPrincipal,
zoneId: Option[String],
startFrom: Int,
maxItems: Int
): Result[ListFailedRecordSetChangesResponse]
def getRecordSetCount(zoneId: String, authPrincipal: AuthPrincipal): Result[RecordSetCount]
}

View File

@ -20,13 +20,14 @@ import cats.syntax.either._
import vinyldns.api.Interfaces._
import vinyldns.api.backend.dns.DnsConversions
import vinyldns.api.config.HighValueDomainConfig
import vinyldns.api.domain.DomainValidations.validateIpv4Address
import vinyldns.api.domain._
import vinyldns.core.domain.DomainHelpers._
import vinyldns.core.domain.record.RecordType._
import vinyldns.api.domain.zone._
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.membership.Group
import vinyldns.core.domain.record.{RecordSet, RecordType}
import vinyldns.core.domain.record.{OwnerShipTransferStatus, RecordSet, RecordType}
import vinyldns.core.domain.zone.Zone
import vinyldns.core.Messages._
@ -90,6 +91,69 @@ object RecordSetValidations {
!existingRecordsWithName.exists(rs => rs.id != newRecordSet.id && rs.typ == newRecordSet.typ)
)
// Check whether the record has dot or not
def checkForDot(
newRecordSet: RecordSet,
zone: Zone,
existingRecordSet: Option[RecordSet] = None,
recordFqdnDoesNotExist: Boolean,
dottedHostZoneConfig: Set[String],
isRecordTypeAndUserAllowed: Boolean,
allowedDotsLimit: Int = 0
): Either[Throwable, Unit] = {
val zoneName = if(zone.name.takeRight(1) != ".") zone.name + "." else zone.name
// Check if the zone of the recordset is present in dotted hosts config list
val isDomainAllowed = dottedHostZoneConfig.contains(zoneName)
// Check if record set contains dot and if it is in zone which is allowed to have dotted records from dotted hosts config
if(allowedDotsLimit != 0 && newRecordSet.name.contains(".") && isDomainAllowed && newRecordSet.name != zone.name) {
if(!isRecordTypeAndUserAllowed){
isUserAndRecordTypeAuthorized(newRecordSet, zone, existingRecordSet, recordFqdnDoesNotExist, isRecordTypeAndUserAllowed)
}
else {
isDotted(newRecordSet, zone, existingRecordSet, recordFqdnDoesNotExist, isRecordTypeAndUserAllowed)
}
}
else {
isNotDotted(newRecordSet, zone, existingRecordSet)
}
}
// For dotted host. Check if a record is already present which conflicts with the new dotted record. If so, throw an error
def isDotted(
newRecordSet: RecordSet,
zone: Zone,
existingRecordSet: Option[RecordSet] = None,
recordFqdnDoesNotExist: Boolean,
isRecordTypeAndUserAllowed: Boolean
): Either[Throwable, Unit] =
ensuring(
InvalidRequest(
s"Record with fqdn '${newRecordSet.name}.${zone.name}' cannot be created. " +
s"Please check if a record with the same FQDN and type already exist and make the change there."
)
)(
(newRecordSet.name != zone.name || existingRecordSet.exists(_.name == newRecordSet.name)) && recordFqdnDoesNotExist && isRecordTypeAndUserAllowed
)
// For dotted host. Check if the user is authorized and the record type is allowed. If not, throw an error
def isUserAndRecordTypeAuthorized(
newRecordSet: RecordSet,
zone: Zone,
existingRecordSet: Option[RecordSet] = None,
recordFqdnDoesNotExist: Boolean,
isRecordTypeAndUserAllowed: Boolean
): Either[Throwable, Unit] =
ensuring(
InvalidRequest(
s"Record type is not allowed or the user is not authorized to create a dotted host in the zone '${zone.name}'"
)
)(
(newRecordSet.name != zone.name || existingRecordSet.exists(_.name == newRecordSet.name)) && recordFqdnDoesNotExist && isRecordTypeAndUserAllowed
)
// Check if the recordset contains dot but is not in the allowed zones to create dotted records. If so, throw an error
def isNotDotted(
newRecordSet: RecordSet,
zone: Zone,
@ -110,16 +174,20 @@ object RecordSetValidations {
existingRecordsWithName: List[RecordSet],
zone: Zone,
existingRecordSet: Option[RecordSet],
approvedNameServers: List[Regex]
approvedNameServers: List[Regex],
recordFqdnDoesNotExist: Boolean,
dottedHostZoneConfig: Set[String],
isRecordTypeAndUserAllowed: Boolean,
allowedDotsLimit: Int = 0
): Either[Throwable, Unit] =
newRecordSet.typ match {
case CNAME => cnameValidations(newRecordSet, existingRecordsWithName, zone, existingRecordSet)
case NS => nsValidations(newRecordSet, zone, existingRecordSet, approvedNameServers)
case SOA => soaValidations(newRecordSet, zone)
case CNAME => cnameValidations(newRecordSet, existingRecordsWithName, zone, existingRecordSet, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
case NS => nsValidations(newRecordSet, zone, existingRecordSet, approvedNameServers, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
case SOA => soaValidations(newRecordSet, zone, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
case PTR => ptrValidations(newRecordSet, zone)
case SRV | TXT | NAPTR => ().asRight // SRV, TXT and NAPTR do not go through dotted host check
case DS => dsValidations(newRecordSet, existingRecordsWithName, zone)
case _ => isNotDotted(newRecordSet, zone, existingRecordSet)
case DS => dsValidations(newRecordSet, existingRecordsWithName, zone, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
case _ => checkForDot(newRecordSet, zone, existingRecordSet, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
}
def typeSpecificDeleteValidations(recordSet: RecordSet, zone: Zone): Either[Throwable, Unit] =
@ -140,7 +208,11 @@ object RecordSetValidations {
newRecordSet: RecordSet,
existingRecordsWithName: List[RecordSet],
zone: Zone,
existingRecordSet: Option[RecordSet] = None
existingRecordSet: Option[RecordSet] = None,
recordFqdnDoesNotExist: Boolean,
dottedHostZoneConfig: Set[String],
isRecordTypeAndUserAllowed: Boolean,
allowedDotsLimit: Int = 0
): Either[Throwable, Unit] = {
// cannot create a cname record if a record with the same exists
val noRecordWithName = {
@ -165,6 +237,16 @@ object RecordSetValidations {
)
}
val isNotIPv4inCname = {
ensuring(
RecordSetValidation(
s"""Invalid CNAME: ${newRecordSet.records.head.toString.dropRight(1)}, valid CNAME record data cannot be an IP address."""
)
)(
validateIpv4Address(newRecordSet.records.head.toString.dropRight(1)).isInvalid
)
}
for {
_ <- isNotOrigin(
newRecordSet,
@ -172,8 +254,9 @@ object RecordSetValidations {
"CNAME RecordSet cannot have name '@' because it points to zone origin"
)
_ <- noRecordWithName
_ <- isNotIPv4inCname
_ <- RDataWithConsecutiveDots
_ <- isNotDotted(newRecordSet, zone, existingRecordSet)
_ <- checkForDot(newRecordSet, zone, existingRecordSet, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
} yield ()
}
@ -181,15 +264,15 @@ object RecordSetValidations {
def dsValidations(
newRecordSet: RecordSet,
existingRecordsWithName: List[RecordSet],
zone: Zone
zone: Zone,
recordFqdnDoesNotExist: Boolean,
dottedHostZoneConfig: Set[String],
isRecordTypeAndUserAllowed: Boolean,
allowedDotsLimit: Int = 0
): Either[Throwable, Unit] = {
// see https://tools.ietf.org/html/rfc4035#section-2.4
val nsChecks = existingRecordsWithName.find(_.typ == NS) match {
case Some(ns) if ns.ttl == newRecordSet.ttl => ().asRight
case Some(ns) =>
InvalidRequest(
s"DS record [${newRecordSet.name}] must have TTL matching its linked NS (${ns.ttl})"
).asLeft
case Some(_) => ().asRight
case None =>
InvalidRequest(
s"DS record [${newRecordSet.name}] is invalid because there is no NS record with that " +
@ -198,7 +281,7 @@ object RecordSetValidations {
}
for {
_ <- isNotDotted(newRecordSet, zone)
_ <- checkForDot(newRecordSet, zone, None, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit)
_ <- isNotOrigin(
newRecordSet,
zone,
@ -212,10 +295,14 @@ object RecordSetValidations {
newRecordSet: RecordSet,
zone: Zone,
oldRecordSet: Option[RecordSet],
approvedNameServers: List[Regex]
approvedNameServers: List[Regex],
recordFqdnDoesNotExist: Boolean,
dottedHostZoneConfig: Set[String],
isRecordTypeAndUserAllowed: Boolean,
allowedDotsLimit: Int = 0
): Either[Throwable, Unit] = {
// TODO kept consistency with old validation. Not sure why NS could be dotted in reverse specifically
val isNotDottedHost = if (!zone.isReverse) isNotDotted(newRecordSet, zone) else ().asRight
val isNotDottedHost = if (!zone.isReverse) checkForDot(newRecordSet, zone, None, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit) else ().asRight
for {
_ <- isNotDottedHost
@ -237,9 +324,9 @@ object RecordSetValidations {
} yield ()
}
def soaValidations(newRecordSet: RecordSet, zone: Zone): Either[Throwable, Unit] =
def soaValidations(newRecordSet: RecordSet, zone: Zone, recordFqdnDoesNotExist: Boolean, dottedHostZoneConfig: Set[String], isRecordTypeAndUserAllowed: Boolean, allowedDotsLimit: Int = 0): Either[Throwable, Unit] =
// TODO kept consistency with old validation. in theory if SOA always == zone name, no special case is needed here
if (!zone.isReverse) isNotDotted(newRecordSet, zone) else ().asRight
if (!zone.isReverse) checkForDot(newRecordSet, zone, None, recordFqdnDoesNotExist, dottedHostZoneConfig, isRecordTypeAndUserAllowed, allowedDotsLimit) else ().asRight
def ptrValidations(newRecordSet: RecordSet, zone: Zone): Either[Throwable, Unit] =
// TODO we don't check for PTR as dotted...not sure why
@ -282,6 +369,29 @@ object RecordSetValidations {
.leftMap(errors => InvalidRequest(errors.toList.map(_.message).mkString(", ")))
}
def checkAllowedDots(allowedDotsLimit: Int, recordSet: RecordSet, zone: Zone): Either[Throwable, Unit] = {
ensuring(
InvalidRequest(
s"RecordSet with name ${recordSet.name} has more dots than that is allowed in config for this zone " +
s"which is, 'dots-limit = $allowedDotsLimit'."
)
)(
recordSet.name.count(_ == '.') <= allowedDotsLimit || (recordSet.name.count(_ == '.') == 1 &&
recordSet.name.takeRight(1) == ".") || recordSet.name == zone.name ||
(recordSet.typ.toString == "PTR" || recordSet.typ.toString == "SRV" || recordSet.typ.toString == "TXT" || recordSet.typ.toString == "NAPTR")
)
}
def isNotApexEndsWithDot(recordSet: RecordSet, zone: Zone): Either[Throwable, Unit] = {
ensuring(
InvalidRequest(
"RecordSet name cannot end with a dot, unless it's an apex record."
)
)(
recordSet.name.endsWith(zone.name) || !recordSet.name.endsWith(".")
)
}
def canUseOwnerGroup(
ownerGroupId: Option[String],
group: Option[Group],
@ -327,11 +437,66 @@ object RecordSetValidations {
InvalidRequest("Cannot update RecordSet's zone ID.")
)
/**
* Checks of the user is a superuser, the zone is shared, and the only record attribute being changed
* is the record owner group.
*/
def canSuperUserUpdateOwnerGroup(
existing: RecordSet,
updates: RecordSet,
zone: Zone,
auth: AuthPrincipal
): Boolean =
(updates.ownerGroupId != existing.ownerGroupId
&& updates.zoneId == existing.zoneId
&& updates.name == existing.name
&& updates.typ == existing.typ
&& updates.ttl == existing.ttl
&& updates.records == existing.records
&& zone.shared
&& auth.isSuper)
def validRecordNameFilterLength(recordNameFilter: String): Either[Throwable, Unit] =
ensuring(
InvalidRequest(RecordNameFilterError)
) {
val searchRegex: Regex = """[a-zA-Z0-9].*[a-zA-Z0-9]+""".r
searchRegex.findFirstIn(recordNameFilter).isDefined
ensuring(onError = InvalidRequest(RecordNameFilterError)) {
val searchRegex = "[a-zA-Z0-9].*[a-zA-Z0-9]+".r
val wildcardRegex = raw"^\s*[*%].*[*%]\s*$$".r
searchRegex.findFirstIn(recordNameFilter).isDefined && wildcardRegex.findFirstIn(recordNameFilter).isEmpty
}
def unchangedRecordSet(
existing: RecordSet,
updates: RecordSet
): Either[Throwable, Unit] =
Either.cond(
updates.typ == existing.typ &&
updates.records == existing.records &&
updates.id == existing.id &&
updates.zoneId == existing.zoneId &&
updates.name == existing.name &&
updates.ownerGroupId == existing.ownerGroupId &&
updates.ttl == existing.ttl,
(),
InvalidRequest("Cannot update RecordSet's if user not a member of ownership group. User can only request for ownership transfer")
)
def recordSetOwnerShipApproveStatus(
updates: RecordSet,
): Either[Throwable, Unit] =
Either.cond(
updates.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") != OwnerShipTransferStatus.ManuallyApproved &&
updates.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") != OwnerShipTransferStatus.AutoApproved &&
updates.recordSetGroupChange.map(_.ownerShipTransferStatus).getOrElse("<none>") != OwnerShipTransferStatus.ManuallyRejected,
(),
InvalidRequest("Cannot update RecordSet OwnerShip Status when request is cancelled.")
)
def unchangedRecordSetOwnershipStatus(
updates: RecordSet,
existing: RecordSet
): Either[Throwable, Unit] =
Either.cond(
updates.recordSetGroupChange == existing.recordSetGroupChange || existing.recordSetGroupChange.isEmpty,
(),
InvalidRequest("Cannot update RecordSet OwnerShip Status when zone is not shared.")
)
}

View File

@ -16,7 +16,7 @@
package vinyldns.api.domain.zone
import com.aaronbedra.orchard.CIDR
import com.comcast.ip4s.Cidr
import vinyldns.core.domain.record.RecordType
import vinyldns.core.domain.zone.ACLRule
@ -61,7 +61,7 @@ object ACLRuleOrdering extends ACLRuleOrdering {
object PTRACLRuleOrdering extends ACLRuleOrdering {
def sortableRecordMaskValue(rule: ACLRule): Int = {
val slash = rule.recordMask match {
case Some(cidrRule) => CIDR.valueOf(cidrRule).getMask
case Some(cidrRule) => Cidr.fromString(cidrRule).get.prefixBits
case None => 0
}
128 - slash

View File

@ -17,7 +17,7 @@
package vinyldns.api.domain.zone
import vinyldns.api.domain.zone
import vinyldns.core.domain.zone.{ListZoneChangesResults, ZoneChange}
import vinyldns.core.domain.zone.{ListFailedZoneChangesResults, ListZoneChangesResults, ZoneChange}
case class ListZoneChangesResponse(
zoneId: String,
@ -37,3 +37,29 @@ object ListZoneChangesResponse {
listResults.maxItems
)
}
case class ListDeletedZoneChangesResponse(
zonesDeletedInfo: List[ZoneChangeDeletedInfo],
zoneChangeFilter: Option[String] = None,
nextId: Option[String] = None,
startFrom: Option[String] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false
)
case class ListFailedZoneChangesResponse(
failedZoneChanges: List[ZoneChange] = Nil,
nextId: Int,
startFrom: Int,
maxItems: Int
)
object ListFailedZoneChangesResponse {
def apply(listResults: ListFailedZoneChangesResults): ListFailedZoneChangesResponse =
zone.ListFailedZoneChangesResponse(
listResults.items,
listResults.nextId,
listResults.startFrom,
listResults.maxItems
)
}

View File

@ -17,8 +17,8 @@
package vinyldns.api.domain.zone
import java.util.UUID
import org.joda.time.DateTime
import java.time.temporal.ChronoUnit
import java.time.Instant
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.core.domain.zone._
@ -34,7 +34,7 @@ object ZoneChangeGenerator {
): ZoneChange =
ZoneChange(
zone
.copy(id = UUID.randomUUID().toString, created = DateTime.now, status = ZoneStatus.Syncing),
.copy(id = UUID.randomUUID().toString, created = Instant.now.truncatedTo(ChronoUnit.MILLIS), status = ZoneStatus.Syncing),
authPrincipal.userId,
ZoneChangeType.Create,
status
@ -47,7 +47,7 @@ object ZoneChangeGenerator {
crypto: CryptoAlgebra
): ZoneChange =
ZoneChange(
newZone.copy(updated = Some(DateTime.now), connection = fixConn(oldZone, newZone, crypto)),
newZone.copy(updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)), connection = fixConn(oldZone, newZone, crypto)),
authPrincipal.userId,
ZoneChangeType.Update,
ZoneChangeStatus.Pending
@ -55,15 +55,23 @@ object ZoneChangeGenerator {
def forSync(zone: Zone, authPrincipal: AuthPrincipal): ZoneChange =
ZoneChange(
zone.copy(updated = Some(DateTime.now), status = ZoneStatus.Syncing),
zone.copy(updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)), status = ZoneStatus.Syncing),
authPrincipal.userId,
ZoneChangeType.Sync,
ZoneChangeStatus.Pending
)
def forSyncs(zone: Zone): ZoneChange =
ZoneChange(
zone.copy(updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)), status = ZoneStatus.Syncing),
zone.scheduleRequestor.get,
ZoneChangeType.AutomatedSync,
ZoneChangeStatus.Pending
)
def forDelete(zone: Zone, authPrincipal: AuthPrincipal): ZoneChange =
ZoneChange(
zone.copy(updated = Some(DateTime.now), status = ZoneStatus.Deleted),
zone.copy(updated = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS)), status = ZoneStatus.Deleted),
authPrincipal.userId,
ZoneChangeType.Delete,
ZoneChangeStatus.Pending

View File

@ -16,44 +16,46 @@
package vinyldns.api.domain.zone
import org.joda.time.DateTime
import java.time.Instant
import vinyldns.core.domain.record.RecordSetChangeStatus.RecordSetChangeStatus
import vinyldns.core.domain.record.RecordSetChangeType.RecordSetChangeType
import vinyldns.core.domain.record.RecordSetStatus.RecordSetStatus
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.{RecordData, RecordSet, RecordSetChange}
import vinyldns.core.domain.zone.{ACLRuleInfo, AccessLevel, Zone, ZoneACL, ZoneConnection}
import vinyldns.core.domain.record.{RecordData, RecordSet, RecordSetChange, OwnerShipTransfer}
import vinyldns.core.domain.zone.{ACLRuleInfo, AccessLevel, Zone, ZoneACL, ZoneChange, ZoneConnection}
import vinyldns.core.domain.zone.AccessLevel.AccessLevel
import vinyldns.core.domain.zone.ZoneStatus.ZoneStatus
case class ZoneACLInfo(rules: Set[ACLRuleInfo])
case class ZoneInfo(
name: String,
email: String,
status: ZoneStatus,
created: DateTime,
updated: Option[DateTime],
id: String,
connection: Option[ZoneConnection],
transferConnection: Option[ZoneConnection],
account: String,
shared: Boolean,
acl: ZoneACLInfo,
adminGroupId: String,
adminGroupName: String,
latestSync: Option[DateTime],
backendId: Option[String],
accessLevel: AccessLevel
)
name: String,
email: String,
status: ZoneStatus,
created: Instant,
updated: Option[Instant],
id: String,
connection: Option[ZoneConnection],
transferConnection: Option[ZoneConnection],
account: String,
shared: Boolean,
acl: ZoneACLInfo,
adminGroupId: String,
adminGroupName: String,
latestSync: Option[Instant],
backendId: Option[String],
recurrenceSchedule: Option[String],
scheduleRequestor: Option[String],
accessLevel: AccessLevel
)
object ZoneInfo {
def apply(
zone: Zone,
aclInfo: ZoneACLInfo,
groupName: String,
accessLevel: AccessLevel
): ZoneInfo =
zone: Zone,
aclInfo: ZoneACLInfo,
groupName: String,
accessLevel: AccessLevel
): ZoneInfo =
ZoneInfo(
name = zone.name,
email = zone.email,
@ -70,28 +72,54 @@ object ZoneInfo {
adminGroupName = groupName,
latestSync = zone.latestSync,
backendId = zone.backendId,
recurrenceSchedule = zone.recurrenceSchedule,
scheduleRequestor = zone.scheduleRequestor,
accessLevel = accessLevel
)
}
/**
 * Minimal, display-oriented view of a zone: its name, contact email, status, and
 * the id/name of its administering group. Built from a full Zone plus a resolved
 * group name (see the companion apply and ZoneService.getCommonZoneDetails).
 */
case class ZoneDetails(
name: String,
email: String,
status: ZoneStatus,
adminGroupId: String,
adminGroupName: String,
)
object ZoneDetails {
/**
 * Projects a Zone down to its display details.
 *
 * @param zone the zone to summarize
 * @param groupName the resolved display name of the zone's admin group
 *                  (the Zone itself only carries the group id)
 */
def apply(
zone: Zone,
groupName: String,
): ZoneDetails =
ZoneDetails(
name = zone.name,
email = zone.email,
status = zone.status,
adminGroupId = zone.adminGroupId,
adminGroupName = groupName,
)
}
case class ZoneSummaryInfo(
name: String,
email: String,
status: ZoneStatus,
created: DateTime,
updated: Option[DateTime],
id: String,
connection: Option[ZoneConnection],
transferConnection: Option[ZoneConnection],
account: String,
shared: Boolean,
acl: ZoneACL,
adminGroupId: String,
adminGroupName: String,
latestSync: Option[DateTime],
backendId: Option[String],
accessLevel: AccessLevel
)
name: String,
email: String,
status: ZoneStatus,
created: Instant,
updated: Option[Instant],
id: String,
connection: Option[ZoneConnection],
transferConnection: Option[ZoneConnection],
account: String,
shared: Boolean,
acl: ZoneACL,
adminGroupId: String,
adminGroupName: String,
latestSync: Option[Instant],
backendId: Option[String],
recurrenceSchedule: Option[String],
scheduleRequestor: Option[String],
accessLevel: AccessLevel
)
object ZoneSummaryInfo {
def apply(zone: Zone, groupName: String, accessLevel: AccessLevel): ZoneSummaryInfo =
@ -111,26 +139,50 @@ object ZoneSummaryInfo {
adminGroupName = groupName,
latestSync = zone.latestSync,
zone.backendId,
recurrenceSchedule = zone.recurrenceSchedule,
scheduleRequestor = zone.scheduleRequestor,
accessLevel = accessLevel
)
}
/**
 * View model pairing a deleted-zone ZoneChange with display fields: the admin group's
 * resolved name, the acting user's name, and the caller's access level on the zone.
 */
case class ZoneChangeDeletedInfo(
  zoneChange: ZoneChange,
  adminGroupName: String,
  userName: String,
  accessLevel: AccessLevel
)

object ZoneChangeDeletedInfo {
  /**
   * Builds a ZoneChangeDeletedInfo from the first change in the given list.
   *
   * Bug fix: the previous body called `ZoneChangeDeletedInfo(zoneChange = ..., groupName = ...)`;
   * since `groupName` is a parameter only of THIS overload (the case-class field is
   * `adminGroupName`), the call resolved back to this same method and recursed forever
   * (StackOverflowError at runtime). It now constructs the case class directly.
   *
   * NOTE(review): only `zoneChange.head` is used and an empty list throws
   * NoSuchElementException — confirm callers always pass a non-empty list, or whether
   * this overload is needed at all (ZoneService builds instances element-by-element
   * via the synthesized apply).
   */
  def apply(zoneChange: List[ZoneChange],
            groupName: String,
            userName: String,
            accessLevel: AccessLevel)
  : ZoneChangeDeletedInfo =
    new ZoneChangeDeletedInfo(
      zoneChange = zoneChange.head,
      adminGroupName = groupName,
      userName = userName,
      accessLevel = accessLevel
    )
}
case class RecordSetListInfo(
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: DateTime,
updated: Option[DateTime],
records: List[RecordData],
id: String,
account: String,
accessLevel: AccessLevel,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
fqdn: Option[String]
)
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: Instant,
updated: Option[Instant],
records: List[RecordData],
id: String,
account: String,
accessLevel: AccessLevel,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
recordSetGroupChange: Option[OwnerShipTransfer],
fqdn: Option[String]
)
object RecordSetListInfo {
def apply(recordSet: RecordSetInfo, accessLevel: AccessLevel): RecordSetListInfo =
@ -148,25 +200,27 @@ object RecordSetListInfo {
accessLevel = accessLevel,
ownerGroupId = recordSet.ownerGroupId,
ownerGroupName = recordSet.ownerGroupName,
recordSetGroupChange = recordSet.recordSetGroupChange,
fqdn = recordSet.fqdn
)
}
case class RecordSetInfo(
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: DateTime,
updated: Option[DateTime],
records: List[RecordData],
id: String,
account: String,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
fqdn: Option[String]
)
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: Instant,
updated: Option[Instant],
records: List[RecordData],
id: String,
account: String,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
recordSetGroupChange: Option[OwnerShipTransfer],
fqdn: Option[String]
)
object RecordSetInfo {
def apply(recordSet: RecordSet, groupName: Option[String]): RecordSetInfo =
@ -183,35 +237,37 @@ object RecordSetInfo {
account = recordSet.account,
ownerGroupId = recordSet.ownerGroupId,
ownerGroupName = groupName,
recordSetGroupChange = recordSet.recordSetGroupChange,
fqdn = recordSet.fqdn
)
}
case class RecordSetGlobalInfo(
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: DateTime,
updated: Option[DateTime],
records: List[RecordData],
id: String,
account: String,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
fqdn: Option[String],
zoneName: String,
zoneShared: Boolean
)
zoneId: String,
name: String,
typ: RecordType,
ttl: Long,
status: RecordSetStatus,
created: Instant,
updated: Option[Instant],
records: List[RecordData],
id: String,
account: String,
ownerGroupId: Option[String],
ownerGroupName: Option[String],
recordSetGroupChange: Option[OwnerShipTransfer],
fqdn: Option[String],
zoneName: String,
zoneShared: Boolean
)
object RecordSetGlobalInfo {
def apply(
recordSet: RecordSet,
zoneName: String,
zoneShared: Boolean,
groupName: Option[String]
): RecordSetGlobalInfo =
recordSet: RecordSet,
zoneName: String,
zoneShared: Boolean,
groupName: Option[String]
): RecordSetGlobalInfo =
RecordSetGlobalInfo(
zoneId = recordSet.zoneId,
name = recordSet.name,
@ -225,6 +281,7 @@ object RecordSetGlobalInfo {
account = recordSet.account,
ownerGroupId = recordSet.ownerGroupId,
ownerGroupName = groupName,
recordSetGroupChange = recordSet.recordSetGroupChange,
fqdn = recordSet.fqdn,
zoneName = zoneName,
zoneShared = zoneShared
@ -232,17 +289,17 @@ object RecordSetGlobalInfo {
}
case class RecordSetChangeInfo(
zone: Zone,
recordSet: RecordSet,
userId: String,
changeType: RecordSetChangeType,
status: RecordSetChangeStatus,
created: DateTime,
systemMessage: Option[String],
updates: Option[RecordSet],
id: String,
userName: String
)
zone: Zone,
recordSet: RecordSet,
userId: String,
changeType: RecordSetChangeType,
status: RecordSetChangeStatus,
created: Instant,
systemMessage: Option[String],
updates: Option[RecordSet],
id: String,
userName: String
)
object RecordSetChangeInfo {
def apply(recordSetChange: RecordSetChange, name: Option[String]): RecordSetChangeInfo =
@ -261,13 +318,16 @@ object RecordSetChangeInfo {
}
case class ListZonesResponse(
zones: List[ZoneSummaryInfo],
nameFilter: Option[String],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false
)
zones: List[ZoneSummaryInfo],
nameFilter: Option[String],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false,
includeReverse: Boolean = true
)
// Wrapper for the total number of record sets matching a query; defaults to 0 when nothing matches.
case class RecordSetCount( count: Int = 0 )
// Errors
case class InvalidRequest(msg: String) extends Throwable(msg)
@ -305,7 +365,7 @@ case class ConnectionFailed(zone: Zone, message: String) extends Throwable(messa
case class RecordSetValidation(msg: String) extends Throwable(msg)
case class ZoneValidationFailed(zone: Zone, errors: List[String], message: String)
extends Throwable(message)
extends Throwable(message)
case class ZoneTooLargeError(msg: String) extends Throwable(msg)

View File

@ -19,13 +19,7 @@ package vinyldns.api.domain.zone
import cats.implicits._
import cats.data._
import com.comcast.ip4s.IpAddress
import com.comcast.ip4s.interop.cats.implicits._
import vinyldns.core.domain.{
DomainHelpers,
DomainValidationError,
HighValueDomainError,
RecordRequiresManualReview
}
import vinyldns.core.domain.{DomainHelpers, DomainValidationError, HighValueDomainError, RecordRequiresManualReview}
import vinyldns.core.domain.record.{NSData, RecordSet}
import scala.util.matching.Regex
@ -41,7 +35,7 @@ object ZoneRecordValidations {
/* Checks to see if an ip address is part of the ip address list */
def isIpInIpList(ipList: List[IpAddress], ipToTest: String): Boolean =
IpAddress(ipToTest).exists(ip => ipList.exists(_ === ip))
IpAddress.fromString(ipToTest).exists(ip => ipList.exists(_ === ip))
/* Checks to see if an individual ns data is part of the approved server list */
def isApprovedNameServer(

View File

@ -16,17 +16,23 @@
package vinyldns.api.domain.zone
import cats.effect.IO
import cats.implicits._
import vinyldns.api.domain.access.AccessValidationsAlgebra
import vinyldns.api.Interfaces
import vinyldns.core.domain.auth.AuthPrincipal
import vinyldns.api.repository.ApiDataAccessor
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.membership.{Group, GroupRepository, User, UserRepository}
import vinyldns.core.domain.membership.{Group, GroupRepository, ListUsersResults, User, UserRepository}
import vinyldns.core.domain.zone._
import vinyldns.core.queue.MessageQueue
import vinyldns.core.domain.DomainHelpers.ensureTrailingDot
import vinyldns.core.domain.backend.BackendResolver
import com.cronutils.model.definition.CronDefinition
import com.cronutils.model.definition.CronDefinitionBuilder
import com.cronutils.parser.CronParser
import com.cronutils.model.CronType
import vinyldns.api.domain.membership.MembershipService
object ZoneService {
def apply(
@ -36,7 +42,8 @@ object ZoneService {
zoneValidations: ZoneValidations,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
crypto: CryptoAlgebra
crypto: CryptoAlgebra,
membershipService:MembershipService
): ZoneService =
new ZoneService(
dataAccessor.zoneRepository,
@ -48,7 +55,8 @@ object ZoneService {
zoneValidations,
accessValidation,
backendResolver,
crypto
crypto,
membershipService
)
}
@ -62,7 +70,8 @@ class ZoneService(
zoneValidations: ZoneValidations,
accessValidation: AccessValidationsAlgebra,
backendResolver: BackendResolver,
crypto: CryptoAlgebra
crypto: CryptoAlgebra,
membershipService:MembershipService
) extends ZoneServiceAlgebra {
import accessValidation._
@ -75,12 +84,17 @@ class ZoneService(
): Result[ZoneCommandResult] =
for {
_ <- isValidZoneAcl(createZoneInput.acl).toResult
_ <- membershipService.emailValidation(createZoneInput.email)
_ <- connectionValidator.isValidBackendId(createZoneInput.backendId).toResult
_ <- validateSharedZoneAuthorized(createZoneInput.shared, auth.signedInUser).toResult
_ <- zoneDoesNotExist(createZoneInput.name)
_ <- adminGroupExists(createZoneInput.adminGroupId)
_ <- if(createZoneInput.recurrenceSchedule.isDefined) canScheduleZoneSync(auth).toResult else IO.unit.toResult
isCronStringValid = if(createZoneInput.recurrenceSchedule.isDefined) isValidCronString(createZoneInput.recurrenceSchedule.get) else true
_ <- validateCronString(isCronStringValid).toResult
_ <- canChangeZone(auth, createZoneInput.name, createZoneInput.adminGroupId).toResult
zoneToCreate = Zone(createZoneInput, auth.isTestUser)
createdZoneInput = if(createZoneInput.recurrenceSchedule.isDefined) createZoneInput.copy(scheduleRequestor = Some(auth.signedInUser.userName)) else createZoneInput
zoneToCreate = Zone(createdZoneInput, auth.isTestUser)
_ <- connectionValidator.validateZoneConnections(zoneToCreate)
createZoneChange <- ZoneChangeGenerator.forAdd(zoneToCreate, auth).toResult
_ <- messageQueue.send(createZoneChange).toResult[Unit]
@ -89,6 +103,7 @@ class ZoneService(
def updateZone(updateZoneInput: UpdateZoneInput, auth: AuthPrincipal): Result[ZoneCommandResult] =
for {
_ <- isValidZoneAcl(updateZoneInput.acl).toResult
_ <- membershipService.emailValidation(updateZoneInput.email)
_ <- connectionValidator.isValidBackendId(updateZoneInput.backendId).toResult
existingZone <- getZoneOrFail(updateZoneInput.id)
_ <- validateSharedZoneAuthorized(
@ -97,10 +112,14 @@ class ZoneService(
auth.signedInUser
).toResult
_ <- canChangeZone(auth, existingZone.name, existingZone.adminGroupId).toResult
_ <- if(updateZoneInput.recurrenceSchedule.isDefined) canScheduleZoneSync(auth).toResult else IO.unit.toResult
isCronStringValid = if(updateZoneInput.recurrenceSchedule.isDefined) isValidCronString(updateZoneInput.recurrenceSchedule.get) else true
_ <- validateCronString(isCronStringValid).toResult
_ <- adminGroupExists(updateZoneInput.adminGroupId)
// if admin group changes, this confirms user has access to new group
_ <- canChangeZone(auth, updateZoneInput.name, updateZoneInput.adminGroupId).toResult
zoneWithUpdates = Zone(updateZoneInput, existingZone)
updatedZoneInput = if(updateZoneInput.recurrenceSchedule.isDefined) updateZoneInput.copy(scheduleRequestor = Some(auth.signedInUser.userName)) else updateZoneInput
zoneWithUpdates = Zone(updatedZoneInput, existingZone)
_ <- validateZoneConnectionIfChanged(zoneWithUpdates, existingZone)
updateZoneChange <- ZoneChangeGenerator
.forUpdate(zoneWithUpdates, existingZone, auth, crypto)
@ -134,6 +153,12 @@ class ZoneService(
accessLevel = getZoneAccess(auth, zone)
} yield ZoneInfo(zone, aclInfo, groupName, accessLevel)
/**
 * Fetches minimal zone details (name, email, status, admin group) for the given zone id.
 * Fails when the zone does not exist, and resolves the admin group id to its display name.
 *
 * NOTE(review): `auth` is accepted but no access check is performed here — confirm these
 * common details are intentionally visible to any authenticated principal.
 */
def getCommonZoneDetails(zoneId: String, auth: AuthPrincipal): Result[ZoneDetails] =
for {
zone <- getZoneOrFail(zoneId)
groupName <- getGroupName(zone.adminGroupId)
} yield ZoneDetails(zone, groupName)
def getZoneByName(zoneName: String, auth: AuthPrincipal): Result[ZoneInfo] =
for {
zone <- getZoneByNameOrFail(ensureTrailingDot(zoneName))
@ -142,20 +167,25 @@ class ZoneService(
accessLevel = getZoneAccess(auth, zone)
} yield ZoneInfo(zone, aclInfo, groupName, accessLevel)
// List zones. Uses zone name as default while using search to list zones or by admin group name if selected.
def listZones(
authPrincipal: AuthPrincipal,
nameFilter: Option[String] = None,
startFrom: Option[String] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false
searchByAdminGroup: Boolean = false,
ignoreAccess: Boolean = false,
includeReverse: Boolean = true
): Result[ListZonesResponse] = {
for {
listZonesResult <- zoneRepository.listZones(
authPrincipal,
nameFilter,
startFrom,
maxItems,
ignoreAccess
if(!searchByAdminGroup || nameFilter.isEmpty){
for {
listZonesResult <- zoneRepository.listZones(
authPrincipal,
nameFilter,
startFrom,
maxItems,
ignoreAccess,
includeReverse
)
zones = listZonesResult.zones
groupIds = zones.map(_.adminGroupId).toSet
@ -167,10 +197,87 @@ class ZoneService(
listZonesResult.startFrom,
listZonesResult.nextId,
listZonesResult.maxItems,
listZonesResult.ignoreAccess
)
listZonesResult.ignoreAccess,
listZonesResult.includeReverse
)}
else {
for {
groupIds <- getGroupsIdsByName(nameFilter.get)
listZonesResult <- zoneRepository.listZonesByAdminGroupIds(
authPrincipal,
startFrom,
maxItems,
groupIds,
ignoreAccess,
includeReverse
)
zones = listZonesResult.zones
groups <- groupRepository.getGroups(groupIds)
zoneSummaryInfos = zoneSummaryInfoMapping(zones, authPrincipal, groups)
} yield ListZonesResponse(
zoneSummaryInfos,
nameFilter,
listZonesResult.startFrom,
listZonesResult.nextId,
listZonesResult.maxItems,
listZonesResult.ignoreAccess,
listZonesResult.includeReverse
)
}
}.toResult
/**
 * Lists zone changes for deleted zones, paged.
 *
 * Queries the zone-change repository for deletion changes, then batch-resolves the
 * admin group names and acting user names referenced by those changes so the response
 * carries display-ready summaries.
 *
 * @param authPrincipal the caller; passed through to the repository for access filtering
 * @param nameFilter optional zone-name filter
 * @param startFrom paging cursor returned by a previous call
 * @param maxItems page size (default 100)
 * @param ignoreAccess when true, do not restrict results to zones the caller can access
 */
def listDeletedZones(
authPrincipal: AuthPrincipal,
nameFilter: Option[String] = None,
startFrom: Option[String] = None,
maxItems: Int = 100,
ignoreAccess: Boolean = false
): Result[ListDeletedZoneChangesResponse] = {
for {
listZonesChangeResult <- zoneChangeRepository.listDeletedZones(
authPrincipal,
nameFilter,
startFrom,
maxItems,
ignoreAccess
)
zoneChanges = listZonesChangeResult.zoneDeleted
// Batch-load every referenced admin group and user in one call each,
// rather than one lookup per change.
groupIds = zoneChanges.map(_.zone.adminGroupId).toSet
groups <- groupRepository.getGroups(groupIds)
userId = zoneChanges.map(_.userId).toSet
users <- userRepository.getUsers(userId,None,None)
zoneDeleteSummaryInfos = ZoneChangeDeletedInfoMapping(zoneChanges, authPrincipal, groups, users)
} yield {
ListDeletedZoneChangesResponse(
zoneDeleteSummaryInfos,
listZonesChangeResult.zoneChangeFilter,
listZonesChangeResult.nextId,
listZonesChangeResult.startFrom,
listZonesChangeResult.maxItems,
listZonesChangeResult.ignoreAccess
)
}
}.toResult
/**
 * Maps each deleted-zone change to a display summary, resolving its admin group name
 * and acting user name from the pre-fetched sets, and computing the caller's access
 * level on the zone. Missing groups/users fall back to placeholder names rather than
 * failing the listing.
 *
 * NOTE(review): method name is PascalCase, against Scala convention (should be
 * `zoneChangeDeletedInfoMapping`) — left as-is here because callers in this file
 * reference the current name.
 */
private def ZoneChangeDeletedInfoMapping(
zoneChange: List[ZoneChange],
auth: AuthPrincipal,
groups: Set[Group],
users: ListUsersResults
): List[ZoneChangeDeletedInfo] =
zoneChange.map { zc =>
val groupName = groups.find(_.id == zc.zone.adminGroupId) match {
case Some(group) => group.name
case None => "Unknown group name"
}
val userName = users.users.find(_.id == zc.userId) match {
case Some(user) => user.userName
case None => "Unknown user name"
}
val zoneAccess = getZoneAccess(auth, zc.zone)
ZoneChangeDeletedInfo(zc, groupName,userName, zoneAccess)
}
def zoneSummaryInfoMapping(
zones: List[Zone],
auth: AuthPrincipal,
@ -193,12 +300,38 @@ class ZoneService(
): Result[ListZoneChangesResponse] =
for {
zone <- getZoneOrFail(zoneId)
_ <- canSeeZone(authPrincipal, zone).toResult
_ <- canSeeZoneChange(authPrincipal, zone).toResult
zoneChangesResults <- zoneChangeRepository
.listZoneChanges(zone.id, startFrom, maxItems)
.toResult[ListZoneChangesResults]
} yield ListZoneChangesResponse(zone.id, zoneChangesResults)
/**
 * Lists zone changes that failed to apply, paged by integer offset.
 *
 * @param authPrincipal the caller
 * @param startFrom zero-based offset into the failed-change list
 * @param maxItems page size (default 100)
 *
 * NOTE(review): `zoneAccess(...)` returns a List[Result[Unit]] and the list itself is
 * lifted with `.toResult` without sequencing/evaluating the individual access checks —
 * it looks like per-zone visibility is never actually enforced here. Confirm whether
 * the checks should be sequenced (and failures filtered or raised).
 */
def listFailedZoneChanges(
authPrincipal: AuthPrincipal,
startFrom: Int= 0,
maxItems: Int = 100
): Result[ListFailedZoneChangesResponse] =
for {
zoneChangesFailedResults <- zoneChangeRepository
.listFailedZoneChanges(maxItems, startFrom)
.toResult[ListFailedZoneChangesResults]
_ <- zoneAccess(zoneChangesFailedResults.items, authPrincipal).toResult
} yield
ListFailedZoneChangesResponse(
zoneChangesFailedResults.items,
zoneChangesFailedResults.nextId,
startFrom,
maxItems
)
/**
 * Builds, for each zone change, the (unevaluated) visibility check of the caller
 * against that change's zone. Returns one Result per input change, in order.
 *
 * NOTE(review): callers must sequence/run these Results for the checks to take
 * effect — returning the bare list performs no enforcement by itself.
 */
def zoneAccess(
zoneCh: List[ZoneChange],
auth: AuthPrincipal
): List[Result[Unit]] =
zoneCh.map { zn =>
canSeeZone(auth, zn.zone).toResult
}
def addACLRule(
zoneId: String,
aclRuleInfo: ACLRuleInfo,
@ -242,9 +375,35 @@ class ZoneService(
} yield zoneChange
}
/** Looks up every group matching `groupName` and returns just the set of their ids. */
def getGroupsIdsByName(groupName: String): IO[Set[String]] =
  groupRepository.getGroupsByName(groupName).map(_.map(_.id))
/** Returns the ids of all configured DNS backends, as reported by the backend resolver. */
def getBackendIds(): Result[List[String]] =
backendResolver.ids.toList.toResult
/**
 * Returns true when `maybeString` parses and validates as a Quartz cron expression.
 *
 * Both malformed syntax (CronParser.parse) and out-of-range fields (validate) throw;
 * any such failure is treated as "invalid" rather than propagated. Rewritten from a
 * manual try/catch + temp boolean to the idiomatic `Try(...).isSuccess`. (Try catches
 * NonFatal throwables, a near-superset of the Exception the old code caught.)
 *
 * @param maybeString candidate cron expression, e.g. from `recurrenceSchedule`
 */
def isValidCronString(maybeString: String): Boolean = {
  import scala.util.Try
  Try {
    val cronDefinition: CronDefinition = CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ)
    val parser: CronParser = new CronParser(cronDefinition)
    parser.parse(maybeString).validate
  }.isSuccess
}
/**
 * Converts a cron-validity flag into an Either: Right(()) when valid, otherwise a
 * Left carrying an InvalidRequest explaining that 'recurrenceSchedule' is malformed.
 * (Uses the shared `ensuring` helper from vinyldns.api.Interfaces — presumably
 * error-if-false semantics; confirm against that helper's definition.)
 */
def validateCronString(isValid: Boolean): Either[Throwable, Unit] =
ensuring(
InvalidRequest("Invalid cron expression. Please enter a valid cron expression in 'recurrenceSchedule'.")
)(
isValid
)
def zoneDoesNotExist(zoneName: String): Result[Unit] =
zoneRepository
.getZoneByName(zoneName)
@ -258,6 +417,13 @@ class ZoneService(
}
.toResult
/**
 * Authorization guard for scheduling automated zone syncs: only system admins may set
 * a recurrence schedule. Returns Left(NotAuthorizedError) for non-admin principals.
 */
def canScheduleZoneSync(auth: AuthPrincipal): Either[Throwable, Unit] =
ensuring(
NotAuthorizedError(s"User '${auth.signedInUser.userName}' is not authorized to schedule zone sync in this zone.")
)(
auth.isSystemAdmin
)
def adminGroupExists(groupId: String): Result[Unit] =
groupRepository
.getGroup(groupId)

View File

@ -35,6 +35,8 @@ trait ZoneServiceAlgebra {
def getZone(zoneId: String, auth: AuthPrincipal): Result[ZoneInfo]
def getCommonZoneDetails(zoneId: String, auth: AuthPrincipal): Result[ZoneDetails]
def getZoneByName(zoneName: String, auth: AuthPrincipal): Result[ZoneInfo]
def listZones(
@ -42,9 +44,19 @@ trait ZoneServiceAlgebra {
nameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
ignoreAccess: Boolean
searchByAdminGroup: Boolean,
ignoreAccess: Boolean,
includeReverse: Boolean
): Result[ListZonesResponse]
def listDeletedZones(
authPrincipal: AuthPrincipal,
nameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
ignoreAccess: Boolean
): Result[ListDeletedZoneChangesResponse]
def listZoneChanges(
zoneId: String,
authPrincipal: AuthPrincipal,
@ -66,4 +78,9 @@ trait ZoneServiceAlgebra {
def getBackendIds(): Result[List[String]]
def listFailedZoneChanges(
authPrincipal: AuthPrincipal,
startFrom: Int,
maxItems: Int
): Result[ListFailedZoneChangesResponse]
}

View File

@ -0,0 +1,74 @@
/*
* Copyright 2018 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vinyldns.api.domain.zone
import cats.effect.IO
import com.cronutils.model.CronType
import com.cronutils.model.definition.{CronDefinition, CronDefinitionBuilder}
import com.cronutils.model.time.ExecutionTime
import com.cronutils.parser.CronParser
import org.slf4j.LoggerFactory
import vinyldns.core.domain.zone.{Zone, ZoneChange, ZoneRepository}
import java.time.{Instant, ZoneId}
import java.time.temporal.ChronoUnit
object ZoneSyncScheduleHandler {
  private val logger = LoggerFactory.getLogger("ZoneSyncScheduleHandler")

  /**
   * One scheduler tick: loads every zone that carries a sync schedule, selects those
   * whose cron schedule is due, and builds an automated-sync ZoneChange for each.
   */
  def zoneSyncScheduler(zoneRepository: ZoneRepository): IO[Set[ZoneChange]] =
    for {
      zones <- zoneRepository.getAllZonesWithSyncSchedule
      dueZoneIds = getZonesWithSchedule(zones.toList)
      zoneChanges <- getZoneChanges(zoneRepository, dueZoneIds)
    } yield zoneChanges

  /**
   * Loads the zones for the given ids and wraps each in an automated-sync ZoneChange.
   * Short-circuits to an empty set without touching the repository when nothing is due.
   */
  def getZoneChanges(zoneRepository: ZoneRepository, zoneScheduleIds: List[String]): IO[Set[ZoneChange]] =
    if (zoneScheduleIds.nonEmpty)
      zoneRepository
        .getZones(zoneScheduleIds.toSet)
        .map(_.map(zone => ZoneChangeGenerator.forSyncs(zone)))
    else
      IO(Set.empty)

  /**
   * Returns the ids of zones whose Quartz cron `recurrenceSchedule` next fires exactly
   * one second from now (UTC). Zones without a schedule are skipped.
   *
   * Cleanup vs. the original: the mutable accumulator and the dead `List.empty`
   * expressions in the else branches are gone, the cron definition/parser (loop
   * invariant) is built once instead of per zone, and the due-zone list is logged
   * once after collection instead of cumulatively per match.
   *
   * NOTE(review): the `== 1` equality only fires if this method is polled on a strict
   * one-second cadence; a delayed tick skips the window — confirm the caller's polling
   * interval. `nextExecution(...).get()` also throws when a cron has no future
   * execution — TODO confirm schedules are always open-ended.
   */
  def getZonesWithSchedule(zone: List[Zone]): List[String] = {
    // Quartz grammar, matching the validation performed in ZoneService.
    val cronDefinition: CronDefinition = CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ)
    val parser: CronParser = new CronParser(cronDefinition)
    val zonesWithSchedule = zone
      .filter(_.recurrenceSchedule.isDefined)
      .filter { z =>
        val now = Instant.now().atZone(ZoneId.of("UTC"))
        val executionTime: ExecutionTime = ExecutionTime.forCron(parser.parse(z.recurrenceSchedule.get))
        val nextExecution = executionTime.nextExecution(now).get()
        ChronoUnit.SECONDS.between(now, nextExecution) == 1
      }
      .map(_.id)
    if (zonesWithSchedule.nonEmpty)
      logger.info("Zones with sync schedule: " + zonesWithSchedule)
    zonesWithSchedule
  }
}

View File

@ -17,8 +17,9 @@
package vinyldns.api.domain.zone
import cats.syntax.either._
import com.aaronbedra.orchard.CIDR
import org.joda.time.DateTime
import com.comcast.ip4s.Cidr
import java.time.Instant
import java.time.temporal.ChronoUnit
import vinyldns.api.Interfaces.ensuring
import vinyldns.core.domain.membership.User
import vinyldns.core.domain.record.RecordType
@ -30,7 +31,7 @@ class ZoneValidations(syncDelayMillis: Int) {
def outsideSyncDelay(zone: Zone): Either[Throwable, Unit] =
zone.latestSync match {
case Some(time) if DateTime.now.getMillis - time.getMillis < syncDelayMillis => {
case Some(time) if Instant.now.truncatedTo(ChronoUnit.MILLIS).toEpochMilli - time.toEpochMilli < syncDelayMillis => {
RecentSyncError(s"Zone ${zone.name} was recently synced. Cannot complete sync").asLeft
}
case _ => Right(())
@ -56,10 +57,10 @@ class ZoneValidations(syncDelayMillis: Int) {
def aclRuleMaskIsValid(rule: ACLRule): Either[Throwable, Unit] =
rule.recordMask match {
case Some(mask) if rule.recordTypes == Set(RecordType.PTR) =>
Try(CIDR.valueOf(mask)) match {
Try(Cidr.fromString(mask).get) match {
case Success(_) => Right(())
case Failure(e) =>
InvalidRequest(s"PTR types must have no mask or a valid CIDR mask: ${e.getMessage}").asLeft
case Failure(_) =>
InvalidRequest(s"PTR types must have no mask or a valid CIDR mask: Invalid CIDR block").asLeft
}
case Some(_) if rule.recordTypes.contains(RecordType.PTR) =>
InvalidRequest("Multiple record types including PTR must have no mask").asLeft

View File

@ -20,7 +20,7 @@ import cats.effect._
import org.slf4j.LoggerFactory
import vinyldns.api.backend.dns.DnsConversions
import vinyldns.core.domain.backend.Backend
import vinyldns.core.domain.record.{NameSort, RecordSetRepository}
import vinyldns.core.domain.record.{NameSort, RecordSetCacheRepository, RecordSetRepository, RecordTypeSort}
import vinyldns.core.domain.zone.Zone
import vinyldns.core.route.Monitored
@ -52,8 +52,11 @@ case class DnsZoneViewLoader(
// Companion holding the shared logger used by VinylDNSZoneViewLoader instances.
object VinylDNSZoneViewLoader {
val logger = LoggerFactory.getLogger("VinylDNSZoneViewLoader")
}
case class VinylDNSZoneViewLoader(zone: Zone, recordSetRepository: RecordSetRepository)
extends ZoneViewLoader
case class VinylDNSZoneViewLoader(
zone: Zone,
recordSetRepository: RecordSetRepository,
recordSetCacheRepository: RecordSetCacheRepository
) extends ZoneViewLoader
with Monitored {
def load: () => IO[ZoneView] =
() =>
@ -66,7 +69,8 @@ case class VinylDNSZoneViewLoader(zone: Zone, recordSetRepository: RecordSetRepo
recordNameFilter = None,
recordTypeFilter = None,
recordOwnerGroupFilter = None,
nameSort = NameSort.ASC
nameSort = NameSort.ASC,
recordTypeSort = RecordTypeSort.ASC
)
.map { result =>
VinylDNSZoneViewLoader.logger.info(

View File

@ -19,31 +19,41 @@ package vinyldns.api.engine
import cats.effect.{ContextShift, IO, Timer}
import cats.implicits._
import org.slf4j.LoggerFactory
import scalikejdbc.DB
import vinyldns.api.backend.dns.DnsProtocol.TryAgain
import vinyldns.api.domain.record.RecordSetChangeGenerator
import vinyldns.api.domain.record.RecordSetHelpers._
import vinyldns.core.Messages.{nonExistentRecordDataDeleteMessage, nonExistentRecordDeleteMessage}
import vinyldns.core.domain.backend.{Backend, BackendResponse}
import vinyldns.core.domain.batch.{BatchChangeRepository, SingleChange}
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone.Zone
import vinyldns.mysql.TransactionProvider
object RecordSetChangeHandler {
object RecordSetChangeHandler extends TransactionProvider {
private val logger = LoggerFactory.getLogger("vinyldns.api.engine.RecordSetChangeHandler")
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val outOfSyncFailureMessage: String = "This record set is out of sync with the DNS backend; sync this zone before attempting to update this record set."
private val incompatibleRecordFailureMessage: String = "Incompatible record in DNS."
private val syncZoneMessage: String = "This record set is out of sync with the DNS backend. Sync this zone before attempting to update this record set."
private val recordConflictMessage: String = "Conflict due to the record having the same name as an NS record in the same zone. Please create the record using the DNS service the NS record has been delegated to (ex. AWS r53), or use a different record name."
final case class Requeue(change: RecordSetChange) extends Throwable
def apply(
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
batchChangeRepository: BatchChangeRepository
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
batchChangeRepository: BatchChangeRepository
)(implicit timer: Timer[IO]): (Backend, RecordSetChange) => IO[RecordSetChange] =
(conn, recordSetChange) => {
process(
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository,
batchChangeRepository,
conn,
recordSetChange
@ -51,11 +61,12 @@ object RecordSetChangeHandler {
}
def process(
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
batchChangeRepository: BatchChangeRepository,
conn: Backend,
recordSetChange: RecordSetChange
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
batchChangeRepository: BatchChangeRepository,
conn: Backend,
recordSetChange: RecordSetChange
)(implicit timer: Timer[IO]): IO[RecordSetChange] =
for {
wildCardExists <- wildCardExistsForRecord(recordSetChange.recordSet, recordSetRepository)
@ -64,18 +75,53 @@ object RecordSetChangeHandler {
conn,
wildCardExists,
recordSetRepository,
recordChangeRepository
recordChangeRepository,
recordSetCacheRepository
)
changeSet = ChangeSet(completedState.change).complete(completedState.change)
_ <- recordSetRepository.apply(changeSet)
_ <- recordChangeRepository.save(changeSet)
singleBatchChanges <- batchChangeRepository.getSingleChanges(
recordSetChange.singleBatchChangeIds
)
singleChangeStatusUpdates = updateBatchStatuses(singleBatchChanges, completedState.change)
_ <- batchChangeRepository.updateSingleChanges(singleChangeStatusUpdates)
_ <- saveChangeSet(recordSetRepository, recordChangeRepository,recordSetCacheRepository, batchChangeRepository, recordSetChange, completedState, changeSet)
} yield completedState.change
/**
 * Persists a completed record-set change atomically: the record set, the record-change
 * history, the record-set cache, and the single-change (batch) status updates are all
 * written inside one DB transaction, so a failure in any write rolls back the others.
 *
 * When the change maps to exactly one batch single-change whose system message marks a
 * delete of a non-existent record (or record data), that RecordSetChange is filtered
 * out of the change set before saving, so no-op deletes leave no record history.
 *
 * @param recordSetChange the processed change whose batch links are being updated
 * @param completedState terminal processor state; carries the final change to report
 * @param changeSet the change set derived from the completed change
 */
def saveChangeSet(
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
batchChangeRepository: BatchChangeRepository,
recordSetChange: RecordSetChange,
completedState: ProcessorState,
changeSet: ChangeSet
): IO[Unit] =
executeWithinTransaction { db: DB =>
for {
// Update single changes within this transaction to rollback the changes made to recordset and record change repo
// when exception occurs while updating single changes
singleBatchChanges <- batchChangeRepository.getSingleChanges(
recordSetChange.singleBatchChangeIds
)
singleChangeStatusUpdates = updateBatchStatuses(singleBatchChanges, completedState.change)
// Only the single-change case is filtered; multi-change batches are saved as-is.
updatedChangeSet = if (singleChangeStatusUpdates.size == 1) {
// Filter out RecordSetChange from changeSet if systemMessage matches
val filteredChangeSetChanges = changeSet.changes.filterNot { recordSetChange =>
// Find the corresponding singleChangeStatusUpdate by recordChangeId
singleChangeStatusUpdates.exists { singleChange =>
singleChange.recordChangeId.contains(recordSetChange.id) &&
singleChange.systemMessage.exists(msg =>
msg == nonExistentRecordDeleteMessage || msg == nonExistentRecordDataDeleteMessage
)
}
}
// Create a new ChangeSet with filtered changes
changeSet.copy(changes = filteredChangeSetChanges)
} else {
changeSet
}
// All three record stores plus the batch statuses share the same `db` handle/transaction.
_ <- recordSetRepository.apply(db, updatedChangeSet)
_ <- recordChangeRepository.save(db, updatedChangeSet)
_ <- recordSetCacheRepository.save(db, updatedChangeSet)
_ <- batchChangeRepository.updateSingleChanges(singleChangeStatusUpdates)
} yield ()
}
def updateBatchStatuses(
singleChanges: List[SingleChange],
recordSetChange: RecordSetChange
@ -123,11 +169,12 @@ object RecordSetChangeHandler {
final case class Retry(change: RecordSetChange) extends ProcessingStatus
def syncAndGetProcessingStatusFromDnsBackend(
change: RecordSetChange,
conn: Backend,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
performSync: Boolean = false
change: RecordSetChange,
conn: Backend,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
performSync: Boolean = false
): IO[ProcessingStatus] = {
def isDnsMatch(dnsResult: List[RecordSet], recordSet: RecordSet, zoneName: String): Boolean =
dnsResult.exists(matches(_, recordSet, zoneName))
@ -142,11 +189,11 @@ object RecordSetChangeHandler {
if (existingRecords.isEmpty) ReadyToApply(change)
else if (isDnsMatch(existingRecords, change.recordSet, change.zone.name))
AlreadyApplied(change)
else Failure(change, "Incompatible record already exists in DNS.")
else Failure(change, incompatibleRecordFailureMessage)
case RecordSetChangeType.Update =>
if (isDnsMatch(existingRecords, change.recordSet, change.zone.name))
AlreadyApplied(change)
AlreadyApplied(change)
else {
// record must not exist in the DNS backend, or be synced if it exists
val canApply = existingRecords.isEmpty ||
@ -156,14 +203,23 @@ object RecordSetChangeHandler {
else
Failure(
change,
"This record set is out of sync with the DNS backend; " +
"sync this zone before attempting to update this record set."
outOfSyncFailureMessage
)
}
case RecordSetChangeType.Delete =>
if (existingRecords.nonEmpty) ReadyToApply(change) // we have a record set, move forward
else AlreadyApplied(change) // we did not find the record set, so already applied
case RecordSetChangeType.Sync =>
if (existingRecords.nonEmpty) {
Failure(
change,
outOfSyncFailureMessage
)
} else {
AlreadyApplied(change)
}
}
}
@ -175,7 +231,8 @@ object RecordSetChangeHandler {
change,
existingRecords,
recordSetRepository,
recordChangeRepository
recordChangeRepository,
recordSetCacheRepository
)
processingStatus <- getProcessingStatus(change, dnsBackendRRSet)
} yield processingStatus
@ -192,7 +249,8 @@ object RecordSetChangeHandler {
conn: Backend,
wildcardExists: Boolean,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository
)(
implicit timer: Timer[IO]
): IO[ProcessorState] = {
@ -216,26 +274,54 @@ object RecordSetChangeHandler {
val toRun =
if (wildcardExists || state.change.recordSet.typ == RecordType.NS) IO.pure(skip) else orElse
toRun.flatMap(fsm(_, conn, wildcardExists, recordSetRepository, recordChangeRepository))
toRun.flatMap(
fsm(
_,
conn,
wildcardExists,
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository
)
)
}
state match {
case Pending(change) =>
logger.info(s"CHANGE PENDING; ${getChangeLog(change)}")
bypassValidation(Validated(change))(
orElse = validate(change, conn, recordSetRepository, recordChangeRepository)
orElse = validate(
change,
conn,
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository
)
)
case Validated(change) =>
logger.info(s"CHANGE VALIDATED; ${getChangeLog(change)}")
apply(change, conn).flatMap(
fsm(_, conn, wildcardExists, recordSetRepository, recordChangeRepository)
fsm(
_,
conn,
wildcardExists,
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository
)
)
case Applied(change) =>
logger.info(s"CHANGE APPLIED; ${getChangeLog(change)}")
bypassValidation(Verified(change.successful))(
orElse = verify(change, conn, recordSetRepository, recordChangeRepository)
orElse = verify(
change,
conn,
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository
)
)
case Verified(change) =>
@ -254,10 +340,11 @@ object RecordSetChangeHandler {
}
private def syncAgainstDnsBackend(
change: RecordSetChange,
dnsBackendRRSet: List[RecordSet],
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository
change: RecordSetChange,
dnsBackendRRSet: List[RecordSet],
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository
): IO[List[RecordSet]] = {
/*
@ -266,11 +353,12 @@ object RecordSetChangeHandler {
* - Delete record from database for ADD request if exists in database but does not exist in DNS backend
*/
def syncDnsBackendRRSet(
storedRRSet: Option[RecordSet],
dnsBackendRRSet: Option[RecordSet],
zone: Zone,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository
storedRRSet: Option[RecordSet],
dnsBackendRRSet: Option[RecordSet],
zone: Zone,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository
): IO[Unit] = {
val recordSetToSync = (storedRRSet, dnsBackendRRSet) match {
case (Some(savedRs), None) if change.changeType == RecordSetChangeType.Create =>
@ -286,10 +374,15 @@ object RecordSetChangeHandler {
recordSetToSync
.map { rsc =>
val changeSet = ChangeSet(rsc)
for {
_ <- recordChangeRepository.save(changeSet)
_ <- recordSetRepository.apply(changeSet)
} yield ()
executeWithinTransaction { db: DB =>
for {
_ <- recordSetRepository.apply(db, changeSet)
_ <- recordChangeRepository.save(db, changeSet)
_ <- recordSetCacheRepository.save(db, changeSet)
} yield ()
}
}
.getOrElse(IO.unit)
}
@ -302,33 +395,49 @@ object RecordSetChangeHandler {
dnsBackendRRSet.headOption,
change.zone,
recordSetRepository,
recordChangeRepository
recordChangeRepository,
recordSetCacheRepository
)
} yield dnsBackendRRSet
}
/* Step 1: Validate the change hasn't already been applied */
private def validate(
change: RecordSetChange,
conn: Backend,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository
change: RecordSetChange,
conn: Backend,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository
): IO[ProcessorState] =
syncAndGetProcessingStatusFromDnsBackend(
change,
conn,
recordSetRepository,
recordChangeRepository,
true
recordSetCacheRepository
).map {
case AlreadyApplied(_) => Completed(change.successful)
case ReadyToApply(_) => Validated(change)
case Failure(_, message) =>
Completed(
change.failed(
s"Failed validating update to DNS for change ${change.id}:${change.recordSet.name}: " + message
if(message == outOfSyncFailureMessage || message == incompatibleRecordFailureMessage){
Completed(
change.failed(
syncZoneMessage
)
)
)
} else if (message == "referral") {
Completed(
change.failed(
recordConflictMessage
)
)
} else {
Completed(
change.failed(
s"""Failed validating update to DNS for change "${change.id}": "${change.recordSet.name}": """ + message
)
)
}
case Retry(_) => Retrying(change)
}
@ -352,19 +461,21 @@ object RecordSetChangeHandler {
change: RecordSetChange,
conn: Backend,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository
): IO[ProcessorState] =
syncAndGetProcessingStatusFromDnsBackend(
change,
conn,
recordSetRepository,
recordChangeRepository
recordChangeRepository,
recordSetCacheRepository
).map {
case AlreadyApplied(_) => Completed(change.successful)
case Failure(_, message) =>
Completed(
change.failed(
s"Failed verifying update to DNS for change ${change.id}:${change.recordSet.name}: $message"
s"""Failed verifying update to DNS for change "${change.id}":"${change.recordSet.name}": $message"""
)
)
case _ => Retrying(change)

View File

@ -17,14 +17,22 @@
package vinyldns.api.engine
import cats.effect.IO
import vinyldns.core.domain.record.RecordSetRepository
import org.slf4j.{Logger, LoggerFactory}
import scalikejdbc.DB
import vinyldns.api.engine.ZoneSyncHandler.executeWithinTransaction
import vinyldns.core.domain.record.{RecordSetCacheRepository, RecordSetRepository}
import vinyldns.core.domain.zone._
object ZoneChangeHandler {
private implicit val logger: Logger = LoggerFactory.getLogger("vinyldns.engine.ZoneChangeHandler")
def apply(
zoneRepository: ZoneRepository,
zoneChangeRepository: ZoneChangeRepository,
recordSetRepository: RecordSetRepository
zoneRepository: ZoneRepository,
zoneChangeRepository: ZoneChangeRepository,
recordSetRepository: RecordSetRepository,
recordSetCacheRepository: RecordSetCacheRepository,
): ZoneChange => IO[ZoneChange] =
zoneChange =>
zoneRepository.save(zoneChange.zone).flatMap {
@ -36,13 +44,21 @@ object ZoneChangeHandler {
)
)
case Right(_) if zoneChange.changeType == ZoneChangeType.Delete =>
recordSetRepository
.deleteRecordSetsInZone(zoneChange.zone.id, zoneChange.zone.name)
executeWithinTransaction { db: DB =>
for {
_ <- recordSetRepository
.deleteRecordSetsInZone(db,zoneChange.zone.id, zoneChange.zone.name)
_ <- recordSetCacheRepository
.deleteRecordSetDataInZone(db,zoneChange.zone.id, zoneChange.zone.name)}
yield ()
}
.attempt
.flatMap { _ =>
zoneChangeRepository.save(zoneChange.copy(status = ZoneChangeStatus.Synced))
}
case Right(_) =>
logger.info(s"Saving zone change with id: '${zoneChange.id}', zone name: '${zoneChange.zone.name}'")
zoneChangeRepository.save(zoneChange.copy(status = ZoneChangeStatus.Synced))
}
}

View File

@ -18,35 +18,34 @@ package vinyldns.api.engine
import cats.effect.{ContextShift, IO}
import cats.syntax.all._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.slf4j.{Logger, LoggerFactory}
import scalikejdbc.DB
import vinyldns.api.backend.dns.DnsConversions
import vinyldns.api.domain.zone.{DnsZoneViewLoader, VinylDNSZoneViewLoader}
import vinyldns.core.domain.backend.BackendResolver
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone.{Zone, ZoneStatus}
import vinyldns.core.domain.zone._
import vinyldns.core.route.Monitored
import vinyldns.core.domain.zone.{
ZoneChange,
ZoneChangeRepository,
ZoneChangeStatus,
ZoneRepository
}
import vinyldns.mysql.TransactionProvider
import java.io.{PrintWriter, StringWriter}
object ZoneSyncHandler extends DnsConversions with Monitored {
object ZoneSyncHandler extends DnsConversions with Monitored with TransactionProvider {
private implicit val logger: Logger = LoggerFactory.getLogger("vinyldns.engine.ZoneSyncHandler")
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
def apply(
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
zoneChangeRepository: ZoneChangeRepository,
zoneRepository: ZoneRepository,
backendResolver: BackendResolver,
maxZoneSize: Int,
vinyldnsLoader: (Zone, RecordSetRepository) => VinylDNSZoneViewLoader =
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
zoneChangeRepository: ZoneChangeRepository,
zoneRepository: ZoneRepository,
backendResolver: BackendResolver,
maxZoneSize: Int,
vinyldnsLoader: (Zone, RecordSetRepository, RecordSetCacheRepository) => VinylDNSZoneViewLoader =
VinylDNSZoneViewLoader.apply
): ZoneChange => IO[ZoneChange] =
zoneChange =>
@ -56,6 +55,7 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
syncChange <- runSync(
recordSetRepository,
recordChangeRepository,
recordSetCacheRepository,
zoneChange,
backendResolver,
maxZoneSize,
@ -79,16 +79,18 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
)
)
case Right(_) =>
logger.info(s"Saving zone sync details for zone change with id: '${zoneChange.id}', zone name: '${zoneChange.zone.name}'")
zoneChangeRepository.save(zoneChange)
}
def runSync(
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
zoneChange: ZoneChange,
backendResolver: BackendResolver,
maxZoneSize: Int,
vinyldnsLoader: (Zone, RecordSetRepository) => VinylDNSZoneViewLoader =
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
zoneChange: ZoneChange,
backendResolver: BackendResolver,
maxZoneSize: Int,
vinyldnsLoader: (Zone, RecordSetRepository, RecordSetCacheRepository) => VinylDNSZoneViewLoader =
VinylDNSZoneViewLoader.apply
): IO[ZoneChange] =
monitor("zone.sync") {
@ -100,12 +102,11 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
s"zone.sync.loadDnsView; zoneName='${zone.name}'; zoneChange='${zoneChange.id}'"
)(dnsLoader.load())
val vinyldnsView = time(s"zone.sync.loadVinylDNSView; zoneName='${zone.name}'")(
vinyldnsLoader(zone, recordSetRepository).load()
vinyldnsLoader(zone, recordSetRepository, recordSetCacheRepository).load()
)
val recordSetChanges = (dnsView, vinyldnsView).parTupled.map {
case (dnsZoneView, vinylDnsZoneView) => vinylDnsZoneView.diff(dnsZoneView)
}
recordSetChanges.flatMap { allChanges =>
val changesWithUserIds = allChanges.map(_.withUserId(zoneChange.userId))
@ -115,7 +116,7 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
)
IO.pure(
zoneChange.copy(
zone.copy(status = ZoneStatus.Active, latestSync = Some(DateTime.now)),
zone.copy(status = ZoneStatus.Active, latestSync = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS))),
status = ZoneChangeStatus.Synced
)
)
@ -143,32 +144,37 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
)
val changeSet = ChangeSet(changesWithUserIds).copy(status = ChangeSetStatus.Applied)
// we want to make sure we write to both the change repo and record set repo
// at the same time as this can take a while
val saveRecordChanges = time(s"zone.sync.saveChanges; zoneName='${zone.name}'")(
recordChangeRepository.save(changeSet)
)
val saveRecordSets = time(s"zone.sync.saveRecordSets; zoneName='${zone.name}'")(
recordSetRepository.apply(changeSet)
)
executeWithinTransaction { db: DB =>
// we want to make sure we write to both the change repo and record set repo
// at the same time as this can take a while
val saveRecordChanges = time(s"zone.sync.saveChanges; zoneName='${zone.name}'")(
recordChangeRepository.save(db, changeSet)
)
val saveRecordSets = time(s"zone.sync.saveRecordSets; zoneName='${zone.name}'")(
recordSetRepository.apply(db, changeSet)
)
val saveRecordSetDatas = time(s"zone.sync.saveRecordSetDatas; zoneName='${zone.name}'")(
recordSetCacheRepository.save(db,changeSet)
)
// join together the results of saving both the record changes as well as the record sets
for {
_ <- saveRecordChanges
_ <- saveRecordSets
} yield zoneChange.copy(
zone.copy(status = ZoneStatus.Active, latestSync = Some(DateTime.now)),
status = ZoneChangeStatus.Synced
)
// join together the results of saving both the record changes as well as the record sets
for {
_ <- saveRecordChanges
_ <- saveRecordSets
_ <- saveRecordSetDatas
} yield zoneChange.copy(
zone.copy(status = ZoneStatus.Active, latestSync = Some(Instant.now.truncatedTo(ChronoUnit.MILLIS))),
status = ZoneChangeStatus.Synced
)
}
}
}
}
}.attempt
.map {
}.attempt.map {
case Left(e: Throwable) =>
val errorMessage = new StringWriter
e.printStackTrace(new PrintWriter(errorMessage))
logger.error(
s"Encountered error syncing ; zoneName='${zoneChange.zone.name}'; zoneChange='${zoneChange.id}'",
e
s"Encountered error syncing ; zoneName='${zoneChange.zone.name}'; zoneChange='${zoneChange.id}'. Error: ${errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")}"
)
// We want to just move back to an active status, do not update latest sync
zoneChange.copy(
@ -177,4 +183,5 @@ object ZoneSyncHandler extends DnsConversions with Monitored {
)
case Right(ok) => ok
}
}
}

View File

@ -18,45 +18,41 @@ package vinyldns.api.notifier.email
import vinyldns.core.notifier.{Notification, Notifier}
import cats.effect.IO
import vinyldns.core.domain.batch.{
BatchChange,
BatchChangeApprovalStatus,
SingleAddChange,
SingleChange,
SingleDeleteRRSetChange
}
import vinyldns.core.domain.membership.UserRepository
import vinyldns.core.domain.membership.User
import cats.implicits._
import cats.effect.IO
import vinyldns.core.domain.batch.{BatchChange, BatchChangeApprovalStatus, SingleAddChange, SingleChange, SingleDeleteRRSetChange}
import vinyldns.core.domain.membership.{GroupRepository, User, UserRepository}
import org.slf4j.LoggerFactory
import javax.mail.internet.{InternetAddress, MimeMessage}
import javax.mail.{Address, Message, Session}
import scala.util.Try
import vinyldns.core.domain.record.AData
import vinyldns.core.domain.record.AAAAData
import vinyldns.core.domain.record.CNAMEData
import vinyldns.core.domain.record.MXData
import vinyldns.core.domain.record.TXTData
import vinyldns.core.domain.record.PTRData
import vinyldns.core.domain.record.RecordData
import org.joda.time.format.DateTimeFormat
import vinyldns.core.domain.record.{AAAAData, AData, CNAMEData, MXData, OwnerShipTransferStatus, PTRData, RecordData, RecordSetChange, TXTData}
import vinyldns.core.domain.record.OwnerShipTransferStatus.OwnerShipTransferStatus
import java.time.format.{DateTimeFormatter, FormatStyle}
import vinyldns.core.domain.batch.BatchChangeStatus._
import vinyldns.core.domain.batch.BatchChangeApprovalStatus._
class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepository: UserRepository)
import java.time.ZoneId
class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepository: UserRepository, groupRepository: GroupRepository)
extends Notifier {
private val logger = LoggerFactory.getLogger("EmailNotifier")
private val logger = LoggerFactory.getLogger(classOf[EmailNotifier])
def notify(notification: Notification[_]): IO[Unit] =
notification.change match {
case bc: BatchChange => sendBatchChangeNotification(bc)
case rsc: RecordSetChange => sendRecordSetOwnerTransferNotification(rsc)
case _ => IO.unit
}
def send(addresses: Address*)(buildMessage: Message => Message): IO[Unit] = IO {
def send(toAddresses: Address*)(ccAddresses: Address*)(buildMessage: Message => Message): IO[Unit] = IO {
val message = new MimeMessage(session)
message.setRecipients(Message.RecipientType.TO, addresses.toArray)
message.setRecipients(Message.RecipientType.TO, toAddresses.toArray)
message.setRecipients(Message.RecipientType.CC, ccAddresses.toArray)
message.setFrom(config.from)
buildMessage(message)
message.saveChanges()
@ -66,10 +62,10 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
transport.close()
}
def sendBatchChangeNotification(bc: BatchChange): IO[Unit] =
def sendBatchChangeNotification(bc: BatchChange): IO[Unit] = {
userRepository.getUser(bc.userId).flatMap {
case Some(UserWithEmail(email)) =>
send(email) { message =>
case Some(UserWithEmail(email)) =>
send(email)() { message =>
message.setSubject(s"VinylDNS Batch change ${bc.id} results")
message.setContent(formatBatchChange(bc), "text/html")
message
@ -80,9 +76,58 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
s"Unable to properly parse email for ${user.id}: ${user.email.getOrElse("<none>")}"
)
}
case None => IO { logger.warn(s"Unable to find user: ${bc.userId}") }
case None => IO {
logger.warn(s"Unable to find user: ${bc.userId}")
}
case _ => IO.unit
}
}
def sendRecordSetOwnerTransferNotification(rsc: RecordSetChange): IO[Unit] = {
for {
toGroup <- groupRepository.getGroup(rsc.recordSet.ownerGroupId.getOrElse("<none>"))
ccGroup <- groupRepository.getGroup(rsc.recordSet.recordSetGroupChange.map(_.requestedOwnerGroupId.getOrElse("<none>")).getOrElse("<none>"))
_ <- toGroup match {
case Some(group) =>
group.memberIds.toList.traverse { id =>
userRepository.getUser(id).flatMap {
case Some(UserWithEmail(toEmail)) =>
ccGroup match {
case Some(ccg) =>
ccg.memberIds.toList.traverse { id =>
userRepository.getUser(id).flatMap {
case Some(ccUser) =>
val ccEmail = ccUser.email.getOrElse("<none>")
send(toEmail)(new InternetAddress(ccEmail)) { message =>
message.setSubject(s"VinylDNS RecordSet change ${rsc.id} results")
message.setContent(formatRecordSetChange(rsc), "text/html")
message
}
case None =>
IO.unit
}
}
case None => IO.unit
}
case Some(user: User) if user.email.isDefined =>
IO {
logger.warn(
s"Unable to properly parse email for ${user.id}: ${user.email.getOrElse("<none>")}"
)
}
case None =>
IO {
logger.warn(s"Unable to find user: ${rsc.userId}")
}
case _ =>
IO.unit
}
}
case None => IO.unit // Handle case where toGroup is None
}
} yield ()
}
def formatBatchChange(bc: BatchChange): String = {
val sb = new StringBuilder
@ -90,9 +135,9 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
sb.append(s"""<h1>Batch Change Results</h1>
| <b>Submitter:</b> ${bc.userName} <br/>
| ${bc.comments.map(comments => s"<b>Description:</b> $comments</br>").getOrElse("")}
| <b>Created:</b> ${bc.createdTimestamp.toString(DateTimeFormat.fullDateTime)} <br/>
| <b>Created:</b> ${DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL).withZone(ZoneId.systemDefault()).format(bc.createdTimestamp)} <br/>
| <b>Id:</b> ${bc.id}<br/>
| <b>Status:</b> ${formatStatus(bc.approvalStatus, bc.status)}<br/>""".stripMargin)
| <b>Status:</b> ${formatBatchStatus(bc.approvalStatus, bc.status)}<br/>""".stripMargin)
// For manually reviewed e-mails, add additional info; e-mails are not sent for pending batch changes
if (bc.approvalStatus != AutoApproved) {
@ -102,7 +147,7 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
bc.reviewTimestamp.foreach(
reviewTimestamp =>
sb.append(
s"<b>Time reviewed:</b> ${reviewTimestamp.toString(DateTimeFormat.fullDateTime)} <br/>"
s"<b>Time reviewed:</b> ${DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL).withZone(ZoneId.systemDefault()).format(reviewTimestamp)} <br/>"
)
)
}
@ -110,7 +155,7 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
bc.cancelledTimestamp.foreach(
cancelledTimestamp =>
sb.append(
s"<b>Time cancelled:</b> ${cancelledTimestamp.toString(DateTimeFormat.fullDateTime)} <br/>"
s"<b>Time cancelled:</b> ${DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL).withZone(ZoneId.systemDefault()).format(cancelledTimestamp)} <br/>"
)
)
@ -124,7 +169,8 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
sb.toString
}
def formatStatus(approval: BatchChangeApprovalStatus, status: BatchChangeStatus): String =
def formatBatchStatus(approval: BatchChangeApprovalStatus, status: BatchChangeStatus): String =
(approval, status) match {
case (ManuallyRejected, _) => "Rejected"
case (BatchChangeApprovalStatus.PendingReview, _) => "Pending Review"
@ -132,6 +178,28 @@ class EmailNotifier(config: EmailNotifierConfig, session: Session, userRepositor
case (_, status) => status.toString
}
def formatRecordSetChange(rsc: RecordSetChange): String = {
val sb = new StringBuilder
sb.append(s"""<h1>RecordSet Ownership Transfer</h1>
| <b>Submitter:</b> ${ userRepository.getUser(rsc.userId).map(_.get.userName)}
| <b>Id:</b> ${rsc.id}<br/>
| <b>Submitted time:</b> ${DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL).withZone(ZoneId.systemDefault()).format(rsc.created)} <br/>
| <b>OwnerShip Current Group:</b> ${rsc.recordSet.ownerGroupId.getOrElse("none")} <br/>
| <b>OwnerShip Transfer Group:</b> ${rsc.recordSet.recordSetGroupChange.map(_.requestedOwnerGroupId.getOrElse("none")).getOrElse("none")} <br/>
| <b>OwnerShip Transfer Status:</b> ${formatOwnerShipStatus(rsc.recordSet.recordSetGroupChange.map(_.ownerShipTransferStatus).get)}<br/>
""".stripMargin)
sb.toString
}
def formatOwnerShipStatus(status: OwnerShipTransferStatus): String =
status match {
case OwnerShipTransferStatus.ManuallyRejected => "Rejected"
case OwnerShipTransferStatus.PendingReview => "Pending Review"
case OwnerShipTransferStatus.ManuallyApproved => "Approved"
case OwnerShipTransferStatus.Cancelled => "Cancelled"
}
def formatSingleChange(sc: SingleChange, index: Int): String = sc match {
case SingleAddChange(
_,

View File

@ -17,7 +17,7 @@
package vinyldns.api.notifier.email
import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import vinyldns.core.domain.membership.{GroupRepository, UserRepository}
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
@ -30,13 +30,13 @@ class EmailNotifierProvider extends NotifierProvider {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
def load(config: NotifierConfig, userRepository: UserRepository, groupRepository: GroupRepository): IO[Notifier] =
for {
emailConfig <- Blocker[IO].use(
ConfigSource.fromConfig(config.settings).loadF[IO, EmailNotifierConfig](_)
)
session <- createSession(emailConfig)
} yield new EmailNotifier(emailConfig, session, userRepository)
} yield new EmailNotifier(emailConfig, session, userRepository, groupRepository)
def createSession(config: EmailNotifierConfig): IO[Session] = IO {
Session.getInstance(config.smtp)

View File

@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory
import vinyldns.api.route.VinylDNSJsonProtocol
import vinyldns.core.domain.batch.BatchChange
import vinyldns.core.notifier.{Notification, Notifier}
import java.io.{PrintWriter, StringWriter}
class SnsNotifier(config: SnsNotifierConfig, sns: AmazonSNS)
extends Notifier
@ -52,6 +53,8 @@ class SnsNotifier(config: SnsNotifierConfig, sns: AmazonSNS)
sns.publish(request)
logger.info(s"Sending batch change success; batchChange='${bc.id}'")
}.handleErrorWith { e =>
IO(logger.error(s"Failed sending batch change; batchChange='${bc.id}'", e))
val errorMessage = new StringWriter
e.printStackTrace(new PrintWriter(errorMessage))
IO(logger.error(s"Failed sending batch change; batchChange='${bc.id}'. Error: ${errorMessage.toString.replaceAll("\n",";").replaceAll("\t"," ")}"))
}.void
}

View File

@ -17,7 +17,7 @@
package vinyldns.api.notifier.sns
import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import vinyldns.core.domain.membership.{GroupRepository, UserRepository}
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
@ -35,7 +35,7 @@ class SnsNotifierProvider extends NotifierProvider {
IO.contextShift(scala.concurrent.ExecutionContext.global)
private val logger = LoggerFactory.getLogger(classOf[SnsNotifierProvider])
def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
def load(config: NotifierConfig, userRepository: UserRepository, groupRepository: GroupRepository): IO[Notifier] =
for {
snsConfig <- Blocker[IO].use(
ConfigSource.fromConfig(config.settings).loadF[IO, SnsNotifierConfig](_)

View File

@ -17,14 +17,9 @@
package vinyldns.api.repository
import vinyldns.core.domain.batch.BatchChangeRepository
import vinyldns.core.domain.membership.{
GroupChangeRepository,
GroupRepository,
MembershipRepository,
UserRepository
}
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetRepository}
import vinyldns.core.domain.zone.{ZoneChangeRepository, ZoneRepository}
import vinyldns.core.domain.membership.{UserRepository, GroupChangeRepository, MembershipRepository, GroupRepository}
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetCacheRepository, RecordSetRepository}
import vinyldns.core.domain.zone.{ZoneRepository, ZoneChangeRepository}
import vinyldns.core.repository.DataAccessor
final case class ApiDataAccessor(
@ -34,6 +29,7 @@ final case class ApiDataAccessor(
groupChangeRepository: GroupChangeRepository,
recordSetRepository: RecordSetRepository,
recordChangeRepository: RecordChangeRepository,
recordSetCacheRepository: RecordSetCacheRepository,
zoneChangeRepository: ZoneChangeRepository,
zoneRepository: ZoneRepository,
batchChangeRepository: BatchChangeRepository

View File

@ -20,7 +20,11 @@ import cats.data.ValidatedNel
import cats.implicits._
import vinyldns.core.domain.batch.BatchChangeRepository
import vinyldns.core.domain.membership._
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetRepository}
import vinyldns.core.domain.record.{
RecordChangeRepository,
RecordSetCacheRepository,
RecordSetRepository
}
import vinyldns.core.domain.zone.{ZoneChangeRepository, ZoneRepository}
import vinyldns.core.repository.{DataAccessorProvider, DataStore, DataStoreConfig}
import vinyldns.core.repository.DataStoreLoader.getRepoOf
@ -35,6 +39,7 @@ object ApiDataAccessorProvider extends DataAccessorProvider[ApiDataAccessor] {
groupChange,
recordSet,
recordChange,
recordSetCache,
zoneChange,
zone,
batchChange
@ -50,6 +55,7 @@ object ApiDataAccessorProvider extends DataAccessorProvider[ApiDataAccessor] {
getRepoOf[GroupChangeRepository](dataStores, groupChange),
getRepoOf[RecordSetRepository](dataStores, recordSet),
getRepoOf[RecordChangeRepository](dataStores, recordChange),
getRepoOf[RecordSetCacheRepository](dataStores, recordSetCache),
getRepoOf[ZoneChangeRepository](dataStores, zoneChange),
getRepoOf[ZoneRepository](dataStores, zone),
getRepoOf[BatchChangeRepository](dataStores, batchChange)

View File

@ -18,13 +18,17 @@ package vinyldns.api.repository
import cats.effect.{ContextShift, IO}
import cats.implicits._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.slf4j.{Logger, LoggerFactory}
import scalikejdbc.DB
import vinyldns.core.domain.Encrypted
import vinyldns.core.domain.membership._
import vinyldns.core.domain.zone._
import vinyldns.mysql.TransactionProvider
// $COVERAGE-OFF$
object TestDataLoader {
object TestDataLoader extends TransactionProvider {
private implicit val cs: ContextShift[IO] =
IO.contextShift(scala.concurrent.ExecutionContext.global)
@ -34,9 +38,9 @@ object TestDataLoader {
final val testUser = User(
userName = "testuser",
id = "testuser",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "testUserAccessKey",
secretKey = "testUserSecretKey",
secretKey = Encrypted("testUserSecretKey"),
firstName = Some("Test"),
lastName = Some("User"),
email = Some("test@test.com"),
@ -45,9 +49,9 @@ object TestDataLoader {
final val okUser = User(
userName = "ok",
id = "ok",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "okAccessKey",
secretKey = "okSecretKey",
secretKey = Encrypted("okSecretKey"),
firstName = Some("ok"),
lastName = Some("ok"),
email = Some("test@test.com"),
@ -56,17 +60,17 @@ object TestDataLoader {
final val dummyUser = User(
userName = "dummy",
id = "dummy",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "dummyAccessKey",
secretKey = "dummySecretKey",
secretKey = Encrypted("dummySecretKey"),
isTest = true
)
final val sharedZoneUser = User(
userName = "sharedZoneUser",
id = "sharedZoneUser",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "sharedZoneUserAccessKey",
secretKey = "sharedZoneUserSecretKey",
secretKey = Encrypted("sharedZoneUserSecretKey"),
firstName = Some("sharedZoneUser"),
lastName = Some("sharedZoneUser"),
email = Some("test@test.com"),
@ -75,9 +79,9 @@ object TestDataLoader {
final val lockedUser = User(
userName = "locked",
id = "locked",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "lockedAccessKey",
secretKey = "lockedSecretKey",
secretKey = Encrypted("lockedSecretKey"),
firstName = Some("Locked"),
lastName = Some("User"),
email = Some("testlocked@test.com"),
@ -88,18 +92,18 @@ object TestDataLoader {
User(
userName = "name-dummy%03d".format(runner),
id = "dummy%03d".format(runner),
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "dummy",
secretKey = "dummy",
secretKey = Encrypted("dummy"),
isTest = true
)
}
final val listGroupUser = User(
userName = "list-group-user",
id = "list-group-user",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "listGroupAccessKey",
secretKey = "listGroupSecretKey",
secretKey = Encrypted("listGroupSecretKey"),
firstName = Some("list-group"),
lastName = Some("list-group"),
email = Some("test@test.com"),
@ -109,9 +113,9 @@ object TestDataLoader {
final val listZonesUser = User(
userName = "list-zones-user",
id = "list-zones-user",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "listZonesAccessKey",
secretKey = "listZonesSecretKey",
secretKey = Encrypted("listZonesSecretKey"),
firstName = Some("list-zones"),
lastName = Some("list-zones"),
email = Some("test@test.com"),
@ -121,9 +125,9 @@ object TestDataLoader {
final val zoneHistoryUser = User(
userName = "history-user",
id = "history-id",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "history-key",
secretKey = "history-secret",
secretKey = Encrypted("history-secret"),
firstName = Some("history-first"),
lastName = Some("history-last"),
email = Some("history@history.com"),
@ -133,9 +137,9 @@ object TestDataLoader {
final val listRecordsUser = User(
userName = "list-records-user",
id = "list-records-user",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "listRecordsAccessKey",
secretKey = "listRecordsSecretKey",
secretKey = Encrypted("listRecordsSecretKey"),
firstName = Some("list-records"),
lastName = Some("list-records"),
email = Some("test@test.com"),
@ -145,9 +149,9 @@ object TestDataLoader {
final val listBatchChangeSummariesUser = User(
userName = "list-batch-summaries-user",
id = "list-batch-summaries-id",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "listBatchSummariesAccessKey",
secretKey = "listBatchSummariesSecretKey",
secretKey = Encrypted("listBatchSummariesSecretKey"),
firstName = Some("list-batch-summaries"),
lastName = Some("list-batch-summaries"),
email = Some("test@test.com"),
@ -165,9 +169,9 @@ object TestDataLoader {
final val listZeroBatchChangeSummariesUser = User(
userName = "list-zero-summaries-user",
id = "list-zero-summaries-id",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "listZeroSummariesAccessKey",
secretKey = "listZeroSummariesSecretKey",
secretKey = Encrypted("listZeroSummariesSecretKey"),
firstName = Some("list-zero-summaries"),
lastName = Some("list-zero-summaries"),
email = Some("test@test.com"),
@ -177,9 +181,9 @@ object TestDataLoader {
final val supportUser = User(
userName = "support-user",
id = "support-user-id",
created = DateTime.now.secondOfDay().roundFloorCopy(),
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "supportUserAccessKey",
secretKey = "supportUserSecretKey",
secretKey = Encrypted("supportUserSecretKey"),
firstName = Some("support-user"),
lastName = Some("support-user"),
email = Some("test@test.com"),
@ -187,6 +191,19 @@ object TestDataLoader {
isTest = true
)
final val superUser = User(
userName = "super-user",
id = "super-user-id",
created = Instant.now.truncatedTo(ChronoUnit.SECONDS),
accessKey = "superUserAccessKey",
secretKey = Encrypted("superUserSecretKey"),
firstName = Some("super-user"),
lastName = Some("super-user"),
email = Some("test@test.com"),
isSuper = true,
isTest = true
)
final val sharedZoneGroup = Group(
name = "testSharedZoneGroup",
id = "shared-zone-group",
@ -237,15 +254,43 @@ object TestDataLoader {
)
def loadTestData(
userRepo: UserRepository,
groupRepo: GroupRepository,
zoneRepo: ZoneRepository,
membershipRepo: MembershipRepository
): IO[Unit] =
userRepo: UserRepository,
groupRepo: GroupRepository,
zoneRepo: ZoneRepository,
membershipRepo: MembershipRepository
): IO[Unit] = {
def saveMembersData(
groupId: String,
memberUserIds: Set[String],
isAdmin: Boolean
): IO[Unit] = {
executeWithinTransaction { db: DB =>
for {
_ <- membershipRepo.saveMembers(
db: DB,
groupId = groupId,
memberUserIds = memberUserIds,
isAdmin = isAdmin
)
} yield ()
}
}
def saveGroupData(
group: Group
): IO[Unit] = {
executeWithinTransaction { db: DB =>
for {
_ <- groupRepo.save(db, group)
} yield ()
}
}
for {
_ <- (testUser :: okUser :: dummyUser :: sharedZoneUser :: lockedUser :: listGroupUser :: listZonesUser ::
listBatchChangeSummariesUser :: listZeroBatchChangeSummariesUser :: zoneHistoryUser :: supportUser ::
listRecordsUser :: listOfDummyUsers).map { user =>
superUser :: listRecordsUser :: listOfDummyUsers).map { user =>
userRepo.save(user)
}.parSequence
// if the test shared zones exist already, clean them out
@ -267,32 +312,32 @@ object TestDataLoader {
IO.unit
}
_ <- toDelete.map(zoneRepo.save).parSequence
_ <- groupRepo.save(sharedZoneGroup)
_ <- groupRepo.save(globalACLGroup)
_ <- groupRepo.save(anotherGlobalACLGroup)
_ <- groupRepo.save(duGroup)
_ <- groupRepo.save(listBatchChangeSummariesGroup)
_ <- membershipRepo.saveMembers(
_ <- saveGroupData(sharedZoneGroup)
_ <- saveGroupData(globalACLGroup)
_ <- saveGroupData(anotherGlobalACLGroup)
_ <- saveGroupData(duGroup)
_ <- saveGroupData(listBatchChangeSummariesGroup)
_ <- saveMembersData(
groupId = "shared-zone-group",
memberUserIds = Set(sharedZoneUser.id),
isAdmin = true
)
_ <- membershipRepo.saveMembers(
_ <- saveMembersData(
groupId = "global-acl-group-id",
memberUserIds = Set(okUser.id, dummyUser.id),
isAdmin = true
)
_ <- membershipRepo.saveMembers(
_ <- saveMembersData(
groupId = "another-global-acl-group",
memberUserIds = Set(testUser.id),
isAdmin = true
)
_ <- membershipRepo.saveMembers(
_ <- saveMembersData(
groupId = duGroup.id,
memberUserIds = duGroup.memberIds,
isAdmin = true
)
_ <- membershipRepo.saveMembers(
_ <- saveMembersData(
groupId = listBatchChangeSummariesGroup.id,
memberUserIds = listBatchChangeSummariesGroup.memberIds,
isAdmin = true
@ -300,5 +345,6 @@ object TestDataLoader {
_ <- zoneRepo.save(sharedZone)
_ <- zoneRepo.save(nonTestSharedZone)
} yield ()
}
}
// $COVERAGE-ON$

View File

@ -22,8 +22,8 @@ import javax.crypto.spec.SecretKeySpec
import javax.crypto.{Mac, SecretKey}
import akka.http.scaladsl.model.HttpRequest
import org.joda.time.DateTime
import org.joda.time.format.{DateTimeFormat, ISODateTimeFormat}
import java.time.{Instant, ZoneId, ZonedDateTime}
import java.time.format.DateTimeFormatter
import org.slf4j.LoggerFactory
import scala.collection.SortedSet
@ -60,23 +60,21 @@ class Aws4Authenticator {
type Credentials = String
val iso8601Format =
ISODateTimeFormat.basicDateTimeNoMillis.withZoneUTC()
val iso8601Format = DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmssz").withZone(ZoneId.of("UTC"))
val rfc822Format =
DateTimeFormat.forPattern("EEE, d MMM yyyy HH:mm:ss z").withZoneUTC()
val rfc822Format = DateTimeFormatter.ofPattern("EEE, d MMM yyyy HH:mm:ss z").withZone(ZoneId.of("UTC"))
val logger = LoggerFactory.getLogger("Aws4Authenticator")
def getDate(req: HttpRequest): Option[DateTime] = {
def getDate(req: HttpRequest): Option[Instant] = {
val xAmzDate = getHeader(req, "X-Amz-Date")
val dateHeader = getHeader(req, "Date")
if (xAmzDate.isDefined) {
Try(iso8601Format.parseDateTime(xAmzDate.get)).toOption
Try(ZonedDateTime.parse(xAmzDate.get, iso8601Format).toInstant).toOption
} else {
Try(iso8601Format.parseDateTime(dateHeader.get))
.orElse(Try(rfc822Format.parseDateTime(dateHeader.get)))
Try(ZonedDateTime.parse(dateHeader.get, iso8601Format).toInstant)
.orElse(Try(ZonedDateTime.parse(dateHeader.get, rfc822Format).toInstant))
.toOption
}
}
@ -110,7 +108,7 @@ class Aws4Authenticator {
) = authorization
val signedHeaders = Set() ++ signatureHeaders.split(';')
// convert Date header to canonical form required by AWS
val dateTime = iso8601Format.print(getDate(req).get)
val dateTime = iso8601Format.format(getDate(req).get).replace("UTC", "Z")
// get canonical headers, but only those that were in the signed set
val headers = canonicalHeaders(req, signedHeaders).toSeq
// create a canonical representation of the request
@ -118,17 +116,19 @@ class Aws4Authenticator {
// calculate the sig using the generated signing key
val signature = calculateSig(canonicalRequest, dateTime, signatureScope, secret)
// This is worthwhile during the upgrade to akka http and beyond as debugging auth issues is difficult
val sb = new StringBuilder
sb.append(s"SIGNATURE_SCOPE: $signatureScope\r\n")
sb.append(s"SIGNATURE_HEADERS: $signatureHeaders\r\n")
sb.append(s"SIGNED_HEADERS: $signedHeaders\r\n")
sb.append(s"DATE_TIME: $dateTime\r\n")
sb.append(s"HEADERS: $headers\r\n")
sb.append(s"CANONICAL_REQUEST: $canonicalRequest\r\n")
sb.append(s"SIGNATURE_RECEIVED: $signatureReceived\r\n")
sb.append(s"CALCULATED_SIGNATURE: $signature\r\n")
logger.info(sb.toString)
if (logger.isDebugEnabled) {
logger.debug(
s"""SIGNATURE_SCOPE: $signatureScope
|SIGNATURE_HEADERS: $signatureHeaders
|SIGNED_HEADERS: $signedHeaders
|DATE_TIME: $dateTime
|HEADERS: $headers
|CANONICAL_REQUEST: $canonicalRequest
|SIGNATURE_RECEIVED: $signatureReceived
|CALCULATED_SIGNATURE: $signature"""
.stripMargin
)
}
signature.equals(signatureReceived)
}

View File

@ -21,7 +21,7 @@ import cats.data.Validated._
import org.json4s.JsonDSL._
import org.json4s._
import cats.implicits._
import org.joda.time.DateTime
import java.time.Instant
import vinyldns.core.domain.{DomainValidationError, DomainValidationErrorType}
import vinyldns.api.domain.batch.ChangeInputType._
import vinyldns.api.domain.batch._
@ -33,6 +33,7 @@ trait BatchChangeJsonProtocol extends JsonValidation {
val batchChangeSerializers = Seq(
JsonEnumV(ChangeInputType),
JsonEnumV(RecordType),
JsonEnumV(SingleChangeStatus),
JsonEnumV(BatchChangeStatus),
JsonEnumV(BatchChangeApprovalStatus),
@ -62,7 +63,7 @@ trait BatchChangeJsonProtocol extends JsonValidation {
(js \ "comments").optional[String],
changeList,
(js \ "ownerGroupId").optional[String],
(js \ "scheduledTime").optional[DateTime]
(js \ "scheduledTime").optional[Instant]
).mapN(BatchChangeInput(_, _, _, _))
}
}
@ -87,6 +88,7 @@ trait BatchChangeJsonProtocol extends JsonValidation {
(
(js \ "inputName").required[String]("Missing BatchChangeInput.changes.inputName"),
recordType,
(js \ "systemMessage").optional[String],
(js \ "ttl").optional[Long],
recordType.andThen(extractRecord(_, js \ "record"))
).mapN(AddChangeInput.apply)
@ -113,6 +115,7 @@ trait BatchChangeJsonProtocol extends JsonValidation {
(
(js \ "inputName").required[String]("Missing BatchChangeInput.changes.inputName"),
recordType,
(js \ "systemMessage").optional[String],
recordData
).mapN(DeleteRRSetChangeInput.apply)
}
@ -248,8 +251,23 @@ trait BatchChangeJsonProtocol extends JsonValidation {
js.required[MXData](
"Missing BatchChangeInput.changes.record.preference and BatchChangeInput.changes.record.exchange"
)
case NS => js.required[NSData]("Missing BatchChangeInput.changes.record.nsdname")
case SRV => js.required[SRVData](
"Missing BatchChangeInput.changes.record.priority and " +
"Missing BatchChangeInput.changes.record.weight and " +
"Missing BatchChangeInput.changes.record.port and " +
"Missing BatchChangeInput.changes.record.target"
)
case NAPTR => js.required[NAPTRData](
"Missing BatchChangeInput.changes.record.order and " +
"Missing BatchChangeInput.changes.record.preference and " +
"Missing BatchChangeInput.changes.record.flags and " +
"Missing BatchChangeInput.changes.record.service and " +
"Missing BatchChangeInput.changes.record.regexp and " +
"Missing BatchChangeInput.changes.record.replacement"
)
case _ =>
s"Unsupported type $typ, valid types include: A, AAAA, CNAME, PTR, TXT, and MX".invalidNel
s"Unsupported type $typ, valid types include: A, AAAA, CNAME, PTR, TXT, MX, NS, SRV and NAPTR".invalidNel
}
}
}

View File

@ -16,15 +16,16 @@
package vinyldns.api.route
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.{RejectionHandler, Route, ValidationRejection}
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.config.ManualReviewConfig
import vinyldns.core.domain.batch._
import vinyldns.api.config.{LimitsConfig, ManualReviewConfig}
import vinyldns.api.domain.batch._
import vinyldns.core.domain.batch._
class BatchChangeRoute(
batchChangeService: BatchChangeServiceAlgebra,
limitsConfig: LimitsConfig,
val vinylDNSAuthenticator: VinylDNSAuthenticator,
manualReviewConfig: ManualReviewConfig
) extends VinylDNSJsonProtocol
@ -54,7 +55,7 @@ class BatchChangeRoute(
case scnpd: ScheduledChangeNotDue => complete(StatusCodes.Forbidden, scnpd.message)
}
final private val MAX_ITEMS_LIMIT: Int = 100
final private val MAX_ITEMS_LIMIT: Int = limitsConfig.BATCHCHANGE_ROUTING_MAX_ITEMS_LIMIT
val batchChangeRoute: Route = {
val standardBatchChangeRoutes = path("zones" / "batchrecordchanges") {
@ -69,22 +70,28 @@ class BatchChangeRoute(
}
}
} ~
(get & monitor("Endpoint.listBatchChangeSummaries")) {
parameters(
"startFrom".as[Int].?,
"maxItems".as[Int].?(MAX_ITEMS_LIMIT),
"ignoreAccess".as[Boolean].?(false),
"approvalStatus".as[String].?
) {
(
startFrom: Option[Int],
maxItems: Int,
ignoreAccess: Boolean,
approvalStatus: Option[String]
) =>
{
val convertApprovalStatus = approvalStatus.flatMap(BatchChangeApprovalStatus.find)
(get & monitor("Endpoint.listBatchChangeSummaries")) {
parameters(
"userName".as[String].?,
"dateTimeRangeStart".as[String].?,
"dateTimeRangeEnd".as[String].?,
"startFrom".as[Int].?,
"maxItems".as[Int].?(MAX_ITEMS_LIMIT),
"ignoreAccess".as[Boolean].?(false),
"approvalStatus".as[String].?
) {
(
userName: Option[String],
dateTimeRangeStart: Option[String],
dateTimeRangeEnd: Option[String],
startFrom: Option[Int],
maxItems: Int,
ignoreAccess: Boolean,
approvalStatus: Option[String]
) =>
{
val convertApprovalStatus = approvalStatus.flatMap(BatchChangeApprovalStatus.find)
handleRejections(invalidQueryHandler) {
validate(
0 < maxItems && maxItems <= MAX_ITEMS_LIMIT,
@ -93,9 +100,14 @@ class BatchChangeRoute(
authenticateAndExecute(
batchChangeService.listBatchChangeSummaries(
_,
userName,
dateTimeRangeStart,
dateTimeRangeEnd,
startFrom,
maxItems,
ignoreAccess,
// TODO: Update batch status from None to its actual value when the feature is ready for release
None,
convertApprovalStatus
)
) { summaries =>
@ -104,9 +116,9 @@ class BatchChangeRoute(
}
}
}
}
}
}
} ~
} ~
path("zones" / "batchrecordchanges" / Segment) { id =>
(get & monitor("Endpoint.getBatchChange")) {
authenticateAndExecute(batchChangeService.getBatchChange(id, _)) { chg =>

View File

@ -17,20 +17,22 @@
package vinyldns.api.route
import java.util.UUID
import cats.data._
import cats.implicits._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.json4s.JsonDSL._
import org.json4s._
import scodec.bits.{Bases, ByteVector}
import vinyldns.api.domain.zone.{RecordSetGlobalInfo, RecordSetInfo, RecordSetListInfo}
import vinyldns.core.domain.DomainHelpers.ensureTrailingDot
import vinyldns.core.domain.DomainHelpers.removeWhitespace
import vinyldns.core.domain.Fqdn
import vinyldns.core.domain.{EncryptFromJson, Encrypted, Fqdn}
import vinyldns.core.domain.record._
import vinyldns.core.domain.zone._
import vinyldns.core.Messages._
import vinyldns.core.domain.record.OwnerShipTransferStatus
import vinyldns.core.domain.record.OwnerShipTransferStatus.OwnerShipTransferStatus
trait DnsJsonProtocol extends JsonValidation {
import vinyldns.core.domain.record.RecordType._
@ -40,12 +42,15 @@ trait DnsJsonProtocol extends JsonValidation {
UpdateZoneInputSerializer,
ZoneConnectionSerializer,
AlgorithmSerializer,
EncryptedSerializer,
RecordSetSerializer,
ownerShipTransferSerializer,
RecordSetListInfoSerializer,
RecordSetGlobalInfoSerializer,
RecordSetInfoSerializer,
RecordSetChangeSerializer,
JsonEnumV(ZoneStatus),
JsonEnumV(OwnerShipTransferStatus),
JsonEnumV(ZoneChangeStatus),
JsonEnumV(RecordSetStatus),
JsonEnumV(RecordSetChangeStatus),
@ -53,6 +58,7 @@ trait DnsJsonProtocol extends JsonValidation {
JsonEnumV(ZoneChangeType),
JsonEnumV(RecordSetChangeType),
JsonEnumV(NameSort),
JsonEnumV(RecordTypeSort),
ASerializer,
AAAASerializer,
CNAMESerializer,
@ -78,12 +84,24 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "userId").required[String]("Missing RecordSetChange.userId"),
(js \ "changeType").required(RecordSetChangeType, "Missing RecordSetChange.changeType"),
(js \ "status").default(RecordSetChangeStatus, RecordSetChangeStatus.Pending),
(js \ "created").default[DateTime](DateTime.now),
(js \ "created").default[Instant](Instant.now.truncatedTo(ChronoUnit.MILLIS)),
(js \ "systemMessage").optional[String],
(js \ "updates").optional[RecordSet],
(js \ "id").default[String](UUID.randomUUID.toString),
(js \ "singleBatchChangeIds").default[List[String]](List())
).mapN(RecordSetChange.apply)
).mapN(RecordSetChange.apply)
override def toJson(rs: RecordSetChange): JValue =
("zone" -> Extraction.decompose(rs.zone)) ~
("recordSet" -> Extraction.decompose(rs.recordSet)) ~
("userId" -> rs.userId) ~
("changeType" -> Extraction.decompose(rs.changeType)) ~
("status" -> Extraction.decompose(rs.status)) ~
("created" -> Extraction.decompose(rs.created)) ~
("systemMessage" -> rs.systemMessage) ~
("updates" -> Extraction.decompose(rs.updates)) ~
("id" -> rs.id) ~
("singleBatchChangeIds" -> Extraction.decompose(rs.singleBatchChangeIds))
}
case object CreateZoneInputSerializer extends ValidationSerializer[CreateZoneInput] {
@ -99,8 +117,10 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "shared").default[Boolean](false),
(js \ "acl").default[ZoneACL](ZoneACL()),
(js \ "adminGroupId").required[String]("Missing Zone.adminGroupId"),
(js \ "backendId").optional[String]
).mapN(CreateZoneInput.apply)
(js \ "backendId").optional[String],
(js \ "recurrenceSchedule").optional[String],
(js \ "scheduleRequestor").optional[String],
).mapN(CreateZoneInput.apply)
}
case object UpdateZoneInputSerializer extends ValidationSerializer[UpdateZoneInput] {
@ -116,8 +136,10 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "shared").default[Boolean](false),
(js \ "acl").default[ZoneACL](ZoneACL()),
(js \ "adminGroupId").required[String]("Missing Zone.adminGroupId"),
(js \ "backendId").optional[String]
).mapN(UpdateZoneInput.apply)
(js \ "recurrenceSchedule").optional[String],
(js \ "scheduleRequestor").optional[String],
(js \ "backendId").optional[String],
).mapN(UpdateZoneInput.apply)
}
case object AlgorithmSerializer extends ValidationSerializer[Algorithm] {
@ -130,18 +152,30 @@ trait DnsJsonProtocol extends JsonValidation {
override def toJson(a: Algorithm): JValue = JString(a.name)
}
case object EncryptedSerializer extends ValidationSerializer[Encrypted] {
override def fromJson(js: JValue): ValidatedNel[String, Encrypted] =
js match {
case JString(value) => EncryptFromJson.fromString(value).toValidatedNel
case _ => "Unsupported type for zone connection key, must be a string".invalidNel
}
override def toJson(a: Encrypted): JValue = JString(a.value)
}
case object ZoneConnectionSerializer extends ValidationSerializer[ZoneConnection] {
override def fromJson(js: JValue): ValidatedNel[String, ZoneConnection] =
(
(js \ "name").required[String]("Missing ZoneConnection.name"),
(js \ "keyName").required[String]("Missing ZoneConnection.keyName"),
(js \ "key").required[String]("Missing ZoneConnection.key"),
(js \ "key").required[Encrypted]("Missing ZoneConnection.key"),
(js \ "primaryServer").required[String]("Missing ZoneConnection.primaryServer"),
(js \ "algorithm").default[Algorithm](Algorithm.HMAC_MD5)
).mapN(ZoneConnection.apply)
).mapN(ZoneConnection.apply)
}
def checkDomainNameLen(s: String): Boolean = s.length <= 255
def validateNaptrFlag(flag: String): Boolean = flag == "U" || flag == "S" || flag == "A" || flag == "P"
def validateNaptrRegexp(regexp: String): Boolean = regexp.startsWith("!") && regexp.endsWith("!") || regexp == ""
def nameContainsDots(s: String): Boolean = s.contains(".")
def nameDoesNotContainSpaces(s: String): Boolean = !s.contains(" ")
@ -151,22 +185,22 @@ trait DnsJsonProtocol extends JsonValidation {
// Adapted from https://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses
// As noted in comments, might fail in very unusual edge cases
val ipv6Re =
"""^(
#([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|
#([0-9a-fA-F]{1,4}:){1,7}:|
#([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|
#([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|
#([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|
#([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|
#([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|
#[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|
#:((:[0-9a-fA-F]{1,4}){1,7}|:)|
#fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|
#::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|
#(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|
#([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|
#(2[0-4]|1{0,1}[0-9]){0,1}[0-9])
#)$""".stripMargin('#').replaceAll("\n", "").r
"""^(
#([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|
#([0-9a-fA-F]{1,4}:){1,7}:|
#([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|
#([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|
#([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|
#([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|
#([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|
#[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|
#:((:[0-9a-fA-F]{1,4}){1,7}|:)|
#fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|
#::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|
#(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|
#([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|
#(2[0-4]|1{0,1}[0-9]){0,1}[0-9])
#)$""".stripMargin('#').replaceAll("\n", "").r
def ipv4Match(s: String): Boolean = ipv4Re.findFirstIn(s).isDefined
def ipv6Match(s: String): Boolean = ipv6Re.findFirstIn(s).isDefined
@ -195,15 +229,16 @@ trait DnsJsonProtocol extends JsonValidation {
"RecordSet.ttl must be a positive signed 32 bit number greater than or equal to 30" -> (_ >= 30)
),
(js \ "status").default(RecordSetStatus, RecordSetStatus.Pending),
(js \ "created").default[DateTime](DateTime.now),
(js \ "updated").optional[DateTime],
(js \ "created").default[Instant](Instant.now.truncatedTo(ChronoUnit.MILLIS)),
(js \ "updated").optional[Instant],
recordType
.andThen(extractRecords(_, js \ "records")),
(js \ "id").default[String](UUID.randomUUID().toString),
(js \ "account").default[String]("system"),
(js \ "ownerGroupId").optional[String],
(js \ "recordSetGroupChange").optional[OwnerShipTransfer],
(js \ "fqdn").optional[String]
).mapN(RecordSet.apply)
).mapN(RecordSet.apply)
// Put additional record set level checks below
recordSetResult.checkIf(recordTypeGet == RecordType.CNAME)(
@ -226,15 +261,29 @@ trait DnsJsonProtocol extends JsonValidation {
("id" -> rs.id) ~
("account" -> rs.account) ~
("ownerGroupId" -> rs.ownerGroupId) ~
("recordSetGroupChange" -> Extraction.decompose(rs.recordSetGroupChange)) ~
("fqdn" -> rs.fqdn)
}
case object ownerShipTransferSerializer extends ValidationSerializer[OwnerShipTransfer] {
override def fromJson(js: JValue): ValidatedNel[String, OwnerShipTransfer] =
(
(js \ "ownerShipTransferStatus").required[OwnerShipTransferStatus]("Missing ownerShipTransfer.ownerShipTransferStatus"),
(js \ "requestedOwnerGroupId").optional[String],
).mapN(OwnerShipTransfer.apply)
override def toJson(rsa: OwnerShipTransfer): JValue =
("ownerShipTransferStatus" -> Extraction.decompose(rsa.ownerShipTransferStatus)) ~
("requestedOwnerGroupId" -> Extraction.decompose(rsa.requestedOwnerGroupId))
}
case object RecordSetListInfoSerializer extends ValidationSerializer[RecordSetListInfo] {
override def fromJson(js: JValue): ValidatedNel[String, RecordSetListInfo] =
(
RecordSetInfoSerializer.fromJson(js),
(js \ "accessLevel").required[AccessLevel.AccessLevel]("Missing RecordSet.zoneId")
).mapN(RecordSetListInfo.apply)
).mapN(RecordSetListInfo.apply)
override def toJson(rs: RecordSetListInfo): JValue =
("type" -> Extraction.decompose(rs.typ)) ~
@ -250,6 +299,7 @@ trait DnsJsonProtocol extends JsonValidation {
("accessLevel" -> rs.accessLevel.toString) ~
("ownerGroupId" -> rs.ownerGroupId) ~
("ownerGroupName" -> rs.ownerGroupName) ~
("recordSetGroupChange" -> Extraction.decompose(rs.recordSetGroupChange)) ~
("fqdn" -> rs.fqdn)
}
@ -271,6 +321,7 @@ trait DnsJsonProtocol extends JsonValidation {
("account" -> rs.account) ~
("ownerGroupId" -> rs.ownerGroupId) ~
("ownerGroupName" -> rs.ownerGroupName) ~
("recordSetGroupChange" -> Extraction.decompose(rs.recordSetGroupChange)) ~
("fqdn" -> rs.fqdn)
}
@ -281,7 +332,7 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "zoneName").required[String]("Missing Zone.name"),
(js \ "zoneShared").required[Boolean]("Missing Zone.shared"),
(js \ "ownerGroupName").optional[String]
).mapN(RecordSetGlobalInfo.apply)
).mapN(RecordSetGlobalInfo.apply)
override def toJson(rs: RecordSetGlobalInfo): JValue =
("type" -> Extraction.decompose(rs.typ)) ~
@ -296,6 +347,7 @@ trait DnsJsonProtocol extends JsonValidation {
("account" -> rs.account) ~
("ownerGroupId" -> rs.ownerGroupId) ~
("ownerGroupName" -> rs.ownerGroupName) ~
("recordSetGroupChange" -> Extraction.decompose(rs.recordSetGroupChange)) ~
("fqdn" -> rs.fqdn) ~
("zoneName" -> rs.zoneName) ~
("zoneShared" -> rs.zoneShared)
@ -365,7 +417,7 @@ trait DnsJsonProtocol extends JsonValidation {
"MX.exchange must be less than 255 characters" -> checkDomainNameLen
)
.map(Fqdn.apply)
).mapN(MXData.apply)
).mapN(MXData.apply)
}
case object NSSerializer extends ValidationSerializer[NSData] {
@ -431,7 +483,7 @@ trait DnsJsonProtocol extends JsonValidation {
.check(
"SOA.minimum must be an unsigned 32 bit number" -> (i => i <= 4294967295L && i >= 0)
)
).mapN(SOAData.apply)
).mapN(SOAData.apply)
}
case object SPFSerializer extends ValidationSerializer[SPFData] {
@ -468,7 +520,7 @@ trait DnsJsonProtocol extends JsonValidation {
"SRV.target must be less than 255 characters" -> checkDomainNameLen
)
.map(Fqdn.apply)
).mapN(SRVData.apply)
).mapN(SRVData.apply)
}
case object NAPTRSerializer extends ValidationSerializer[NAPTRData] {
@ -487,7 +539,7 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "flags")
.required[String]("Missing NAPTR.flags")
.check(
"NAPTR.flags must be less than 2 characters" -> (_.length < 2)
"Invalid NAPTR.flag. Valid NAPTR flag value must be U, S, A or P" -> validateNaptrFlag
),
(js \ "service")
.required[String]("Missing NAPTR.service")
@ -497,16 +549,16 @@ trait DnsJsonProtocol extends JsonValidation {
(js \ "regexp")
.required[String]("Missing NAPTR.regexp")
.check(
"NAPTR.regexp must be less than 255 characters" -> checkDomainNameLen
"Invalid NAPTR.regexp. Valid NAPTR regexp value must start and end with '!' or can be empty" -> validateNaptrRegexp
),
// should also check regex validity
(js \ "replacement")
.required[String]("Missing NAPTR.replacement")
.check(
"NAPTR.replacement must be less than 255 characters" -> checkDomainNameLen
)
.map(Fqdn.apply)
).mapN(NAPTRData.apply)
).mapN(NAPTRData.apply)
}
case object SSHFPSerializer extends ValidationSerializer[SSHFPData] {
@ -523,7 +575,7 @@ trait DnsJsonProtocol extends JsonValidation {
"SSHFP.type must be an unsigned 8 bit number" -> (i => i <= 255 && i >= 0)
),
(js \ "fingerprint").required[String]("Missing SSHFP.fingerprint")
).mapN(SSHFPData.apply)
).mapN(SSHFPData.apply)
// necessary because type != typ
override def toJson(rr: SSHFPData): JValue =
@ -561,7 +613,7 @@ trait DnsJsonProtocol extends JsonValidation {
case Some(v) => v.validNel
case None => "Could not convert digest to valid hex".invalidNel
}
).mapN(DSData.apply)
).mapN(DSData.apply)
override def toJson(rr: DSData): JValue =
("keytag" -> Extraction.decompose(rr.keyTag)) ~

View File

@ -23,7 +23,8 @@ import cats.data._
import cats.implicits._
import com.fasterxml.jackson.core.JsonParseException
import de.heikoseeberger.akkahttpjson4s.Json4sSupport
import org.joda.time.DateTime
import java.util.Date
import java.time.Instant
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.ext._
@ -33,29 +34,16 @@ import scala.reflect.ClassTag
case class JsonErrors(errors: List[String])
// TODO: An update to json4s changed the date time formatting. In order to stay compatible, had to
// revert the date time formatting here. When changing to circe (updating to java8 instant),
// be sure to check the format of date time
object VinylDateParser {
def parse(s: String, format: Formats): Long =
format.dateFormat
.parse(s)
.map(_.getTime)
.getOrElse(
throw new MappingException(
s"Invalid date format $s; provide the date format as YYYY-MM-DDTHH:MM:SSZ"
)
)
}
case object VinylDateTimeSerializer
extends CustomSerializer[DateTime](
extends CustomSerializer[Instant](
format =>
(
{
case JString(s) => new DateTime(VinylDateParser.parse(s, format))
case JInt(s) => Instant.ofEpochMilli(s.longValue())
case JString(s) => Instant.ofEpochMilli(format.dateFormat.parse(s).map(_.getTime).getOrElse(0L))
case JNull => null
}, {
case d: DateTime => JString(format.dateFormat.format(d.toDate))
case d: Instant => JString(format.dateFormat.format(Date.from(d)))
}
)
)
@ -65,7 +53,7 @@ trait JsonValidationSupport extends Json4sSupport {
import scala.collection._
// this is where you define all serializers, custom and validating serializers
val serializers: Traversable[Serializer[_]]
val serializers: Iterable[Serializer[_]]
// TODO: needed in order to stay backward compatible for date time formatting,
// should be removed when we upgrade json libs
@ -87,7 +75,7 @@ trait JsonValidationSupport extends Json4sSupport {
* @return An adjusted Formats without the serializer passed in
*/
private[route] def adjustedFormats(ser: Serializer[_]) =
DefaultFormats ++ JodaTimeSerializers.all ++ serializers.filterNot(_.equals(ser))
DefaultFormats ++ JavaTimeSerializers.all ++ serializers.filterNot(_.equals(ser))
implicit def json4sJacksonFormats: Formats = DefaultFormats ++ dtSerializers ++ serializers
}

View File

@ -17,11 +17,12 @@
package vinyldns.api.route
import java.util.UUID
import cats.data._
import cats.implicits._
import org.joda.time.DateTime
import java.time.Instant
import java.time.temporal.ChronoUnit
import org.json4s._
import org.json4s.JsonDSL._
import vinyldns.api.domain.membership._
import vinyldns.core.domain.membership.{Group, GroupChangeType, GroupStatus, LockStatus}
@ -92,7 +93,7 @@ trait MembershipJsonProtocol extends JsonValidation {
(js \ "email").required[String]("Missing Group.email"),
(js \ "description").optional[String],
(js \ "id").default[String](UUID.randomUUID().toString),
(js \ "created").default[DateTime](DateTime.now),
(js \ "created").default[Instant](Instant.now.truncatedTo(ChronoUnit.MILLIS)),
(js \ "status").default(GroupStatus, GroupStatus.Active),
(js \ "memberIds").default[Set[String]](Set.empty),
(js \ "adminUserIds").default[Set[String]](Set.empty)
@ -100,28 +101,52 @@ trait MembershipJsonProtocol extends JsonValidation {
}
case object GroupInfoSerializer extends ValidationSerializer[GroupInfo] {
override def fromJson(js: JValue): ValidatedNel[String, GroupInfo] =
override def fromJson(js: JValue): ValidatedNel[String, GroupInfo] = {
(
(js \ "id").default[String](UUID.randomUUID().toString),
(js \ "name").required[String]("Missing Group.name"),
(js \ "email").required[String]("Missing Group.email"),
(js \ "description").optional[String],
(js \ "created").default[DateTime](DateTime.now),
(js \ "created").default[Instant](Instant.now.truncatedTo(ChronoUnit.MILLIS)),
(js \ "status").default(GroupStatus, GroupStatus.Active),
(js \ "members").default[Set[UserId]](Set.empty),
(js \ "admins").default[Set[UserId]](Set.empty)
).mapN(GroupInfo.apply)
}
override def toJson(gi: GroupInfo): JValue =
("id" -> gi.id) ~
("name" -> gi.name) ~
("email" -> gi.email) ~
("description" -> gi.description) ~
("created" -> Extraction.decompose(gi.created)) ~
("status" -> Extraction.decompose(gi.status)) ~
("members" -> Extraction.decompose(gi.members)) ~
("admins" -> Extraction.decompose(gi.admins))
}
case object GroupChangeInfoSerializer extends ValidationSerializer[GroupChangeInfo] {
override def fromJson(js: JValue): ValidatedNel[String, GroupChangeInfo] =
override def fromJson(js: JValue): ValidatedNel[String, GroupChangeInfo] = {
(
(js \ "newGroup").required[GroupInfo]("Missing new group"),
(js \ "changeType").required(GroupChangeType, "Missing change type"),
(js \ "userId").required[String]("Missing userId"),
(js \ "oldGroup").optional[GroupInfo],
(js \ "id").default[String](UUID.randomUUID().toString),
(js \ "created").default[String](DateTime.now.getMillis.toString)
(js \ "created").default[Instant](Instant.now.truncatedTo(ChronoUnit.MILLIS)),
(js \ "userName").required[String]("Missing userName"),
(js \ "groupChangeMessage").required[String]("Missing groupChangeMessage"),
).mapN(GroupChangeInfo.apply)
}
override def toJson(gci: GroupChangeInfo): JValue =
("newGroup" -> Extraction.decompose(gci.newGroup)) ~
("changeType" -> Extraction.decompose(gci.changeType)) ~
("userId" -> gci.userId) ~
("oldGroup" -> Extraction.decompose(gci.oldGroup)) ~
("id" -> gci.id) ~
("created" -> gci.created.toString) ~
("userName" -> gci.userName) ~
("groupChangeMessage" -> gci.groupChangeMessage)
}
}

View File

@ -19,6 +19,7 @@ package vinyldns.api.route
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server._
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.config.LimitsConfig
import vinyldns.api.domain.membership._
import vinyldns.api.domain.zone.NotAuthorizedError
import vinyldns.api.route.MembershipJsonProtocol.{CreateGroupInput, UpdateGroupInput}
@ -26,12 +27,15 @@ import vinyldns.core.domain.membership.{Group, LockStatus}
class MembershipRoute(
membershipService: MembershipServiceAlgebra,
limitsConfig: LimitsConfig,
val vinylDNSAuthenticator: VinylDNSAuthenticator
) extends VinylDNSJsonProtocol
with VinylDNSDirectives[Throwable] {
final private val DEFAULT_MAX_ITEMS: Int = 100
final private val MAX_ITEMS_LIMIT: Int = 1000
final private val MAX_GROUPS_LIST_LIMIT: Int = 1500
final private val DEFAULT_MAX_ITEMS: Int = limitsConfig.MEMBERSHIP_ROUTING_DEFAULT_MAX_ITEMS
final private val MAX_ITEMS_LIMIT: Int = limitsConfig.MEMBERSHIP_ROUTING_MAX_ITEMS_LIMIT
final private val MAX_GROUPS_LIST_LIMIT: Int =
limitsConfig.MEMBERSHIP_ROUTING_MAX_GROUPS_LIST_LIMIT
def getRoutes: Route = membershipRoute
@ -41,9 +45,11 @@ class MembershipRoute(
case GroupNotFoundError(msg) => complete(StatusCodes.NotFound, msg)
case NotAuthorizedError(msg) => complete(StatusCodes.Forbidden, msg)
case GroupAlreadyExistsError(msg) => complete(StatusCodes.Conflict, msg)
case GroupValidationError(msg) => complete(StatusCodes.BadRequest, msg)
case InvalidGroupError(msg) => complete(StatusCodes.BadRequest, msg)
case UserNotFoundError(msg) => complete(StatusCodes.NotFound, msg)
case InvalidGroupRequestError(msg) => complete(StatusCodes.BadRequest, msg)
case EmailValidationError(msg) => complete(StatusCodes.BadRequest, msg)
}
val membershipRoute: Route = path("groups" / Segment) { groupId =>
@ -75,16 +81,18 @@ class MembershipRoute(
} ~
(get & monitor("Endpoint.listMyGroups")) {
parameters(
"startFrom".?,
"startFrom".as[String].?,
"maxItems".as[Int].?(DEFAULT_MAX_ITEMS),
"groupNameFilter".?,
"ignoreAccess".as[Boolean].?(false)
"ignoreAccess".as[Boolean].?(false),
"abridged".as[Boolean].?(false),
) {
(
startFrom: Option[String],
maxItems: Int,
groupNameFilter: Option[String],
ignoreAccess: Boolean
ignoreAccess: Boolean,
abridged: Boolean
) =>
{
handleRejections(invalidQueryHandler) {
@ -97,7 +105,7 @@ class MembershipRoute(
) {
authenticateAndExecute(
membershipService
.listMyGroups(groupNameFilter, startFrom, maxItems, _, ignoreAccess)
.listMyGroups(groupNameFilter, startFrom, maxItems, _, ignoreAccess, abridged)
) { groups =>
complete(StatusCodes.OK, groups)
}
@ -154,8 +162,8 @@ class MembershipRoute(
} ~
path("groups" / Segment / "activity") { groupId =>
(get & monitor("Endpoint.groupActivity")) {
parameters("startFrom".?, "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Option[String], maxItems: Int) =>
parameters("startFrom".as[Int].?, "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Option[Int], maxItems: Int) =>
handleRejections(invalidQueryHandler) {
validate(
0 < maxItems && maxItems <= MAX_ITEMS_LIMIT,
@ -172,6 +180,20 @@ class MembershipRoute(
}
}
} ~
path("groups" / "change" / Segment) { groupChangeId =>
(get & monitor("Endpoint.groupSingleChange")) {
authenticateAndExecute(membershipService.getGroupChange(groupChangeId, _)) { groupChange =>
complete(StatusCodes.OK, groupChange)
}
}
} ~
path("groups" / "valid" / "domains") {
(get & monitor("Endpoint.validdomains")) {
authenticateAndExecute(membershipService.listEmailDomains) { emailDomains =>
complete(StatusCodes.OK, emailDomains)
}
}
} ~
path("users" / Segment / "lock") { id =>
(put & monitor("Endpoint.lockUser")) {
authenticateAndExecute(membershipService.updateUserLockStatus(id, LockStatus.Locked, _)) {
@ -187,6 +209,14 @@ class MembershipRoute(
complete(StatusCodes.OK, UserInfo(user))
}
}
} ~
path("users" / Segment) { id =>
(get & monitor("Endpoint.getUser")) {
authenticateAndExecute(membershipService.getUserDetails(id, _)) {
user =>
complete(StatusCodes.OK, user)
}
}
}
private val invalidQueryHandler = RejectionHandler

View File

@ -22,10 +22,12 @@ import akka.util.Timeout
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.Interfaces._
import vinyldns.api.domain.record.RecordSetServiceAlgebra
import vinyldns.api.config.LimitsConfig
import vinyldns.api.domain.zone._
import vinyldns.core.domain.record.NameSort.NameSort
import vinyldns.core.domain.record.RecordType.RecordType
import vinyldns.core.domain.record.{NameSort, RecordSet, RecordType}
import vinyldns.core.domain.record.RecordTypeSort.RecordTypeSort
import vinyldns.core.domain.record.{NameSort, RecordSet, RecordType, RecordTypeSort}
import vinyldns.core.domain.zone.ZoneCommandResult
import scala.concurrent.duration._
@ -33,38 +35,40 @@ import scala.concurrent.duration._
case class GetRecordSetResponse(recordSet: RecordSetInfo)
case class ListGlobalRecordSetsResponse(
recordSets: List[RecordSetGlobalInfo],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Option[Int] = None,
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]] = None,
recordOwnerGroupFilter: Option[String] = None,
nameSort: NameSort
)
recordSets: List[RecordSetGlobalInfo],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Option[Int] = None,
recordNameFilter: String,
recordTypeFilter: Option[Set[RecordType]] = None,
recordOwnerGroupFilter: Option[String] = None,
nameSort: NameSort
)
case class ListRecordSetsByZoneResponse(
recordSets: List[RecordSetListInfo],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Option[Int] = None,
recordNameFilter: Option[String] = None,
recordTypeFilter: Option[Set[RecordType]] = None,
recordOwnerGroupFilter: Option[String] = None,
nameSort: NameSort
)
recordSets: List[RecordSetListInfo],
startFrom: Option[String] = None,
nextId: Option[String] = None,
maxItems: Option[Int] = None,
recordNameFilter: Option[String] = None,
recordTypeFilter: Option[Set[RecordType]] = None,
recordOwnerGroupFilter: Option[String] = None,
nameSort: NameSort,
recordTypeSort: RecordTypeSort
)
class RecordSetRoute(
recordSetService: RecordSetServiceAlgebra,
val vinylDNSAuthenticator: VinylDNSAuthenticator
) extends VinylDNSJsonProtocol
with VinylDNSDirectives[Throwable] {
recordSetService: RecordSetServiceAlgebra,
limitsConfig: LimitsConfig,
val vinylDNSAuthenticator: VinylDNSAuthenticator
) extends VinylDNSJsonProtocol
with VinylDNSDirectives[Throwable] {
def getRoutes: Route = recordSetRoute
def logger: Logger = LoggerFactory.getLogger(classOf[RecordSetRoute])
final private val DEFAULT_MAX_ITEMS: Int = 100
final private val DEFAULT_MAX_ITEMS: Int = limitsConfig.RECORDSET_ROUTING_DEFAULT_MAX_ITEMS
// Timeout must be long enough to allow the cluster to form
implicit val rsCmdTimeout: Timeout = Timeout(10.seconds)
@ -98,15 +102,17 @@ class RecordSetRoute(
"recordNameFilter".?,
"recordTypeFilter".?,
"recordOwnerGroupFilter".?,
"nameSort".as[String].?("ASC")
"nameSort".as[String].?("ASC"),
"recordTypeSort".as[String].?("None")
) {
(
startFrom: Option[String],
maxItems: Int,
recordNameFilter: Option[String],
recordTypeFilter: Option[String],
recordOwnerGroupFilter: Option[String],
nameSort: String
startFrom: Option[String],
maxItems: Int,
recordNameFilter: Option[String],
recordTypeFilter: Option[String],
recordOwnerGroupFilter: Option[String],
nameSort: String,
recordTypeSort: String
) =>
val convertedRecordTypeFilter = convertRecordTypeFilter(recordTypeFilter)
handleRejections(invalidQueryHandler) {
@ -124,8 +130,9 @@ class RecordSetRoute(
convertedRecordTypeFilter,
recordOwnerGroupFilter,
NameSort.find(nameSort),
_
)
_,
RecordTypeSort.find(recordTypeSort),
)
) { rsResponse =>
complete(StatusCodes.OK, rsResponse)
}
@ -142,15 +149,17 @@ class RecordSetRoute(
"recordNameFilter".as[String],
"recordTypeFilter".?,
"recordOwnerGroupFilter".?,
"nameSort".as[String].?("ASC")
"nameSort".as[String].?("ASC"),
"recordTypeSort".as[String].?("NONE")
) {
(
startFrom: Option[String],
maxItems: Int,
recordNameFilter: String,
recordTypeFilter: Option[String],
recordOwnerGroupFilter: Option[String],
nameSort: String
startFrom: Option[String],
maxItems: Int,
recordNameFilter: String,
recordTypeFilter: Option[String],
recordOwnerGroupFilter: Option[String],
nameSort: String,
recordTypeSort: String
) =>
val convertedRecordTypeFilter = convertRecordTypeFilter(recordTypeFilter)
handleRejections(invalidQueryHandler) {
@ -160,14 +169,15 @@ class RecordSetRoute(
) {
authenticateAndExecute(
recordSetService
.listRecordSets(
.searchRecordSets(
startFrom,
Some(maxItems),
recordNameFilter,
convertedRecordTypeFilter,
recordOwnerGroupFilter,
NameSort.find(nameSort),
_
_,
RecordTypeSort.find(recordTypeSort)
)
) { rsResponse =>
complete(StatusCodes.OK, rsResponse)
@ -177,6 +187,13 @@ class RecordSetRoute(
}
}
} ~
path("zones" / Segment / "recordsetcount") { zoneId =>
(get & monitor("Endpoint.getRecordSetCount")) {
authenticateAndExecute(recordSetService.getRecordSetCount(zoneId, _)) { count =>
complete(StatusCodes.OK, count)
}
}
} ~
path("zones" / Segment / "recordsets" / Segment) { (zoneId, rsId) =>
(get & monitor("Endpoint.getRecordSetByZone")) {
authenticateAndExecute(recordSetService.getRecordSetByZone(rsId, zoneId, _)) { rs =>
@ -213,8 +230,8 @@ class RecordSetRoute(
} ~
path("zones" / Segment / "recordsetchanges") { zoneId =>
(get & monitor("Endpoint.listRecordSetChanges")) {
parameters("startFrom".?, "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Option[String], maxItems: Int) =>
parameters("startFrom".as[Int].?, "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Option[Int], maxItems: Int) =>
handleRejections(invalidQueryHandler) {
validate(
check = 0 < maxItems && maxItems <= DEFAULT_MAX_ITEMS,
@ -231,6 +248,52 @@ class RecordSetRoute(
}
}
}
} ~
path("recordsetchange" / "history") {
(get & monitor("Endpoint.listRecordSetChangeHistory")) {
parameters("zoneId".as[String].?, "startFrom".as[Int].?, "maxItems".as[Int].?(DEFAULT_MAX_ITEMS), "fqdn".as[String].?, "recordType".as[String].?) {
(zoneId: Option[String], startFrom: Option[Int], maxItems: Int, fqdn: Option[String], recordType: Option[String]) =>
handleRejections(invalidQueryHandler) {
val errorMessage = if(fqdn.isEmpty || recordType.isEmpty || zoneId.isEmpty) {
"recordType, fqdn and zoneId cannot be empty"
} else {
s"maxItems was $maxItems, maxItems must be between 0 exclusive " +
s"and $DEFAULT_MAX_ITEMS inclusive"
}
val isValid = (0 < maxItems && maxItems <= DEFAULT_MAX_ITEMS) && (fqdn.nonEmpty && recordType.nonEmpty && zoneId.nonEmpty)
validate(
check = isValid,
errorMsg = errorMessage
){
authenticateAndExecute(
recordSetService
.listRecordSetChangeHistory(zoneId, startFrom, maxItems, fqdn, RecordType.find(recordType.get), _)
) { changes =>
complete(StatusCodes.OK, changes)
}
}
}
}
}
} ~
path("metrics" / "health" / "zones" / Segment / "recordsetchangesfailure") {zoneId =>
(get & monitor("Endpoint.listFailedRecordSetChanges")) {
parameters("startFrom".as[Int].?(0), "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Int, maxItems: Int) =>
handleRejections(invalidQueryHandler) {
validate(
check = 0 < maxItems && maxItems <= DEFAULT_MAX_ITEMS,
errorMsg = s"maxItems was $maxItems, maxItems must be between 0 exclusive " +
s"and $DEFAULT_MAX_ITEMS inclusive"
){
authenticateAndExecute(recordSetService.listFailedRecordSetChanges(_, Some(zoneId), startFrom, maxItems)) {
changes =>
complete(StatusCodes.OK, changes)
}
}
}
}
}
}
private val invalidQueryHandler = RejectionHandler

View File

@ -17,10 +17,15 @@
package vinyldns.api.route
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import cats.effect.IO
import fs2.concurrent.SignallingRef
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.Interfaces.{EitherImprovements, Result, ensuring}
import vinyldns.api.config.ServerConfig
import vinyldns.api.domain.zone.NotAuthorizedError
import vinyldns.core.domain.auth.AuthPrincipal
import scala.concurrent.duration._
@ -31,39 +36,63 @@ final case class CurrentStatus(
version: String
)
trait StatusRoute extends Directives {
this: VinylDNSJsonProtocol =>
class StatusRoute(
serverConfig: ServerConfig,
val vinylDNSAuthenticator: VinylDNSAuthenticator,
val processingDisabled: SignallingRef[IO, Boolean]
) extends VinylDNSJsonProtocol
with VinylDNSDirectives[Throwable] {
implicit val timeout = Timeout(10.seconds)
def getRoutes: Route = statusRoute
def processingDisabled: SignallingRef[IO, Boolean]
implicit val timeout: Timeout = Timeout(10.seconds)
def statusRoute(color: String, version: String, keyName: String) =
def logger: Logger = LoggerFactory.getLogger(classOf[StatusRoute])
def handleErrors(e: Throwable): PartialFunction[Throwable, Route] = {
case NotAuthorizedError(msg) => complete(StatusCodes.Forbidden, msg)
}
def postStatus(isProcessingDisabled: Boolean, authPrincipal: AuthPrincipal): Result[Boolean] = {
for {
_ <- isAdmin(authPrincipal).toResult
isDisabled = isProcessingDisabled
} yield isDisabled
}
def isAdmin(authPrincipal: AuthPrincipal): Either[Throwable, Unit] =
ensuring(NotAuthorizedError(s"Not authorized. User '${authPrincipal.signedInUser.userName}' cannot make the requested change.")) {
authPrincipal.isSystemAdmin
}
val statusRoute: Route =
(get & path("status")) {
onSuccess(processingDisabled.get.unsafeToFuture()) { isProcessingDisabled =>
complete(
StatusCodes.OK,
CurrentStatus(
isProcessingDisabled,
color,
keyName,
version
serverConfig.color,
serverConfig.keyName,
serverConfig.version
)
)
}
} ~
(post & path("status")) {
parameters("processingDisabled".as[Boolean]) { isProcessingDisabled =>
onSuccess(processingDisabled.set(isProcessingDisabled).unsafeToFuture()) {
complete(
StatusCodes.OK,
CurrentStatus(
isProcessingDisabled,
color,
keyName,
version
authenticateAndExecute(postStatus(isProcessingDisabled, _)){ isProcessingDisabled =>
onSuccess(processingDisabled.set(isProcessingDisabled).unsafeToFuture()) {
complete(
StatusCodes.OK,
CurrentStatus(
isProcessingDisabled,
serverConfig.color,
serverConfig.keyName,
serverConfig.version
)
)
)
}
}
}
}

View File

@ -22,7 +22,7 @@ import cats.effect.IO
import fs2.concurrent.SignallingRef
import io.prometheus.client.CollectorRegistry
import org.json4s.MappingException
import vinyldns.api.config.VinylDNSConfig
import vinyldns.api.config.{LimitsConfig, VinylDNSConfig}
import vinyldns.api.domain.auth.AuthPrincipalProvider
import vinyldns.api.domain.batch.BatchChangeServiceAlgebra
import vinyldns.api.domain.membership.MembershipServiceAlgebra
@ -57,6 +57,7 @@ object VinylDNSService {
// $COVERAGE-OFF$
class VinylDNSService(
val membershipService: MembershipServiceAlgebra,
val limits: LimitsConfig,
val processingDisabled: SignallingRef[IO, Boolean],
val zoneService: ZoneServiceAlgebra,
val healthService: HealthService,
@ -68,7 +69,6 @@ class VinylDNSService(
) extends PingRoute
with HealthCheckRoute
with BlueGreenRoute
with StatusRoute
with PrometheusRoute
with VinylDNSJsonProtocol
with RequestLogging {
@ -84,37 +84,41 @@ class VinylDNSService(
)
val zoneRoute: Route =
new ZoneRoute(zoneService, vinylDNSAuthenticator, vinyldnsConfig.crypto).getRoutes
val recordSetRoute: Route = new RecordSetRoute(recordSetService, vinylDNSAuthenticator).getRoutes
new ZoneRoute(zoneService, limits, vinylDNSAuthenticator, vinyldnsConfig.crypto).getRoutes
val recordSetRoute: Route =
new RecordSetRoute(recordSetService, limits, vinylDNSAuthenticator).getRoutes
val membershipRoute: Route =
new MembershipRoute(membershipService, vinylDNSAuthenticator).getRoutes
new MembershipRoute(membershipService, limits, vinylDNSAuthenticator).getRoutes
val batchChangeRoute: Route =
new BatchChangeRoute(
batchChangeService,
limits,
vinylDNSAuthenticator,
vinyldnsConfig.manualReviewConfig
).getRoutes
val statusRoute: Route =
new StatusRoute(
vinyldnsConfig.serverConfig,
vinylDNSAuthenticator,
processingDisabled
).getRoutes
val unloggedUris = Seq(
Uri.Path("/health"),
Uri.Path("/color"),
Uri.Path("/ping"),
Uri.Path("/status"),
Uri.Path("/metrics/prometheus")
)
val unloggedRoutes: Route = healthCheckRoute ~ pingRoute ~ colorRoute(
vinyldnsConfig.serverConfig.color
) ~ statusRoute(
vinyldnsConfig.serverConfig.color,
vinyldnsConfig.serverConfig.version,
vinyldnsConfig.serverConfig.keyName
) ~ prometheusRoute
val allRoutes: Route = unloggedRoutes ~
batchChangeRoute ~
zoneRoute ~
recordSetRoute ~
membershipRoute
membershipRoute ~
statusRoute
val vinyldnsRoutes: Route = logRequestResult(requestLogger(unloggedUris))(allRoutes)

View File

@ -20,6 +20,8 @@ import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server._
import akka.util.Timeout
import org.slf4j.{Logger, LoggerFactory}
import vinyldns.api.config.LimitsConfig
import vinyldns.api.domain.membership.EmailValidationError
import vinyldns.api.domain.zone._
import vinyldns.core.crypto.CryptoAlgebra
import vinyldns.core.domain.zone._
@ -27,10 +29,12 @@ import vinyldns.core.domain.zone._
import scala.concurrent.duration._
case class GetZoneResponse(zone: ZoneInfo)
case class GetZoneDetailsResponse(zone: ZoneDetails)
case class ZoneRejected(zone: Zone, errors: List[String])
class ZoneRoute(
zoneService: ZoneServiceAlgebra,
limitsConfig: LimitsConfig,
val vinylDNSAuthenticator: VinylDNSAuthenticator,
crypto: CryptoAlgebra
) extends VinylDNSJsonProtocol
@ -40,8 +44,8 @@ class ZoneRoute(
def logger: Logger = LoggerFactory.getLogger(classOf[ZoneRoute])
final private val DEFAULT_MAX_ITEMS: Int = 100
final private val MAX_ITEMS_LIMIT: Int = 100
final private val DEFAULT_MAX_ITEMS: Int = limitsConfig.ZONE_ROUTING_DEFAULT_MAX_ITEMS
final private val MAX_ITEMS_LIMIT: Int = limitsConfig.ZONE_ROUTING_MAX_ITEMS_LIMIT
// Timeout must be long enough to allow the cluster to form
implicit val zoneCmdTimeout: Timeout = Timeout(10.seconds)
@ -60,6 +64,7 @@ class ZoneRoute(
case RecentSyncError(msg) => complete(StatusCodes.Forbidden, msg)
case ZoneInactiveError(msg) => complete(StatusCodes.BadRequest, msg)
case InvalidRequest(msg) => complete(StatusCodes.BadRequest, msg)
case EmailValidationError(msg) => complete(StatusCodes.BadRequest, msg)
}
val zoneRoute: Route = path("zones") {
@ -76,13 +81,17 @@ class ZoneRoute(
"nameFilter".?,
"startFrom".as[String].?,
"maxItems".as[Int].?(DEFAULT_MAX_ITEMS),
"ignoreAccess".as[Boolean].?(false)
"searchByAdminGroup".as[Boolean].?(false),
"ignoreAccess".as[Boolean].?(false),
"includeReverse".as[Boolean].?(true)
) {
(
nameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
ignoreAccess: Boolean
searchByAdminGroup: Boolean,
ignoreAccess: Boolean,
includeReverse: Boolean
) =>
{
handleRejections(invalidQueryHandler) {
@ -92,7 +101,7 @@ class ZoneRoute(
) {
authenticateAndExecute(
zoneService
.listZones(_, nameFilter, startFrom, maxItems, ignoreAccess)
.listZones(_, nameFilter, startFrom, maxItems, searchByAdminGroup, ignoreAccess, includeReverse)
) { result =>
complete(StatusCodes.OK, result)
}
@ -102,6 +111,38 @@ class ZoneRoute(
}
}
} ~
path("zones" / "deleted" / "changes") {
(get & monitor("Endpoint.listDeletedZones")) {
parameters(
"nameFilter".?,
"startFrom".as[String].?,
"maxItems".as[Int].?(DEFAULT_MAX_ITEMS),
"ignoreAccess".as[Boolean].?(false)
) {
(
nameFilter: Option[String],
startFrom: Option[String],
maxItems: Int,
ignoreAccess: Boolean
) =>
{
handleRejections(invalidQueryHandler) {
validate(
0 < maxItems && maxItems <= MAX_ITEMS_LIMIT,
s"maxItems was $maxItems, maxItems must be between 0 and $MAX_ITEMS_LIMIT"
) {
authenticateAndExecute(
zoneService
.listDeletedZones(_, nameFilter, startFrom, maxItems, ignoreAccess)
) { result =>
complete(StatusCodes.OK, result)
}
}
}
}
}
}
} ~
path("zones" / "backendids") {
(get & monitor("Endpoint.getBackendIds")) {
authenticateAndExecute(_ => zoneService.getBackendIds()) { ids =>
@ -134,6 +175,13 @@ class ZoneRoute(
}
}
} ~
path("zones" / Segment / "details") { id =>
(get & monitor("Endpoint.getCommonZoneDetails")) {
authenticateAndExecute(zoneService.getCommonZoneDetails(id, _)) { zone =>
complete(StatusCodes.OK, GetZoneDetailsResponse(zone))
}
}
} ~
path("zones" / Segment / "sync") { id =>
(post & monitor("Endpoint.syncZone")) {
authenticateAndExecute(zoneService.syncZone(id, _)) { chg =>
@ -159,6 +207,24 @@ class ZoneRoute(
}
}
} ~
path("metrics" / "health" / "zonechangesfailure") {
(get & monitor("Endpoint.listFailedZoneChanges")) {
parameters("startFrom".as[Int].?(0), "maxItems".as[Int].?(DEFAULT_MAX_ITEMS)) {
(startFrom: Int, maxItems: Int) =>
handleRejections(invalidQueryHandler) {
validate(
0 < maxItems && maxItems <= DEFAULT_MAX_ITEMS,
s"maxItems was $maxItems, maxItems must be between 0 exclusive and $DEFAULT_MAX_ITEMS inclusive"
) {
authenticateAndExecute(zoneService.listFailedZoneChanges(_, startFrom, maxItems)) {
changes =>
complete(StatusCodes.OK, changes)
}
}
}
}
}
} ~
path("zones" / Segment / "acl" / "rules") { id =>
(put & monitor("Endpoint.addZoneACLRule")) {
authenticateAndExecuteWithEntity[ZoneCommandResult, ACLRuleInfo](

Some files were not shown because too many files have changed in this diff Show More