diff --git a/detector/server.py b/detector/server.py
index 90cdcf8..34a0c85 100644
--- a/detector/server.py
+++ b/detector/server.py
@@ -68,7 +68,7 @@ def serve_forever(server, model, tokenizer, device):
     globals()['tokenizer'] = tokenizer
     globals()['device'] = device
 
-    log('Ready to serve')
+    log(f'Ready to serve at http://localhost:{server.server_address[1]}')
     server.serve_forever()
 
 
@@ -93,7 +93,7 @@ def main(checkpoint, port=8080, device='cuda' if torch.cuda.is_available() else
     server = HTTPServer(('0.0.0.0', port), RequestHandler)
 
     # avoid calling CUDA API before forking; doing so in a subprocess is fine.
-    num_workers = int(subprocess.check_output(['python', '-c', 'import torch; print(torch.cuda.device_count())']))
+    num_workers = int(subprocess.check_output([sys.executable, '-c', 'import torch; print(torch.cuda.device_count())']))
 
     if num_workers <= 1:
         serve_forever(server, model, tokenizer, device)
diff --git a/detector/train.py b/detector/train.py
index ff8d79a..748ef4d 100644
--- a/detector/train.py
+++ b/detector/train.py
@@ -281,7 +281,7 @@ if __name__ == '__main__':
     parser.add_argument('--weight-decay', type=float, default=0)
     args = parser.parse_args()
 
-    nproc = int(subprocess.check_output(['python', '-c', "import torch;"
+    nproc = int(subprocess.check_output([sys.executable, '-c', "import torch;"
         "print(torch.cuda.device_count() if torch.cuda.is_available() else 1)"]))
     if nproc > 1:
         print(f'Launching {nproc} processes ...', file=sys.stderr)
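
Note on the pattern: both files spawn a throwaway child interpreter to count GPUs before any workers are forked (initializing CUDA in the parent prior to forking is unsafe, hence the subprocess probe), and the patch replaces the hard-coded 'python' string with sys.executable so the probe runs under the same interpreter, virtualenv, and installed packages as the parent. Below is a minimal, self-contained sketch of the same pattern; it is an illustration, not part of the patch, and it counts CPU cores via os.cpu_count() instead of GPUs so it runs without torch installed:

    # Sketch only (not from the patch): why sys.executable beats 'python'.
    # The bare string 'python' is resolved through PATH and may hit a
    # different interpreter, or none at all on systems that ship only
    # 'python3'; sys.executable is the binary running the current process,
    # so the child matches the parent's version and environment.
    import subprocess
    import sys

    # Same probe-in-a-subprocess pattern as the patch, but counting CPU
    # cores so the sketch is runnable without torch.
    count = int(subprocess.check_output(
        [sys.executable, '-c', 'import os; print(os.cpu_count())']))
    print(f'child interpreter reports {count} logical CPUs')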