diff --git a/VERSION b/VERSION new file mode 100644 index 000000000..267577d47 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.4.1 diff --git a/app/background.py b/app/background.py new file mode 100644 index 000000000..6f1745cf7 --- /dev/null +++ b/app/background.py @@ -0,0 +1,40 @@ +from threading import Thread + +import logging +from django import db +from app.testwatch import testWatch + +logger = logging.getLogger('app.logger') + +def background(func): + """ + Adds background={True|False} param to any function + so that we can call update_nodes_info(background=True) from the outside + """ + def wrapper(*args,**kwargs): + background = kwargs.get('background', False) + if 'background' in kwargs: del kwargs['background'] + + if background: + if testWatch.hook_pre(func, *args, **kwargs): return + + # Create a function that closes all + # db connections at the end of the thread + # This is necessary to make sure we don't leave + # open connections lying around. + def execute_and_close_db(): + ret = None + try: + ret = func(*args, **kwargs) + finally: + db.connections.close_all() + testWatch.hook_post(func, *args, **kwargs) + return ret + + t = Thread(target=execute_and_close_db) + t.daemon = True + t.start() + return t + else: + return func(*args, **kwargs) + return wrapper \ No newline at end of file diff --git a/app/scheduler.py b/app/scheduler.py new file mode 100644 index 000000000..203371384 --- /dev/null +++ b/app/scheduler.py @@ -0,0 +1,96 @@ +import logging +import traceback +from multiprocessing.dummy import Pool as ThreadPool +from threading import Lock + +from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError +from apscheduler.schedulers.background import BackgroundScheduler +from django import db +from django.db.models import Q, Count +from webodm import settings + +from app.models import Task, Project +from nodeodm import status_codes +from nodeodm.models import ProcessingNode +from app.background import background + 
+logger = logging.getLogger('app.logger') +scheduler = BackgroundScheduler({ + 'apscheduler.job_defaults.coalesce': 'true', + 'apscheduler.job_defaults.max_instances': '3', +}) + +@background +def update_nodes_info(): + processing_nodes = ProcessingNode.objects.all() + for processing_node in processing_nodes: + processing_node.update_node_info() + +tasks_mutex = Lock() + +@background +def process_pending_tasks(): + tasks = [] + try: + tasks_mutex.acquire() + + # All tasks that have a processing node assigned + # Or that need one assigned (via auto) + # or tasks that need a status update + # or tasks that have a pending action + # and that are not locked (being processed by another thread) + tasks = Task.objects.filter(Q(processing_node__isnull=True, auto_processing_node=True) | + Q(Q(status=None) | Q(status__in=[status_codes.QUEUED, status_codes.RUNNING]), processing_node__isnull=False) | + Q(pending_action__isnull=False)).exclude(Q(processing_lock=True)) + for task in tasks: + task.processing_lock = True + task.save() + finally: + tasks_mutex.release() + + def process(task): + try: + task.process() + except Exception as e: + logger.error("Uncaught error! This is potentially bad. Please report it to http://github.com/OpenDroneMap/WebODM/issues: {} {}".format(e, traceback.format_exc()))
+ if settings.TESTING: raise e + finally: + # Might have been deleted + if task.pk is not None: + task.processing_lock = False + task.save() + + db.connections.close_all() + + if tasks.count() > 0: + pool = ThreadPool(tasks.count()) + pool.map(process, tasks, chunksize=1) + pool.close() + pool.join() + + +def cleanup_projects(): + # Delete all projects that are marked for deletion + # and that have no tasks left + total, count_dict = Project.objects.filter(deleting=True).annotate( + tasks_count=Count('task') + ).filter(tasks_count=0).delete() + if total > 0 and 'app.Project' in count_dict: + logger.info("Deleted {} projects".format(count_dict['app.Project'])) + +def setup(): + try: + scheduler.start() + scheduler.add_job(update_nodes_info, 'interval', seconds=30) + scheduler.add_job(process_pending_tasks, 'interval', seconds=5) + scheduler.add_job(cleanup_projects, 'interval', seconds=60) + except SchedulerAlreadyRunningError: + logger.warning("Scheduler already running (this is OK while testing)") + +def teardown(): + logger.info("Stopping scheduler...") + try: + scheduler.shutdown() + logger.info("Scheduler stopped") + except SchedulerNotRunningError: + logger.warning("Scheduler not running") diff --git a/app/templates/app/dashboard.html b/app/templates/app/dashboard.html index 1abb2f921..4762efeb8 100644 --- a/app/templates/app/dashboard.html +++ b/app/templates/app/dashboard.html @@ -39,6 +39,12 @@
+ {% blocktrans %} You are currently in a demo. You cannot create new tasks and some features such as volume calculation and contours have been disabled, but you can look around to explore the software. {% endblocktrans %} +
+ + {% endif %} diff --git a/app/templates/app/index.html b/app/templates/app/index.html new file mode 100644 index 000000000..70f30a4b3 --- /dev/null +++ b/app/templates/app/index.html @@ -0,0 +1,5 @@ +{% extends "app/base.html" %} + +{% block content %} +{{ hello }} +{% endblock %} diff --git a/app/views/app.py b/app/views/app.py index 58dbc9079..2dd701ce5 100644 --- a/app/views/app.py +++ b/app/views/app.py @@ -28,17 +28,22 @@ def index(request): if settings.SINGLE_USER_MODE and not request.user.is_authenticated: login(request, User.objects.get(username="admin"), 'django.contrib.auth.backends.ModelBackend') + # Auto login + if not request.user.is_authenticated: + login(request, User.objects.get(username="demouser"), 'django.contrib.auth.backends.ModelBackend') + return redirect('dashboard') + return redirect(settings.LOGIN_REDIRECT_URL if request.user.is_authenticated else settings.LOGIN_URL) @login_required def dashboard(request): no_processingnodes = ProcessingNode.objects.count() == 0 + no_tasks = False + if no_processingnodes and settings.PROCESSING_NODES_ONBOARDING is not None: return redirect(settings.PROCESSING_NODES_ONBOARDING) - no_tasks = Task.objects.filter(project__owner=request.user).count() == 0 - # Create first project automatically if Project.objects.count() == 0: Project.objects.create(owner=request.user, name=_("First Project")) diff --git a/locale b/locale index 12f8546a1..4e01fc1cc 160000 --- a/locale +++ b/locale @@ -1 +1 @@ -Subproject commit 12f8546a1779a1e86254a806a2c88661cee07d84 +Subproject commit 4e01fc1ccea78e41494191e082997000ee032c06 diff --git a/nginx/nginx-ssl.conf.template b/nginx/nginx-ssl.conf.template index c630bf011..9544b3a4e 100644 --- a/nginx/nginx-ssl.conf.template +++ b/nginx/nginx-ssl.conf.template @@ -31,7 +31,7 @@ http { server { server_name $WO_HOST; listen 8080; - return 301 https://$WO_HOST:$WO_PORT$request_uri; + return 301 https://$WO_HOST$request_uri; } server { diff --git a/nodeodm/external/NodeODM b/nodeodm/external/NodeODM
index eb2ea1bba..5e560fb63 160000 --- a/nodeodm/external/NodeODM +++ b/nodeodm/external/NodeODM @@ -1 +1 @@ -Subproject commit eb2ea1bbab0ae3c60950c94796ed3a9f24fd0389 +Subproject commit 5e560fb635b6f1597a9b5d253e51d89798404953 diff --git a/run.sh b/run.sh new file mode 100755 index 000000000..f7662b2c8 --- /dev/null +++ b/run.sh @@ -0,0 +1 @@ +WO_HOST=demo.webodm.org WO_PORT=443 WO_SSL_INSECURE_PORT_REDIRECT=80 WO_SSL=YES WO_DEBUG=NO docker-compose -f docker-compose.yml -f docker-compose.ssl.yml -f docker-compose.build.yml up -d diff --git a/worker/celery.py b/worker/celery.py index 083edd814..d82e083e6 100644 --- a/worker/celery.py +++ b/worker/celery.py @@ -32,7 +32,7 @@ 'task': 'worker.tasks.cleanup_tmp_directory', 'schedule': 3600, 'options': { - 'expires': 1799, + 'expires': 30, 'retry': False } }, @@ -69,4 +69,4 @@ def ready(self): MockAsyncResult.set = lambda cti, r: MockAsyncResult(cti, r) if __name__ == '__main__': - app.start() \ No newline at end of file + app.start()