Commit 09c95b47 authored by sumpfralle

fixed a mistake in transmitting cacheable list items to remote workers

increased timeout for task handlers


git-svn-id: https://pycam.svn.sourceforge.net/svnroot/pycam/trunk@778 bbaffbd6-741e-11dd-a85d-61de82d9cad9
parent f67bdb95
@@ -203,8 +203,12 @@ def cleanup():
     # Managers started via ".connect" may skip this.
     if hasattr(__manager, "shutdown"):
         # wait for the spawner and the worker threads to go down
-        time.sleep(1.5)
-        __manager.shutdown()
+        time.sleep(2.5)
+        #__manager.shutdown()
+        time.sleep(0.1)
+        # check if it is still alive and kill it if necessary
+        if __manager._process.is_alive():
+            __manager._process.terminate()
 
 def _spawn_daemon(manager, number_of_processes, worker_uuid_list):
     """ wait for items in the 'tasks' queue to appear and then spawn workers
@@ -322,14 +326,14 @@ def run_in_parallel_remote(func, args_list, unordered=False,
         elif isinstance(arg, (list, set, tuple)) \
                 and ([True for item in arg if hasattr(item, "uuid")]):
             # a list with at least one cacheable item
-            for item in arg:
+            new_arg_list = []
+            for item in arg:
                 if hasattr(item, "uuid"):
                     data_uuid = ProcessDataCacheItemID(item.uuid)
                     if not remote_cache.contains(data_uuid):
-                        log.debug(("Adding cache item for job %s: " \
-                                + "%s - %s") \
-                                % (job_id, item.uuid, arg.__class__))
+                        log.debug("Adding cache item from list for " \
+                                + "job %s: %s - %s" \
+                                % (job_id, item.uuid, item.__class__))
                         remote_cache.add(data_uuid, item)
                     new_arg_list.append(data_uuid)
                 else:
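The list-handling hunk above collects the (possibly cached) items into new_arg_list, so that cacheable entries are transmitted to remote workers as small cache IDs instead of full objects. The following standalone sketch is a hedged reconstruction of that control flow, not PyCAM's actual code: DictCache and CacheItemID are illustrative stand-ins for the real remote_cache object and ProcessDataCacheItemID class, and the final else branch assumes that non-cacheable entries are simply passed through unchanged.

import uuid

class CacheItemID:
    # stand-in for PyCAM's ProcessDataCacheItemID: wraps an item's uuid
    def __init__(self, value):
        self.value = value

class DictCache:
    # stand-in for the cache shared with the remote worker processes
    def __init__(self):
        self._data = {}

    def contains(self, item_id):
        return item_id.value in self._data

    def add(self, item_id, content):
        self._data[item_id.value] = content

def pack_list_argument(arg, remote_cache):
    # replace every cacheable item (anything with a "uuid" attribute) by a
    # cache-ID token; upload its payload to the cache only if it is missing
    new_arg_list = []
    for item in arg:
        if hasattr(item, "uuid"):
            data_uuid = CacheItemID(item.uuid)
            if not remote_cache.contains(data_uuid):
                remote_cache.add(data_uuid, item)
            new_arg_list.append(data_uuid)
        else:
            # assumed behaviour: non-cacheable entries are passed through as-is
            new_arg_list.append(item)
    return new_arg_list

class FakeModel:
    # illustrative cacheable object carrying a uuid attribute
    def __init__(self):
        self.uuid = str(uuid.uuid4())

if __name__ == "__main__":
    cache = DictCache()
    packed = pack_list_argument([FakeModel(), 42], cache)
    print([type(entry).__name__ for entry in packed])  # ['CacheItemID', 'int']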