author    Luis R. Rodriguez <mcgrof@do-not-panic.com>    2014-04-15 22:48:26 +0000
committer Luis R. Rodriguez <mcgrof@do-not-panic.com>    2014-04-17 16:23:09 -0700
commit    ee6dd7b99be6209c9b7bef8b4b176ca8be1b3af7 (patch)
tree      895e1fc3182512521e42f1140d33780ed3c3bec5 /lib
parent    ecec075043a34fa0e88c2716c5f4eca3782f7ca3 (diff)
backports: pycocci - make the Coccinelle wrapper a standalone tool
This lets us share it for general use as a generic tool. We'll maintain it
here for now, and if Coccinelle picks it up we can drop it and just require
folks to install it. This lets us use the same solution for standard testing
and regular development as well as for backports, without any discrepancies.

Cc: Peter Senna <peter.senna@gmail.com>
Cc: Julia Lawall <julia.lawall@lip6.fr>
Cc: Gilles Muller <Gilles.Muller@lip6.fr>
Signed-off-by: Luis R. Rodriguez <mcgrof@do-not-panic.com>
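For context, a minimal sketch of how a caller might drive the standalone wrapper
instead of importing the in-tree helper removed below; the pycocci command name
and argument order shown here are assumptions, not taken from this commit:

import subprocess

def run_pycocci(cocci_file, target_dir):
    # Hypothetical invocation of the standalone wrapper; check the tool's
    # own usage/help output before relying on this exact form.
    subprocess.check_call(['pycocci', cocci_file, target_dir])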
Diffstat (limited to 'lib')
-rw-r--r--    lib/bpcoccinelle.py    91
1 file changed, 0 insertions(+), 91 deletions(-)
diff --git a/lib/bpcoccinelle.py b/lib/bpcoccinelle.py
deleted file mode 100644
index 2e0153eb..00000000
--- a/lib/bpcoccinelle.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from multiprocessing import Process, cpu_count, Queue
-import subprocess, os
-from lib.tempdir import tempdir
-
-class CoccinelleError(Exception):
- pass
-class ExecutionError(CoccinelleError):
- def __init__(self, errcode):
- self.error_code = errcode
-class ExecutionErrorThread(CoccinelleError):
- def __init__(self, errcode, fn, cocci_file, threads, t, logwrite, print_name):
- self.error_code = errcode
- logwrite("Failed to apply changes from %s" % print_name)
-
- logwrite("Specific log output from change that failed using %s" % print_name)
- tf = open(fn, 'r')
- for line in tf.readlines():
- logwrite('> %s' % line)
- tf.close()
-
- logwrite("Full log using %s" % print_name)
- for num in range(threads):
- fn = os.path.join(t, '.tmp_spatch_worker.' + str(num))
- if (not os.path.isfile(fn)):
- continue
- tf = open(fn, 'r')
- for line in tf.readlines():
- logwrite('> %s' % line)
- tf.close()
- os.unlink(fn)
-
-def spatch(cocci_file, outdir,
- max_threads, thread_id, temp_dir, ret_q, extra_args=[]):
- cmd = ['spatch', '--sp-file', cocci_file, '--in-place',
- '--recursive-includes',
- '--relax-include-path',
- '--use-coccigrep',
- '--timeout', '120',
- '--backup-suffix', '.cocci_backup', '--dir', '.']
-
- if (max_threads > 1):
- cmd.extend(['-max', str(max_threads), '-index', str(thread_id)])
-
- cmd.extend(extra_args)
-
- fn = os.path.join(temp_dir, '.tmp_spatch_worker.' + str(thread_id))
- outfile = open(fn, 'w')
-
- sprocess = subprocess.Popen(cmd,
- stdout=outfile, stderr=subprocess.STDOUT,
- close_fds=True, universal_newlines=True,
- cwd=outdir)
- sprocess.wait()
- if sprocess.returncode != 0:
- raise ExecutionError(sprocess.returncode)
- outfile.close()
- ret_q.put((sprocess.returncode, fn))
-
-def threaded_spatch(cocci_file, outdir, logwrite, print_name, extra_args=[]):
- threads = cpu_count()
- jobs = list()
- output = ''
- ret_q = Queue()
- with tempdir() as t:
-
- for num in range(threads):
- p = Process(target=spatch, args=(cocci_file, outdir,
- threads, num, t, ret_q,
- extra_args))
- jobs.append(p)
- for p in jobs:
- p.start()
-
- for num in range(threads):
- ret, fn = ret_q.get()
- if ret != 0:
- raise ExecutionErrorThread(ret, fn, cocci_file, threads, t,
- logwrite, print_name)
-
- for job in jobs:
- job.join()
-
- for num in range(threads):
- fn = os.path.join(t, '.tmp_spatch_worker.' + str(num))
- tf = open(fn, 'r')
- output = output + tf.read()
- tf.close()
- os.unlink(fn)
-
- output = output + '\n'
- return output
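For reference, the removed module's entry point was threaded_spatch(); a minimal
usage sketch based on its signature above, with logwrite standing in for whatever
logger the caller supplied and foo.cocci / outdir/ as placeholder arguments (the
actual call site is not part of this diff, and spatch must be installed):

import sys
from lib import bpcoccinelle

def logwrite(msg):
    # Simple stand-in for the caller-provided logger.
    sys.stdout.write(msg + '\n')

# Apply foo.cocci in place to the tree under outdir/, one worker per CPU.
output = bpcoccinelle.threaded_spatch('foo.cocci', 'outdir/', logwrite, 'foo.cocci')
logwrite(output)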