Commit 82dd9516 authored by Simon Pintarelli

initial commit

- tested with python3
- TODO: adapt to other programming languages (C++, MATLAB); see the sketch following latex_creator_main.py below

.gitignore
# Created by https://www.gitignore.io/api/python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
#Ipython Notebook
.ipynb_checkpoints
HOWTO
=====
1) Compute the hashes
---------------------
find ExamDir -type f -exec md5sum {} + > hashes.txt
2) Adapt the header as necessary
--------------------------------
emacs header.txt
3) Run this tool
----------------
python3 latex_creator_main.py --hashes hashes.txt --results /userdata/simonpi/FS15/results-final/
4) Run XeLaTeX
--------------
for f in */*.tex; do xelatex "$f"; xelatex "$f"; done
5) Print the PDF files
----------------------
for f in 129.132.200.*.pdf; do lpr -P p-hg-j-42 -o sides=two-sided-long-edge -o landscape -o fit-to-page -o media=A4 -o number-up=2 -o number-up-layout=lr $f; done
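Note on the expected layout
---------------------------
latex_creator_main.py assumes the structure written by `exam-setup --get-files`
(see the --results help text): one directory per machine, named after its IP
address, containing a results-clean/ subdirectory and an .exam-setup-user file,
plus a hashes.txt whose lines start with the 32-character md5 digest emitted by
md5sum. The pre-flight check below is only a sketch (check_inputs.py is a
hypothetical helper, not part of this commit) that verifies these assumptions
before step 3:

#!/usr/bin/env python3
# check_inputs.py -- hypothetical pre-flight check, not part of this commit
import os
import re
import sys

results = sys.argv[1] if len(sys.argv) > 1 else './'
hashfile = sys.argv[2] if len(sys.argv) > 2 else 'hashes.txt'

# every line written by `md5sum` starts with the 32-character hex digest,
# which is exactly what get_file_hashes() slices off with l[:32]
with open(hashfile) as f:
    bad = [l for l in f if not re.match(r'[0-9a-f]{32}\s', l)]
if bad:
    print('%d malformed line(s) in %s' % (len(bad), hashfile))

# one directory per machine, named after its IP address, containing
# results-clean/ and the .exam-setup-user info file
ip_re = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
for d in sorted(os.listdir(results)):
    if not ip_re.match(d):
        continue
    if not os.path.isdir(os.path.join(results, d, 'results-clean')):
        print('%s: missing results-clean/' % d)
    if not os.path.isfile(os.path.join(results, d, '.exam-setup-user')):
        print('%s: missing .exam-setup-user (name will be "not found")' % d)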
header.txt
\documentclass[a4paper]{article}
\usepackage[MnSymbol]{mathspec}
\usepackage{fontspec}
\usepackage{xunicode}
\usepackage{xltxtra}
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage[margin=1.5cm]{geometry}
\setmonofont[Ligatures=TeX]{Inconsolata}
\usepackage{color}
\usepackage{graphicx}
\usepackage{relsize}
\usepackage{listings}
\definecolor{gray}{gray}{0.55}
\pagestyle{fancy}
\fancyfoot[C]{\thepage / \pageref{LastPage}}
\fancyhead[L]{IDENTIFIER}
\fancyhead[R]{IPADDRESS}
% http://tex.stackexchange.com/questions/80113/hide-section-numbers-but-keep-numbering
\renewcommand{\thesection}{}
\renewcommand{\thesubsection}{\arabic{section}.\arabic{subsection}}
\makeatletter
\def\@seccntformat#1{\csname #1ignore\expandafter\endcsname\csname the#1\endcsname\quad}
\let\sectionignore\@gobbletwo
\let\latex@numberline\numberline
\def\numberline#1{\if\relax#1\relax\else\latex@numberline{#1}\fi}
\makeatother
\parindent 0cm
% Python Code macro
\lstset{language=python,
basicstyle={\ttfamily \smaller},
basewidth=0.58em,
columns=fixed,
tabsize=2,
fontadjust=true,
frame=lb,
xleftmargin=4.2pt,
numbers=left,
stepnumber=2,
breaklines=true,
breakindent=0pt,
prebreak=\mbox{\tiny$\searrow$},
postbreak=\mbox{{\color{gray}$\cdots$}},
keywordstyle=\bfseries,
keywords={and, as, assert, break, class, continue, def, del,
elif, else, except, exec, finally, for, from, global,
if, import, in, is, lambda, not, or, pass, print, raise,
return, try, while, with, yield, True, False, None},
numberstyle=\color{gray},
commentstyle=\color{gray},
stringstyle=\textit,
showstringspaces=false,
}
\date{}
\title{IDENTIFIER \\ IPADDRESS }
\author{Numerische Methoden FS15}
\begin{document}
\maketitle
\tableofcontents
latex_creator.py
#!/usr/bin/env python3
import re, os
from subprocess import Popen, PIPE, STDOUT
from collections import defaultdict

# latex preamble template; IDENTIFIER and IPADDRESS are replaced per student
with open('header.txt') as f:
    texheader = ''.join(f.readlines())
texfooter = r'\end{document}'
symlink_prefix = '_____'
# substrings to grep for in the output of the unix command `file`
image_formats = ['PNG image data', 'JPEG image data', 'PDF document']
code_formats = ['Python script']
text_formats = ['ASCII text']
short_names = {'PNG image data': 'PNG',
               'JPEG image data': 'JPG',
               'PDF document': 'PDF',
               'Python script': 'PY',
               'ASCII text': 'TXT',
               None: 'None'}
# files matching any of these predicates are ignored (editor backups etc.)
invalid_filenames = [lambda x: bool(x.endswith('~')),
                     lambda x: x.find('#') != -1]
def is_file(fname, ftype):
    """Return `ftype` if the output of `file fname` contains `ftype`, else None."""
    p1 = Popen(["file", fname], stdout=PIPE)
    p2 = Popen(["grep", ftype], stdin=p1.stdout, stdout=PIPE)
    p2.wait()
    return ftype if p2.poll() == 0 else None


def get_ftype(fname):
    """Classify a file (code, image, text or symbolic link) via the `file` command."""
    code_matches = [t for t in code_formats if is_file(fname, t)]
    image_matches = [t for t in image_formats if is_file(fname, t)]
    text_matches = [t for t in text_formats if is_file(fname, t)]
    sym_link = [is_file(fname, 'symbolic link')]
    # the unix file command does not always recognize python scripts,
    # fall back to the file extension
    if text_matches and fname[-3:] == '.py':
        text_matches = []
        code_matches = ['Python script']
    alle = code_matches + image_matches + text_matches + sym_link
    return alle[0] if alle else None
def md5alias(fname):
    """
    Create a symlink with a latex compatible filename.
    """
    basename, ending = fname.rsplit('.', 1)
    p1 = Popen(["md5sum", fname], stdout=PIPE)
    if p1.wait() != 0:
        raise Exception("something went wrong...")
    md5s = p1.stdout.read().decode().split(' ')[0]
    newfname = symlink_prefix + md5s + '.' + ending
    with open(os.devnull, 'w') as DEVNULL:
        Popen(["ln", "-s", fname, newfname], stdout=DEVNULL, stderr=STDOUT).wait()
    return newfname


def md5sum(fname):
    """Return the md5 hash of `fname` as a hex string."""
    p1 = Popen(["md5sum", fname], stdout=PIPE)
    if p1.wait() != 0:
        raise Exception("something went wrong...")
    return p1.stdout.read().decode().split(' ')[0]
def tex_figure(dd, dirname):
    """Return a latex figure environment; the image is included via an md5-named symlink."""
    fname = dd['fname']
    fialias = md5alias(fname)
    texstr = ''
    texstr += '\n\\begin{figure}[h!]'
    texstr += '\n \\centering'
    texstr += '\n \\includegraphics[width=.7\\linewidth]{%s}' % os.path.join(dirname, fialias)
    texstr += '\n \\caption{ Filename = %s}' % fname.replace('_', '\\_')
    texstr += '\n\\end{figure}'
    return texstr


def tex_listing(dd):
    """Return a lstlisting environment with python highlighting for a code file."""
    fname = dd['fname']
    texstr = '\n\\begin{lstlisting}[caption={%s}]\n' % fname.replace('_', ' ')
    with open(fname, 'r') as f:
        texstr += ''.join(f.readlines())
    texstr += '\n\\end{lstlisting}'
    return texstr


def tex_verb_listing(dd):
    """Return a lstlisting environment without syntax highlighting for a plain text file."""
    fname = dd['fname']
    texstr = '\n\\begin{lstlisting}[caption={%s}, language={}]\n' % fname.replace('_', ' ')
    with open(fname, 'r') as f:
        texstr += ''.join(f.readlines())
    texstr += '\n\\end{lstlisting}'
    return texstr
def collect_files(dirname):
    """Walk `dirname` and group all files into sections keyed by their top-level directory."""
    all_files = []
    for dirpath, subdirs, files in os.walk(dirname):
        all_files += [os.path.join(dirpath, x).lstrip('./') for x in files]
        # all_files +=
        # # recursion
        # for si in subdirs:
        #     all_files += collect_files(os.path.join(dirname, si))
    all_files = sorted(all_files)
    # create dictionary: section label -> list of files
    section_ordered_dict = defaultdict(list)
    for fi in all_files:
        fi_splitted = fi.split(os.path.sep)
        index = 0
        etc_label = '_____etc'
        try:
            section_label = fi_splitted[index]
        except IndexError:
            section_label = etc_label
        if fi_splitted[-1] == section_label:
            # file sits directly in the root directory
            if os.path.islink(fi):
                # derive the section from the link target below 'results-clean'
                full_path = os.path.realpath(fi)
                full_path_splitted = full_path.split(os.path.sep)
                index = full_path_splitted.index('results-clean')
                section_label = full_path_splitted[index + 1]
                section_ordered_dict[section_label].append(fi)
            elif os.path.isfile(fi):
                section_ordered_dict[etc_label].append(fi)
        else:
            section_ordered_dict[section_label].append(fi)
    return section_ordered_dict
def create_tex(dirname, original_files=[]):
    """Build the latex body for one student directory; returns (tex string, file records)."""
    os.chdir(dirname)
    files_ordered_dict = collect_files('./')
    fmod = lambda x: x['is_modified'] == True
    texstr = ''
    allfiles = []
    hashsums = set()
    for section_label, files in sorted(files_ordered_dict.items()):
        # filter files
        files_filtered = []
        for fname in files:
            # check for own symlinks (created by md5alias on a previous run)
            basename = os.path.basename(fname)
            if re.match('^' + symlink_prefix, basename):
                # print('symlink detected')
                pass
            ftype = get_ftype(fname)
            md5s = md5sum(fname)
            is_duplicate = md5s in hashsums
            if not ftype == 'symbolic link':
                hashsums.add(md5s)
            is_modified = md5s not in original_files
            is_invalid = [lf(fname) for lf in invalid_filenames]
            omitted_flag = True in is_invalid
            files_filtered.append({'fname': fname,
                                   'ftype': ftype,
                                   'md5s': md5s,
                                   'is_modified': is_modified,
                                   'is_duplicate': is_duplicate,
                                   'omitted': omitted_flag})
        allfiles += files_filtered
        # create tex (sort by filename, dicts themselves are not orderable)
        section_tex_str = ''
        for elem in sorted(filter(fmod, files_filtered), key=lambda d: d['fname']):
            if elem['omitted'] == True or elem['is_duplicate'] is True:
                continue
            if elem['ftype'] in code_formats:
                tmp = tex_listing(elem)
                section_tex_str += tmp
            elif elem['ftype'] in image_formats:
                section_tex_str += tex_figure(elem, dirname)
            elif elem['ftype'] in text_formats:
                section_tex_str += tex_verb_listing(elem)
            else:
                elem['omitted'] = True
                print(' File omitted: ' + elem['fname'])
        if not section_tex_str == '':
            if section_label == '_____etc':
                texstr += '\n \\clearpage \\section{Etc} \n'
            else:
                texstr += '\n \\clearpage \\section{' + section_label.replace("_", "-") + '} \n'
            texstr += section_tex_str
    texstr += '\n\\end{document}'
    return texstr, allfiles
def main(dirname, ip, identifier, original_files=[]):
    """
    Keyword Arguments:
    dirname        -- root directory
    ip             -- ip address (used as output file name)
    identifier     -- Student name, nethz account
    original_files -- list of md5 hashes of original files
    """
    print(" Looking into: " + dirname)
    # grab files in root directory
    cwd = os.getcwd()
    texstr, files = create_tex(dirname, original_files=original_files)
    os.chdir(cwd)
    print(" Number of files found: %d" % len(files))
    # create the output .tex next to the student directory
    output_dir = os.path.join(os.path.abspath(dirname), '../')
    output_file = os.path.join(output_dir, '%s.tex' % ip)
    my_header = texheader.replace('IDENTIFIER', identifier)
    my_header = my_header.replace('IPADDRESS', ip)
    # summary table: modified / duplicates / unmodified / ignored
    out = [r'\begin{table}[h!] \centering']
    out.append(r'\renewcommand{\arraystretch}{1.2}')
    out.append(r'\begin{tabular}{|l|l|l|}')
    out.append(r'\hline\multicolumn{3}{|l|}{\textbf{Modified}}\\\hline')
    template_str = r'\verb`{:s}`& {:s} & \verb`{:s}` \\'
    # modified
    modified_files = [x for x in files if (x['is_modified'] == True
                                           and x['is_duplicate'] == False
                                           and x['omitted'] == False
                                           and not x['ftype'] == 'symbolic link')]
    for item in modified_files:
        out.append(template_str.format(item['fname'], short_names[item['ftype']], item['md5s']))
    # duplicate
    out.append(r'\hline\multicolumn{3}{|l|}{\textbf{Modified (duplicates)}}\\\hline')
    for item in filter(lambda x: (x['is_modified'] == True
                                  and x['omitted'] == False
                                  and x['is_duplicate'] == True
                                  and not x['ftype'] == 'symbolic link'), files):
        out.append(template_str.format(item['fname'], short_names[item['ftype']], item['md5s']))
    # not modified
    out.append(r'\hline\multicolumn{3}{|l|}{\textbf{Unmodified}}\\\hline')
    for item in filter(lambda x: (x['is_modified'] == False
                                  and x['omitted'] == False
                                  and not x['ftype'] == 'symbolic link'), files):
        out.append(template_str.format(item['fname'], short_names[item['ftype']], item['md5s']))
    # ignored
    out.append(r'\hline\multicolumn{3}{|l|}{\textbf{Ignored}}\\\hline')
    for item in filter(lambda x: (x['omitted'] == True
                                  and not x['ftype'] == 'symbolic link'), files):
        out.append(template_str.format(item['fname'], short_names[item['ftype']], item['md5s']))
    out.append(r'\hline')
    out.append(r'\end{tabular}')
    out.append(r'\end{table}')
    # write to file (skip only if nothing was modified and the student is unknown)
    if len(modified_files) or identifier != 'not found':
        with open(output_file, 'w') as f:
            f.write(my_header)
            f.write('\n'.join(out))
            f.write(r'\pagebreak')
            f.write(texstr)
latex_creator_main.py
#!/usr/bin/env python3
import argparse
import os, re
import subprocess

import latex_creator


def read_info_file(infofile):
    """Extract (real name, login name) from an .exam-setup-user file."""
    realname, loginname = None, None
    with open(infofile) as f:
        for line in f.readlines():
            match = re.search('export LOGINNAME="(.+?)"', line)
            if match:
                loginname = match.group(1)
            match = re.search('export REALNAME="(.+?)"', line)
            if match:
                realname = match.group(1)
    return realname, loginname


def get_name_table():
    """Map IP-address directories in args.results to (real name, login name)."""
    name_table = {}
    for dirname in os.listdir(args.results):
        match = re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', dirname)
        if match:
            infofile = os.path.join(args.results, dirname, ".exam-setup-user")
            if os.path.isfile(infofile):
                name_table[dirname] = read_info_file(infofile)
    return name_table


def get_file_hashes(hashfile):
    """Read the leading 32-character md5 digest from each line of `hashfile`."""
    with open(hashfile) as f:
        hashes = [l[:32] for l in f.readlines()]
    return hashes
if __name__ == '__main__':
    results_dir = 'results-clean'
    parser = argparse.ArgumentParser()
    parser.add_argument("--results",
                        default="./",
                        help="""Directory containing the collected exam results.
                        Usually the result directory of:
                        exam-setup --get-files
                        """)
    parser.add_argument("--hashes",
                        default="hashes.txt",
                        help="""File containing the md5 hashes of the original exam files.
                        Usually the output of:
                        find ExamDir -type f -exec md5sum {} + > hashes.txt
                        """)
    args = parser.parse_args()
    name_table = get_name_table()
    file_hashes = get_file_hashes(args.hashes)
    # Untabify: replace tab characters in all python files by spaces
    subprocess.call(["find", ".", "-name", "*.py",
                     "-exec", "sed", "-i", "s/\t/ /g", "{}", ";"])
    # Latexify: one .tex file per student directory
    for dirname, subdirs, files in os.walk(args.results):
        for si in subdirs:
            match = re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', si)
            if match:
                print('========================================')
                print('Found student directory: ' + si)
                local_result_dir = os.path.join(args.results, si, results_dir)
                ip = si
                if si in name_table:
                    name, nethz = name_table[si]
                    student = "%s (%s)" % (name, nethz)
                else:
                    student = 'not found'
                latex_creator.main(local_result_dir, ip, student, original_files=file_hashes)
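The commit message lists adapting the tool to other languages (C++, MATLAB) as a TODO. As a rough, hypothetical starting point (not part of this commit): classification in latex_creator.py is driven by the code_formats and short_names tables, so a new language mainly means adding the label that the unix `file` utility prints for it (the exact strings, e.g. 'C++ source', should be verified) and choosing a listings language for the generated lstlisting blocks. MATLAB .m files are often reported only as 'ASCII text', so they would need an extension fallback analogous to the existing '.py' check in get_ftype().

# sketch only -- hypothetical extension of the tables in latex_creator.py
code_formats = ['Python script', 'C++ source', 'C source']  # labels to grep in `file` output (verify)
short_names = {'Python script': 'PY',
               'C++ source': 'CPP',
               'C source': 'C',
               'ASCII text': 'TXT',
               None: 'None'}
# per-type listings language that tex_listing() could select instead of the
# global language=python set in header.txt
listings_language = {'Python script': 'python',
                     'C++ source': 'C++',
                     'C source': 'C'}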
Copyright (c) 2015-16, R. Bourquin / S. Pintarelli
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.