Dataset columns (name, type, observed lengths / ranges / classes):

repo_name        string   lengths 5 to 92
path             string   lengths 4 to 221
copies           string   19 classes
size             string   lengths 4 to 6
content          string   lengths 766 to 896k
license          string   15 classes
hash             int64    -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean        float64  6.51 to 99.9
line_max         int64    32 to 997
alpha_frac       float64  0.25 to 0.96
autogenerated    bool     1 class
ratio            float64  1.5 to 13.6
config_test      bool     2 classes
has_no_keywords  bool     2 classes
few_assignments  bool     1 class
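The columns above are typically used to filter rows before the large content strings are inspected. A minimal sketch follows, assuming pandas is available and using two rows copied from the records below; the license whitelist and thresholds are illustrative assumptions, not part of this dump.

import pandas as pd

# Two rows copied from the records below, reduced to the columns used here.
records = [
    {"repo_name": "opencorato/sayit", "path": "speeches/search_indexes.py",
     "license": "agpl-3.0", "line_max": 86, "ratio": 3.829837, "autogenerated": False},
    {"repo_name": "clchiou/garage", "path": "py/g1/devtools/buildtools/tests/test_buildtools.py",
     "license": "mit", "line_max": 74, "ratio": 3.657935, "autogenerated": False},
]
df = pd.DataFrame(records)

# Keep non-autogenerated, permissively licensed files with short lines
# (illustrative thresholds, not part of the dataset itself).
mask = (
    ~df["autogenerated"]
    & df["license"].isin(["mit", "apache-2.0", "bsd-2-clause"])
    & (df["line_max"] <= 120)
)
print(df.loc[mask, ["repo_name", "path"]])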

repo_name: opencorato/sayit
path: speeches/search_indexes.py
copies: 2
size: 1643
content:
from haystack import indexes

from speeches.models import Speech, Speaker, Section


class SpeechIndex(indexes.SearchIndex, indexes.Indexable):
    # Use a template here to include speaker name as well... TODO
    text = indexes.CharField(document=True, model_attr='text')  # , use_template=True)
    title = indexes.CharField(model_attr='heading')  # use_template=True)
    start_date = indexes.DateTimeField(model_attr='start_date', null=True)
    instance = indexes.CharField(model_attr='instance__label')
    speaker = indexes.IntegerField(model_attr='speaker_id', null=True)

    def get_model(self):
        return Speech

    def index_queryset(self, using=None):
        return self.get_model()._default_manager.select_related('instance')

    def get_updated_field(self):
        return 'modified'


class SpeakerIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, model_attr='name')
    instance = indexes.CharField(model_attr='instance__label')

    def get_model(self):
        return Speaker

    def index_queryset(self, using=None):
        return self.get_model()._default_manager.select_related('instance')

    def get_updated_field(self):
        return 'updated_at'


class SectionIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, model_attr='heading')
    instance = indexes.CharField(model_attr='instance__label')

    def get_model(self):
        return Section

    def index_queryset(self, using=None):
        return self.get_model()._default_manager.select_related('instance')

    def get_updated_field(self):
        return 'modified'
license: agpl-3.0
hash: -7,003,036,244,537,974,000
line_mean: 33.229167
line_max: 86
alpha_frac: 0.702982
autogenerated: false
ratio: 3.829837
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: jocelynj/weboob
path: weboob/applications/weboorrents/weboorrents.py
copies: 1
size: 5788
content:
# -*- coding: utf-8 -*- # Copyright(C) 2010 Romain Bignon # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from __future__ import with_statement import sys from weboob.capabilities.torrent import ICapTorrent from weboob.tools.application.repl import ReplApplication from weboob.tools.application.formatters.iformatter import IFormatter __all__ = ['Weboorrents'] def sizeof_fmt(num): for x in ['bytes','KB','MB','GB','TB']: if num < 1024.0: return "%-4.1f%s" % (num, x) num /= 1024.0 class TorrentInfoFormatter(IFormatter): MANDATORY_FIELDS = ('id', 'name', 'size', 'seeders', 'leechers', 'url', 'files', 'description') def flush(self): pass def format_dict(self, item): result = u'%s%s%s\n' % (ReplApplication.BOLD, item['name'], ReplApplication.NC) result += 'ID: %s\n' % item['id'] result += 'Size: %s\n' % sizeof_fmt(item['size']) result += 'Seeders: %s\n' % item['seeders'] result += 'Leechers: %s\n' % item['leechers'] result += 'URL: %s\n' % item['url'] result += '\n%sFiles%s\n' % (ReplApplication.BOLD, ReplApplication.NC) for f in item['files']: result += ' * %s\n' % f result += '\n%sDescription%s\n' % (ReplApplication.BOLD, ReplApplication.NC) result += item['description'] return result class TorrentListFormatter(IFormatter): MANDATORY_FIELDS = ('id', 'name', 'size', 'seeders', 'leechers') count = 0 def flush(self): self.count = 0 pass def format_dict(self, item): self.count += 1 if self.interactive: backend = item['id'].split('@', 1)[1] result = u'%s* (%d) %s (%s)%s\n' % (ReplApplication.BOLD, self.count, item['name'], backend, ReplApplication.NC) else: result = u'%s* (%s) %s%s\n' % (ReplApplication.BOLD, item['id'], item['name'], ReplApplication.NC) size = sizeof_fmt(item['size']) result += ' %10s (Seed: %2d / Leech: %2d)' % (size, item['seeders'], item['leechers']) return result class Weboorrents(ReplApplication): APPNAME = 'weboorrents' VERSION = '0.4' COPYRIGHT = 'Copyright(C) 2010 Romain Bignon' CAPS = ICapTorrent EXTRA_FORMATTERS = {'torrent_list': TorrentListFormatter, 'torrent_info': TorrentInfoFormatter, } COMMANDS_FORMATTERS = {'search': 'torrent_list', 'info': 'torrent_info', } torrents = [] def _complete_id(self): return ['%s@%s' % (torrent.id, torrent.backend) for torrent in self.torrents] def complete_info(self, text, line, *ignored): args = line.split(' ') if len(args) == 2: return self._complete_id() def parse_id(self, id): if self.interactive: try: torrent = self.torrents[int(id) - 1] except (IndexError,ValueError): pass else: id = '%s@%s' % (torrent.id, torrent.backend) return ReplApplication.parse_id(self, id) def do_info(self, id): """ info ID Get information about a torrent. 
""" _id, backend_name = self.parse_id(id) found = 0 for backend, torrent in self.do('get_torrent', _id, backends=backend_name): if torrent: self.format(torrent) found = 1 if not found: print >>sys.stderr, 'Torrent "%s" not found' % id else: self.flush() def complete_getfile(self, text, line, *ignored): args = line.split(' ', 2) if len(args) == 2: return self._complete_id() elif len(args) >= 3: return self.path_completer(args[2]) def do_getfile(self, line): """ getfile ID FILENAME Get the .torrent file. FILENAME is where to write the file. If FILENAME is '-', the file is written to stdout. """ id, dest = self.parseargs(line, 2, 2) _id, backend_name = self.parse_id(id) for backend, buf in self.do('get_torrent_file', _id, backends=backend_name): if buf: if dest == '-': print buf else: try: with open(dest, 'w') as f: f.write(buf) except IOError, e: print >>sys.stderr, 'Unable to write .torrent in "%s": %s' % (dest, e) return 1 return print >>sys.stderr, 'Torrent "%s" not found' % id def do_search(self, pattern): """ search [PATTERN] Search torrents. """ self.torrents = [] if not pattern: pattern = None self.set_formatter_header(u'Search pattern: %s' % pattern if pattern else u'Latest torrents') for backend, torrent in self.do('iter_torrents', pattern=pattern): self.torrents.append(torrent) self.format(torrent) self.flush()
license: gpl-3.0
hash: -737,716,371,771,267,100
line_mean: 31.700565
line_max: 124
alpha_frac: 0.554423
autogenerated: false
ratio: 3.736604
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: jbaragry/ardoq-archimate
path: setup.py
copies: 1
size: 1672
content:
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='ardoqarchimate', version='0.0.6', description='ArchiMate Open Exchange Format (R) importer for Ardoq (R)', long_description=long_description, url='https://github.com/jbaragry/ardoq-archimate', author='Jason Baragry', license='MIT', packages=find_packages(exclude=['resources']), include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Documentation', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], keywords='architecture ardoq archimate import development tool', install_requires=['ardoqpy', 'xmltodict', 'configparser'], )
license: mit
hash: 7,526,193,376,280,982,000
line_mean: 33.833333
line_max: 77
alpha_frac: 0.678828
autogenerated: false
ratio: 4.048426
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: abingham/ackward
path: site_scons/ackward/class_property.py
copies: 1
size: 1765
content:
from .element import SigTemplateElement
from .include import ImplInclude
from .trace import trace

header_getter = 'static $type $property_name();'

header_setter = 'static void $property_name($header_signature);'

impl_getter = '''
$type $class_name::$property_name() {
    using namespace boost::python;
    try {
        object prop = $class_name::cls().attr("$property_name");
        return extract<$type>(prop);
    } TRANSLATE_PYTHON_EXCEPTION()
}'''

impl_setter = '''
void $class_name::$property_name($impl_signature) {
    using namespace boost::python;
    try {
        object prop = $class_name::cls().attr("$property_name");
        prop = val;
    } TRANSLATE_PYTHON_EXCEPTION()
}'''


class ClassProperty(SigTemplateElement):
    '''A static property on a class.

    Args:
      * name: The name of the property.
      * type: The type of the property.
      * read_only: Whether the property is read-only or read-write.
    '''

    @trace
    def __init__(self, name, type, read_only=False, parent=None):
        header = header_getter
        impl = impl_getter

        if not read_only:
            header = '\n'.join([header, header_setter])
            impl = '\n'.join([impl, impl_setter])

        SigTemplateElement.__init__(
            self,
            open_templates={
                'header': header,
                'impl': impl,
            },
            symbols={
                'property_name': name,
                'type': type,
                'signature': [(type, 'val')]
            },
            parent=parent)

        self.add_child(
            ImplInclude(
                ('ackward', 'core', 'ExceptionTranslation.hpp')))
license: mit
hash: 3,386,836,769,049,771,000
line_mean: 25.343284
line_max: 67
alpha_frac: 0.529178
autogenerated: false
ratio: 4.162736
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: alexandercrosson/ml
path: tensorflow/cnn_text_classifier/train.py
copies: 1
size: 7609
content:
#! /usr/bin/env python """ Taken from Denny Britz's tutorial on CNNs http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/ """ import tensorflow as tf import numpy as np import os import time import datetime import data_helpers from text_cnn import TextCNN from tensorflow.contrib import learn # Parameters # ------------------------ # Hyperparameters tf.flags.DEFINE_integer('embedding_dim', 128, 'Dimensionality of character embedding (default: 128') tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')") tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)") tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)") # Training parameters tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)") tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)") tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)") # Misc Parameters tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") FLAGS = tf.flags.FLAGS FLAGS._parse_flags() print '\nParameters:' for attr, value in sorted(FLAGS.__flags.items()): print('{}{}'.format(attr.upper(), value)) print '' # Data Preprocessing # ------------------------ # Load data print 'Loading data...' x_test, y = data_helpers.load_data_and_labels() # Build Vocabulary max_document_length = max([len(x.split(' ')) for x in x_test]) vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length) x = np.array(list(vocab_processor.fit_transform(x_test))) # Randomly Shuffle the data np.random.seed(10) shuffle_indicies = np.random.permutation(np.arange(len(y))) x_shuffled = x[shuffle_indicies] y_shuffled = y[shuffle_indicies] # Train / Test Split x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:] y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:] print 'Vocabulary size: {:d}'.format(len(vocab_processor.vocabulary_)) print 'Train/Dev split {:d}/{:d}'.format(len(y_train), len(y_dev)) # Training # ------------------------ with tf.Graph().as_default(): session_conf = tf.ConfigProto( allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=session_conf) with sess.as_default(): cnn = TextCNN( sequence_length=x_train.shape[1], num_classes=2, vocab_size=len(vocab_processor.vocabulary_), embedding_size=FLAGS.embedding_dim, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters) # Training Procecure global_step = tf.Variable(0, name='global_step', trainable=False) optimizer = tf.train.AdamOptimizer(1e-3) grads_and_vars = optimizer.compute_gradients(cnn.loss) train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) # Keep track of gradien values and sparsity (optional) grad_summaries = [] for g, v in grads_and_vars: if g is not None: grad_hist_summary = tf.histogram_summary('{}/grad/hist'.\ format(v.name), g) sparsity_summary = tf.scalar_summary('{}/grad/sparsity'.\ format(v.name), tf.nn.zero_fraction(g)) grad_summaries.append(grad_hist_summary) grad_summaries.append(sparsity_summary) 
grad_summaries_merged = tf.merge_summary(grad_summaries) # Output directory for models and summaries timestamp = str(int(time.time())) out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp)) print("Writing to {}\n".format(out_dir)) # Summaries for loss and accuracy loss_summary = tf.scalar_summary('loss', cnn.loss) acc_summary = tf.scalar_summary('accuracy', cnn.accuracy) # Train summaries train_summary_op = tf.merge_summary([loss_summary, acc_summary, grad_summaries_merged]) train_summary_dir = os.path.join(out_dir, 'summaries', 'train') train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph) # Dev Summaries dev_summary_op = tf.merge_summary([loss_summary, acc_summary]) dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev') dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph) # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints")) checkpoint_prefix = os.path.join(checkpoint_dir, "model") if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) saver = tf.train.Saver(tf.all_variables()) # Write vocabulary vocab_processor.save(os.path.join(out_dir, "vocab")) # Initialize all variables sess.run(tf.initialize_all_variables()) def train_step(x_batch, y_batch): """ A single training step """ feed_dict = { cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: FLAGS.dropout_keep_prob } _, step, summaries, loss, accuracy = sess.run( [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy], feed_dict) time_str = datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) train_summary_writer.add_summary(summaries, step) def dev_step(x_batch, y_batch, writer=None): """ Evaluates model on a dev set """ feed_dict = { cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: 1.0 } step, summaries, loss, accuracy = sess.run( [global_step, dev_summary_op, cnn.loss, cnn.accuracy], feed_dict) time_str = datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) if writer: writer.add_summary(summaries, step) # Generate batches batches = data_helpers.batch_iter( list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs) # Training loop. For each batch... for batch in batches: x_batch, y_batch = zip(*batch) train_step(x_batch, y_batch) current_step = tf.train.global_step(sess, global_step) if current_step % FLAGS.evaluate_every == 0: print("\nEvaluation:") dev_step(x_dev, y_dev, writer=dev_summary_writer) print("") if current_step % FLAGS.checkpoint_every == 0: path = saver.save(sess, checkpoint_prefix, global_step=current_step) print("Saved model checkpoint to {}\n".format(path))
license: mit
hash: -7,158,994,837,752,444,000
line_mean: 38.630208
line_max: 112
alpha_frac: 0.613353
autogenerated: false
ratio: 3.628517
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: bkuczenski/lca-tools
path: antelope_reports/tables/base.py
copies: 1
size: 11488
content:
""" Functions for creating tables for useful / important comparisons. These are analogous to charts in that they are forms of output and it's not clear where they belong. Lists of tabular outputs: * process or fragment Inventory * compare process inventories * compare allocations of a multioutput process * compare LCIA factors for different methods * compare an LCIA method with the components of one or more Lcia Results using it Here's another thing: right now I'm using dynamic grid to show these in the window... but wouldn't it perhaps be preferable to use pandas? doesn't pandas afford all sorts of useful features, like ... um... what is pandas good for again? for working with data frames. Not necessarily for creating data frames. Most likely, I could modify dynamic_grid to *return* a dataframe instead of drawing a table. """ from collections import defaultdict from pandas import DataFrame def printable(tup, width=8): out = [] for k in tup: if isinstance(k, str): out.append(k) elif k is None: out.append('') else: try: g = '%*.3g' % (width, k) except TypeError: g = '%*.*s' % (width, width, '----') out.append(g) return tuple(out) class BaseTableOutput(object): """ A prototype class for storing and returning tabular information. This should ultimately be adopted in places where dynamic_grids are used, or where TeX or excel tables are produced (like in lca_matrix foreground output generators) but for now it is just being used to provide some separation of concerns for the flowables super-grid. At the heart is a dict whose key is a 2-tuple of (row signifier, column index). The row signifier can be any hashable object, but the column indices are always sequential. re-ordering columns is something we do not feel particularly like enabling at the present time. The user creates the table with initialization parameters as desired, and then builds out the table by adding columns in sequence. The table has an inclusion criterion for the iterables (which could be None)-- if the criterion is met, the object is added; if not, it is skipped. The criterion can change, but (since the table contents are static) this will not result in columns being re-iterated. Subclasses MAY redefine: _returns_sets: determines whether each grid item is singly or multiply valued Subclasses MUST implement: _near_headings -- column names for left-side headings _generate_items(col) -- argument is a column iterable - generates items _pull_row_from_item(item) -- argument is one of the objects returned by the column iteration, returns row key _extract_data_from_item -- argument is an dict from the grid dict, returns either a dict or an immutable object """ _near_headings = '', # should be overridden _far_headings = '', # should be overridden _returns_sets = False def _pull_row_from_item(self, item): """ Returns the row tuple from an item, for insertion into the rows set. meant to be overridden :param item: :return: always a tuple. default item, """ row = item # if not self._returns_sets: return row, def _pull_note_from_item(self, item): """ Returns the "long" / descriptive text appended to the right-hand side of the table. should return a str. Only used if _returns_sets is false (otherwise, the sets indicate the row + subrow labels) This is may turn out to be totally silly / pointless. :param item: :return: """ return '' def _generate_items(self, iterable): """ yields the items from a column entry. Meant to be overridden. 
:param iterable: :return: """ for item in iterable: if self._criterion(item): yield item def _extract_data_from_item(self, item): """ note: dict item is a list of components Determines how to get the data point from the item/list. Meant to be overridden. If self._returns_sets is true, should return a dict. Else should return an immutable. :param item: :return: a string """ return item def _header_row(self): """ Returns a tuple of columns for the header row :return: """ header = self._near_headings for i, _ in enumerate(self._columns): header += ('C%d' % i), header += self._far_headings # placeholder for row notes / subitem keys return header def _build_near_header(self, row, prev): the_row = [] for i, _ in enumerate(self._near_headings): if prev is not None: if prev[i] == row[i]: the_row.append('""') continue the_row.append('%s' % row[i]) return the_row def _build_row(self, row, prev=None): """ Returns a single row as a tuple. :param row: :param prev: [None] previous row printed (input, not output). Used to suppress header output for repeat entries. :return: """ # first build the near header the_row = self._build_near_header(row, prev) data_keys = set() data_vals = [] # first pass: get all the data / keys for i, _ in enumerate(self._columns): data = self._extract_data_from_item(self._d[row, i]) if isinstance(data, dict): if not self._returns_sets: raise TypeError('multiple values returned but subclass does not allow them!') for k in data.keys(): data_keys.add(k) data_vals.append(data) # second pass: build the sub-table by rows if self._returns_sets: the_rows = [] _ftt = True # first time through keys = tuple(sorted(data_keys, key=lambda x: x[-2])) for k in keys: if not _ftt: the_row = ['' for i in range(len(self._near_headings))] for i, _ in enumerate(self._columns): if k in data_vals[i]: the_row.append(data_vals[i][k]) else: the_row.append(None) the_row.append(k) if _ftt: the_row.append(self._notes[row]) else: the_row.append('') the_rows.append(the_row) _ftt = False return the_rows else: the_row.extend(data_vals) # add notes the_row.append(self._notes[row]) return the_row def __init__(self, *args, criterion=None): """ Provide 0 or more positional arguments as data columns; add data columns later with add_column(arg) :param args: sequential data columns :param criterion: A callable expression that returns true if a given """ self._d = defaultdict(list) if callable(criterion): self._criterion = criterion else: if criterion is not None: print('Ignoring non-callable criterion') self._criterion = lambda x: True self._rows = set() # set of valid keys to dict self._notes = dict() self._columns = [] # list of columns in the order added # a valid reference consists of (x, y) where x in self._rows and y < len(self._columns) for arg in args: self.add_column(arg) def _add_rowitem(self, col_idx, item, row=None): if row is None: row = self._pull_row_from_item(item) self._rows.add(row) if row not in self._notes: self._notes[row] = self._pull_note_from_item(item) self._d[row, col_idx].append(item) def add_column(self, arg): col_idx = len(self._columns) for k in self._generate_items(arg): self._add_rowitem(col_idx, k) self._columns.append(arg) def _sorted_rows(self): for row in sorted(self._rows, key=lambda x: tuple([str(k) for k in x])): yield row def text(self, width=10, hdr_width=24, max_width=112, expanded=True): """ Outputs the table in text format :return: nothing. 
""" header = self._header_row() prev = None body = [] width = max(6, width) wds = [len(header[i]) for i in range(len(self._near_headings))] # determine column widths for row in self._sorted_rows(): prt_row = self._build_row(row, prev=prev) if self._returns_sets: wds = [min(max(wds[i], len('%s' % prt_row[0][i])), hdr_width) for i in range(len(self._near_headings))] else: wds = [min(max(wds[i], len('%s' % prt_row[i])), hdr_width) for i in range(len(self._near_headings))] body.append(prt_row) prev = row # build display string rem_width = max_width fmt = '' for i in wds: rem_width -= i fmt += '%%-%d.%ds ' % (i, i) rem_width -= 1 for i in range(len(self._columns)): rem_width -= width fmt += '%%-%d.%ds ' % (width, width) rem_width -= 1 if rem_width < 0: # uh oh negative rem width: widen freely; set remainder to 10 chars max_width -= (rem_width - 10) rem_width = 10 fmt += '%%-%d.%ds' % (rem_width, rem_width) if self._returns_sets: fmt += ' %s' print(fmt % header) print('-' * max_width) for row in body: if self._returns_sets: for subrow in row: # sorted(row, key=lambda x: x[-2]) print(fmt % printable(subrow, width=width)) else: print(fmt % printable(row, width=width)) print(fmt % header) print('\nColumns:') for i, c in enumerate(self._columns): print('C%d: %s' % (i, c)) def dataframe(self): df = DataFrame(columns=self._header_row()) prev = None for row in self._sorted_rows(): if self._returns_sets: for r in self._build_row(row): d = dict(zip(self._header_row(), printable(r))) df = df.append(d, ignore_index=True) else: d = dict(zip(self._header_row(), printable(self._build_row(row, prev=prev)))) df = df.append(d, ignore_index=True) prev = row return df def to_excel(self, xl_writer, sheetname, width_scaling=0.75): """ Must supply a pandas XlsxWriter. This routine does not save the document. :param xl_writer: :param sheetname: :param width_scaling: :return: """ df = self.dataframe() df.to_excel(xl_writer, sheet_name=sheetname) sht = xl_writer.sheets[sheetname] for k in self._near_headings + self._far_headings: ix = df.columns.tolist().index(k) + 1 mx = max([7, width_scaling * df[k].astype(str).str.len().max()]) sht.set_column(ix, ix, width=mx)
license: gpl-2.0
hash: -5,190,139,048,839,691,000
line_mean: 34.9
line_max: 120
alpha_frac: 0.566591
autogenerated: false
ratio: 4.035125
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: saaros/pghoard
path: pghoard/config.py
copies: 1
size: 5845
content:
""" pghoard - configuration validation Copyright (c) 2016 Ohmu Ltd See LICENSE for details """ from pghoard.common import convert_pg_command_version_to_number from pghoard.postgres_command import PGHOARD_HOST, PGHOARD_PORT from pghoard.rohmu import get_class_for_transfer from pghoard.rohmu.errors import InvalidConfigurationError from pghoard.rohmu.snappyfile import snappy import json import os import subprocess def set_config_defaults(config, *, check_commands=True): # TODO: consider implementing a real configuration schema at some point # misc global defaults config.setdefault("backup_location", None) config.setdefault("http_address", PGHOARD_HOST) config.setdefault("http_port", PGHOARD_PORT) config.setdefault("alert_file_dir", config.get("backup_location") or os.getcwd()) config.setdefault("json_state_file_path", "/tmp/pghoard_state.json") # XXX: get a better default config.setdefault("log_level", "INFO") config.setdefault("path_prefix", "") config.setdefault("upload_retries_warning_limit", 3) # set command paths and check their versions for command in ["pg_basebackup", "pg_receivexlog"]: command_path = config.setdefault(command + "_path", "/usr/bin/" + command) if check_commands: version_output = subprocess.check_output([command_path, "--version"]) version_string = version_output.decode("ascii").strip() config[command + "_version"] = convert_pg_command_version_to_number(version_string) else: config[command + "_version"] = None # default to 5 compression and transfer threads config.setdefault("compression", {}).setdefault("thread_count", 5) config.setdefault("transfer", {}).setdefault("thread_count", 5) # default to prefetching min(#compressors, #transferagents) - 1 objects so all # operations where prefetching is used run fully in parallel without waiting to start config.setdefault("restore_prefetch", min( config["compression"]["thread_count"], config["transfer"]["thread_count"]) - 1) # if compression algorithm is not explicitly set prefer snappy if it's available if snappy is not None: config["compression"].setdefault("algorithm", "snappy") else: config["compression"].setdefault("algorithm", "lzma") config["compression"].setdefault("level", 0) # defaults for sites config.setdefault("backup_sites", {}) for site_name, site_config in config["backup_sites"].items(): site_config.setdefault("active", True) site_config.setdefault("active_backup_mode", "pg_receivexlog") site_config.setdefault("basebackup_count", 2) site_config.setdefault("basebackup_interval_hours", 24) site_config.setdefault("basebackup_mode", "pipe" if site_config.get("stream_compression") else "basic") site_config.setdefault("encryption_key_id", None) site_config.setdefault("object_storage", None) site_config.setdefault("pg_xlog_directory", "/var/lib/pgsql/data/pg_xlog") obj_store = site_config["object_storage"] or {} if not obj_store: pass elif "storage_type" not in obj_store: raise InvalidConfigurationError("Site {!r}: storage_type not defined for object_storage".format(site_name)) elif obj_store["storage_type"] == "local" and obj_store.get("directory") == config.get("backup_location"): raise InvalidConfigurationError( "Site {!r}: invalid 'local' target directory {!r}, must be different from 'backup_location'".format( site_name, config.get("backup_location"))) else: try: get_class_for_transfer(obj_store["storage_type"]) except ImportError as ex: raise InvalidConfigurationError( "Site {0!r} object_storage: {1.__class__.__name__!s}: {1!s}".format(site_name, ex)) return config def read_json_config_file(filename, *, 
check_commands=True, add_defaults=True): try: with open(filename, "r") as fp: config = json.load(fp) except FileNotFoundError: raise InvalidConfigurationError("Configuration file {!r} does not exist".format(filename)) except ValueError as ex: raise InvalidConfigurationError("Configuration file {!r} does not contain valid JSON: {}" .format(filename, str(ex))) except OSError as ex: raise InvalidConfigurationError("Configuration file {!r} can't be opened: {}" .format(filename, ex.__class__.__name__)) if not add_defaults: return config return set_config_defaults(config, check_commands=check_commands) def get_site_from_config(config, site): if not config.get("backup_sites"): raise InvalidConfigurationError("No backup sites defined in configuration") site_count = len(config["backup_sites"]) if site is None: if site_count > 1: raise InvalidConfigurationError("Backup site not set and configuration file defines {} sites: {}" .format(site_count, sorted(config["backup_sites"]))) site = list(config["backup_sites"])[0] elif site not in config["backup_sites"]: n_sites = "{} other site{}".format(site_count, "s" if site_count > 1 else "") raise InvalidConfigurationError("Site {!r} not defined in configuration file. {} are defined: {}" .format(site, n_sites, sorted(config["backup_sites"]))) return site def key_lookup_for_site(config, site): def key_lookup(key_id): return config["backup_sites"][site]["encryption_keys"][key_id]["private"] return key_lookup
license: apache-2.0
hash: 8,807,986,599,779,522,000
line_mean: 45.76
line_max: 119
alpha_frac: 0.648417
autogenerated: false
ratio: 4.14539
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: glaudsonml/kurgan-ai
path: tools/sqlmap/plugins/dbms/oracle/fingerprint.py
copies: 1
size: 3732
content:
#!/usr/bin/env python

"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import re

from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint

class Fingerprint(GenericFingerprint):
    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.ORACLE)

    def getFingerprint(self):
        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)

        if wsOsFp:
            value += "%s\n" % wsOsFp

        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)

            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp

        value += "back-end DBMS: "

        if not conf.extensiveFp:
            value += DBMS.ORACLE
            return value

        actVer = Format.getDbms()
        blank = " " * 15
        value += "active fingerprint: %s" % actVer

        if kb.bannerFp:
            banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
            banVer = Format.getDbms([banVer])
            value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)

        htmlErrorFp = Format.getErrorParsedDBMSes()

        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)

        return value

    def checkDbms(self):
        if not conf.extensiveFp and (Backend.isDbmsWithin(ORACLE_ALIASES) or (conf.dbms or "").lower() in ORACLE_ALIASES):
            setDbms(DBMS.ORACLE)

            self.getBanner()

            return True

        infoMsg = "testing %s" % DBMS.ORACLE
        logger.info(infoMsg)

        # NOTE: SELECT ROWNUM=ROWNUM FROM DUAL does not work connecting
        # directly to the Oracle database
        if conf.direct:
            result = True
        else:
            result = inject.checkBooleanExpression("ROWNUM=ROWNUM")

        if result:
            infoMsg = "confirming %s" % DBMS.ORACLE
            logger.info(infoMsg)

            # NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
            # not work connecting directly to the Oracle database
            if conf.direct:
                result = True
            else:
                result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")

            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
                logger.warn(warnMsg)

                return False

            setDbms(DBMS.ORACLE)

            self.getBanner()

            if not conf.extensiveFp:
                return True

            infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
            logger.info(infoMsg)

            for version in ("11i", "10g", "9i", "8i"):
                number = int(re.search("([\d]+)", version).group(1))
                output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))

                if output:
                    Backend.setVersion(version)
                    break

            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
            logger.warn(warnMsg)

            return False

    def forceDbmsEnum(self):
        if conf.db:
            conf.db = conf.db.upper()

        if conf.tbl:
            conf.tbl = conf.tbl.upper()
license: apache-2.0
hash: -7,529,427,755,926,559,000
line_mean: 28.856
line_max: 178
alpha_frac: 0.580118
autogenerated: false
ratio: 3.746988
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: noca/pythonlibs
path: cache.py
copies: 1
size: 1939
content:
# -*- coding: utf-8 -*-
'''
Common cache method for python.
Check usage on Example.
'''


class DontCache(Exception):
    pass


def cache(compute_key, container_factory):
    marker = object()

    def decorator(func):
        def replacement(*args, **kwargs):
            cache = container_factory()
            if cache is None:
                return func(*args, **kwargs)

            try:
                key = compute_key(*args, **kwargs)
            except DontCache:
                return func(*args, **kwargs)

            key = '{0}.{1}:{2}'.format(func.__module__, func.__name__, key)

            cached_value = cache.get(key, marker)
            if cached_value is marker:
                cached_value = cache[key] = func(*args, **kwargs)
            else:
                pass

            return cached_value

        replacement.__doc__ = func.__doc__
        return replacement

    return decorator


# Show Example
if __name__ == '__main__':

    # container is an factory function provide dict like object
    # for storing cache, the scope is limited by this factory
    def local_container():
        if 'example_cache' not in globals():
            globals()['example_cache'] = dict()
        return globals()['example_cache']

    # we always provide a more sofisticated cache function for
    # a given cache factory
    def local_cache(compute_key):
        return cache(compute_key, local_container)

    # compute_key takes exactly parameters as to be cached function
    # , it's function specified
    def _cachekey_exmample_func(selects, filters):
        key = ''
        for s in selects:
            key += s + ':'
        for f in filters:
            key += f + '-'
        return key

    # decorate the normal function is all
    @local_cache(_cachekey_exmample_func)
    def sql_query(selects, filters):
        return
license: bsd-2-clause
hash: -4,467,158,069,026,433,000
line_mean: 25.561644
line_max: 67
alpha_frac: 0.542548
autogenerated: false
ratio: 4.49884
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: clchiou/garage
path: py/g1/devtools/buildtools/tests/test_buildtools.py
copies: 1
size: 2374
content:
import unittest
import unittest.mock

import distutils.errors

from g1.devtools import buildtools


class BuildtoolsTest(unittest.TestCase):

    @unittest.mock.patch(buildtools.__name__ + '.distutils.file_util')
    def test_make_copy_files(self, mock_file_util):
        mock_cmd = unittest.mock.Mock()
        mock_cmd.FILENAMES = []
        mock_cmd.SRC_DIR = None
        mock_cmd.DST_DIR = None

        cls = buildtools.make_copy_files(filenames=[])

        cls.initialize_options(mock_cmd)
        self.assertIsNone(mock_cmd.src_dir)
        self.assertIsNone(mock_cmd.dst_dir)

        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'--src-dir is required',
        ):
            cls.finalize_options(mock_cmd)

        mock_cmd.src_dir = 'a/b'
        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'--dst-dir is required',
        ):
            cls.finalize_options(mock_cmd)

        mock_cmd.dst_dir = 'c/d'
        mock_cmd.FILENAMES = ['e', 'f']
        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'source file does not exist: a/b/e',
        ):
            cls.finalize_options(mock_cmd)

        mock_file_util.copy_file.assert_not_called()
        cls.run(mock_cmd)
        self.assertEqual(
            mock_file_util.copy_file.mock_calls,
            [
                unittest.mock.call('a/b/e', 'c/d/e', preserve_mode=False),
                unittest.mock.call('a/b/f', 'c/d/f', preserve_mode=False),
            ],
        )

    @unittest.mock.patch(buildtools.__name__ + '.subprocess')
    def test_read_pkg_config(self, subprocess_mock):
        subprocess_mock.run.return_value.stdout = (
            b'-I"/s o m e/where/include" -I"/s o m e/where/include" '
            b'-L"/s o m e/where/lib" -L"/s o m e/where/lib" '
            b'-lfoo -lfoo '
            b'-DMSG="hello world" -DMSG="hello world" '
        )
        self.assertEqual(
            buildtools.read_package_config(''),
            buildtools.PackageConfig(
                include_dirs=['/s o m e/where/include'],
                library_dirs=['/s o m e/where/lib'],
                libraries=['foo'],
                extra_compile_args=['-DMSG=hello world'],
            ),
        )


if __name__ == '__main__':
    unittest.main()
license: mit
hash: -6,761,329,704,171,718,000
line_mean: 31.081081
line_max: 74
alpha_frac: 0.556024
autogenerated: false
ratio: 3.657935
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: dmayer/time_trial
path: time_trial_gui/lib/rq_result_processor.py
copies: 1
size: 1207
content:
from datetime import datetime
from time import sleep

from rq.job import Job
from models.trial import Trial
from redis import Redis

__author__ = 'daniel'

import threading


class RqResultsProcessor(threading.Thread):
    session = None
    stopped = False

    def stop(self):
        self.stopped = True

    def run(self):
        redis_conn = Redis()

        # get all
        while True:
            incomplete = self.session.query(Trial).filter(Trial.end_date == None).filter(Trial.start_date!=None).all()
            for t in incomplete:
                try:
                    job = Job.fetch(t.job, connection=redis_conn)
                except:
                    print("Exception occurred. Moving on.")
                    sleep(1)
                    continue

                if job.result is not None:
                    print("Result for " + t.name + " found.")
                    t.result = job.result
                    t.end_date = datetime.now()
                    self.session.add(t)
                    self.session.commit()
                    self.session.expire(t)

            if self.stopped:
                self.session.close()
                return

            sleep(1)
license: mit
hash: -4,295,996,394,186,815,000
line_mean: 26.431818
line_max: 118
alpha_frac: 0.509528
autogenerated: false
ratio: 4.537594
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: futurely/openai-universe-agents
path: ga3c/NetworkVP.py
copies: 1
size: 12245
content:
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import re import numpy as np import tensorflow as tf from Config import Config class NetworkVP: def __init__(self, device, model_name, num_actions): self.device = device self.model_name = model_name self.num_actions = num_actions self.img_width = Config.IMAGE_WIDTH self.img_height = Config.IMAGE_HEIGHT self.img_channels = Config.STACKED_FRAMES self.learning_rate = Config.LEARNING_RATE_START self.beta = Config.BETA_START self.log_epsilon = Config.LOG_EPSILON self.graph = tf.Graph() with self.graph.as_default() as g: with tf.device(self.device): self._create_graph() self.sess = tf.Session( graph=self.graph, config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, gpu_options=tf.GPUOptions(allow_growth=True))) self.sess.run(tf.global_variables_initializer()) if Config.TENSORBOARD: self._create_tensor_board() if Config.LOAD_CHECKPOINT or Config.SAVE_MODELS: vars = tf.global_variables() self.saver = tf.train.Saver( {var.name: var for var in vars}, max_to_keep=0) def _create_graph(self): self.x = tf.placeholder( tf.float32, [None, self.img_height, self.img_width, self.img_channels], name='X') self.y_r = tf.placeholder(tf.float32, [None], name='Yr') self.var_beta = tf.placeholder(tf.float32, name='beta', shape=[]) self.var_learning_rate = tf.placeholder(tf.float32, name='lr', shape=[]) self.global_step = tf.Variable(0, trainable=False, name='step') # As implemented in A3C paper self.n1 = self.conv2d_layer(self.x, 8, 16, 'conv11', strides=[1, 4, 4, 1]) self.n2 = self.conv2d_layer(self.n1, 4, 32, 'conv12', strides=[1, 2, 2, 1]) self.action_index = tf.placeholder(tf.float32, [None, self.num_actions]) _input = self.n2 flatten_input_shape = _input.get_shape() nb_elements = flatten_input_shape[1] * flatten_input_shape[ 2] * flatten_input_shape[3] self.flat = tf.reshape(_input, shape=[-1, nb_elements._value]) self.d1 = self.dense_layer(self.flat, 256, 'dense1') self.logits_v = tf.squeeze( self.dense_layer(self.d1, 1, 'logits_v', func=None), squeeze_dims=[1]) self.cost_v = 0.5 * 
tf.reduce_sum( tf.square(self.y_r - self.logits_v), reduction_indices=0) self.logits_p = self.dense_layer(self.d1, self.num_actions, 'logits_p') if Config.USE_LOG_SOFTMAX: self.softmax_p = tf.nn.softmax(self.logits_p) self.log_softmax_p = tf.nn.log_softmax(self.logits_p) self.log_selected_action_prob = tf.reduce_sum( self.log_softmax_p * self.action_index, reduction_indices=1) self.cost_p_1 = self.log_selected_action_prob * ( self.y_r - tf.stop_gradient(self.logits_v)) self.cost_p_2 = -1 * self.var_beta * \ tf.reduce_sum(self.log_softmax_p * self.softmax_p, reduction_indices=1) else: self.softmax_p = (tf.nn.softmax(self.logits_p) + Config.MIN_POLICY) / ( 1.0 + Config.MIN_POLICY * self.num_actions) self.selected_action_prob = tf.reduce_sum( self.softmax_p * self.action_index, reduction_indices=1) self.cost_p_1 = tf.log(tf.maximum(self.selected_action_prob, self.log_epsilon)) \ * (self.y_r - tf.stop_gradient(self.logits_v)) self.cost_p_2 = -1 * self.var_beta * \ tf.reduce_sum(tf.log(tf.maximum(self.softmax_p, self.log_epsilon)) * self.softmax_p, reduction_indices=1) self.cost_p_1_agg = tf.reduce_sum(self.cost_p_1, reduction_indices=0) self.cost_p_2_agg = tf.reduce_sum(self.cost_p_2, reduction_indices=0) self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg) if Config.DUAL_RMSPROP: self.opt_p = tf.train.RMSPropOptimizer( learning_rate=self.var_learning_rate, decay=Config.RMSPROP_DECAY, momentum=Config.RMSPROP_MOMENTUM, epsilon=Config.RMSPROP_EPSILON) self.opt_v = tf.train.RMSPropOptimizer( learning_rate=self.var_learning_rate, decay=Config.RMSPROP_DECAY, momentum=Config.RMSPROP_MOMENTUM, epsilon=Config.RMSPROP_EPSILON) else: self.cost_all = self.cost_p + self.cost_v self.opt = tf.train.RMSPropOptimizer( learning_rate=self.var_learning_rate, decay=Config.RMSPROP_DECAY, momentum=Config.RMSPROP_MOMENTUM, epsilon=Config.RMSPROP_EPSILON) if Config.USE_GRAD_CLIP: if Config.DUAL_RMSPROP: self.opt_grad_v = self.opt_v.compute_gradients(self.cost_v) self.opt_grad_v_clipped = [ (tf.clip_by_norm(g, Config.GRAD_CLIP_NORM), v) for g, v in self.opt_grad_v if not g is None ] self.train_op_v = self.opt_v.apply_gradients(self.opt_grad_v_clipped) self.opt_grad_p = self.opt_p.compute_gradients(self.cost_p) self.opt_grad_p_clipped = [ (tf.clip_by_norm(g, Config.GRAD_CLIP_NORM), v) for g, v in self.opt_grad_p if not g is None ] self.train_op_p = self.opt_p.apply_gradients(self.opt_grad_p_clipped) self.train_op = [self.train_op_p, self.train_op_v] else: self.opt_grad = self.opt.compute_gradients(self.cost_all) self.opt_grad_clipped = [ (tf.clip_by_average_norm(g, Config.GRAD_CLIP_NORM), v) for g, v in self.opt_grad ] self.train_op = self.opt.apply_gradients(self.opt_grad_clipped) else: if Config.DUAL_RMSPROP: self.train_op_v = self.opt_p.minimize( self.cost_v, global_step=self.global_step) self.train_op_p = self.opt_v.minimize( self.cost_p, global_step=self.global_step) self.train_op = [self.train_op_p, self.train_op_v] else: self.train_op = self.opt.minimize( self.cost_all, global_step=self.global_step) def _create_tensor_board(self): summaries = tf.get_collection(tf.GraphKeys.SUMMARIES) summaries.append(tf.summary.scalar("Pcost_advantage", self.cost_p_1_agg)) summaries.append(tf.summary.scalar("Pcost_entropy", self.cost_p_2_agg)) summaries.append(tf.summary.scalar("Pcost", self.cost_p)) summaries.append(tf.summary.scalar("Vcost", self.cost_v)) summaries.append(tf.summary.scalar("LearningRate", self.var_learning_rate)) summaries.append(tf.summary.scalar("Beta", self.var_beta)) for var in tf.trainable_variables(): 
summaries.append(tf.summary.histogram("weights_%s" % var.name, var)) summaries.append(tf.summary.histogram("activation_n1", self.n1)) summaries.append(tf.summary.histogram("activation_n2", self.n2)) summaries.append(tf.summary.histogram("activation_d2", self.d1)) summaries.append(tf.summary.histogram("activation_v", self.logits_v)) summaries.append(tf.summary.histogram("activation_p", self.softmax_p)) self.summary_op = tf.summary.merge(summaries) self.log_writer = tf.summary.FileWriter("logs/%s" % self.model_name, self.sess.graph) def dense_layer(self, input, out_dim, name, func=tf.nn.relu): in_dim = input.get_shape().as_list()[-1] d = 1.0 / np.sqrt(in_dim) with tf.variable_scope(name): w_init = tf.random_uniform_initializer(-d, d) b_init = tf.random_uniform_initializer(-d, d) w = tf.get_variable( 'w', dtype=tf.float32, shape=[in_dim, out_dim], initializer=w_init) b = tf.get_variable('b', shape=[out_dim], initializer=b_init) output = tf.matmul(input, w) + b if func is not None: output = func(output) return output def conv2d_layer(self, input, filter_size, out_dim, name, strides, func=tf.nn.relu): in_dim = input.get_shape().as_list()[-1] d = 1.0 / np.sqrt(filter_size * filter_size * in_dim) with tf.variable_scope(name): w_init = tf.random_uniform_initializer(-d, d) b_init = tf.random_uniform_initializer(-d, d) w = tf.get_variable( 'w', shape=[filter_size, filter_size, in_dim, out_dim], dtype=tf.float32, initializer=w_init) b = tf.get_variable('b', shape=[out_dim], initializer=b_init) output = tf.nn.conv2d(input, w, strides=strides, padding='SAME') + b if func is not None: output = func(output) return output def __get_base_feed_dict(self): return { self.var_beta: self.beta, self.var_learning_rate: self.learning_rate } def get_global_step(self): step = self.sess.run(self.global_step) return step def predict_single(self, x): return self.predict_p(x[None, :])[0] def predict_v(self, x): prediction = self.sess.run(self.logits_v, feed_dict={self.x: x}) return prediction def predict_p(self, x): prediction = self.sess.run(self.softmax_p, feed_dict={self.x: x}) return prediction def predict_p_and_v(self, x): return self.sess.run( [self.softmax_p, self.logits_v], feed_dict={self.x: x}) def train(self, x, y_r, a, trainer_id): feed_dict = self.__get_base_feed_dict() feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a}) self.sess.run(self.train_op, feed_dict=feed_dict) def log(self, x, y_r, a): feed_dict = self.__get_base_feed_dict() feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a}) step, summary = self.sess.run( [self.global_step, self.summary_op], feed_dict=feed_dict) self.log_writer.add_summary(summary, step) def _checkpoint_filename(self, episode): return 'checkpoints/%s_%08d' % (self.model_name, episode) def _get_episode_from_filename(self, filename): # TODO: hacky way of getting the episode. ideally episode should be stored as a TF variable return int(re.split('/|_|\.', filename)[2]) def save(self, episode): self.saver.save(self.sess, self._checkpoint_filename(episode)) def load(self): filename = tf.train.latest_checkpoint( os.path.dirname(self._checkpoint_filename(episode=0))) if Config.LOAD_EPISODE > 0: filename = self._checkpoint_filename(Config.LOAD_EPISODE) self.saver.restore(self.sess, filename) return self._get_episode_from_filename(filename) def get_variables_names(self): return [ var.name for var in self.graph.get_collection('trainable_variables') ] def get_variable_value(self, name): return self.sess.run(self.graph.get_tensor_by_name(name))
license: mit
hash: 7,946,365,921,996,443,000
line_mean: 39.546358
line_max: 95
alpha_frac: 0.644753
autogenerated: false
ratio: 3.248011
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: efforia/eos-dashboard
path: invent/store/store/views.py
copies: 1
size: 3721
content:
# -*- coding: UTF-8 -*-

import paypalrestsdk,urlparse,urllib2

from xml.etree import ElementTree as ETree
from hooks import paypal_api,pagseguro_api
from django.core.mail import send_mail
from django.conf import settings
from django.http import Http404,HttpResponse
from django.http import HttpResponse as response
from django.shortcuts import get_object_or_404,redirect,render
from cartridge.shop.models import Product, ProductVariation, Order, OrderItem
from paypalrestsdk import Payment

def payment_cancel(request):
    # Not implemented already
    return redirect('/')

def paypal_redirect(request,order):
    paypal_api()
    payment = paypalrestsdk.Payment.find(order.transaction_id)
    for link in payment.links:
        if link.method == "REDIRECT":
            redirect_url = link.href
            url = urlparse.urlparse(link.href)
            params = urlparse.parse_qs(url.query)
            redirect_token = params['token'][0]
    order.paypal_redirect_token = redirect_token
    order.save()
    return redirect(redirect_url)

def payment_redirect(request, order_id):
    lookup = {"id": order_id}
    if not request.user.is_authenticated():
        lookup["key"] = request.session.session_key
    elif not request.user.is_staff:
        lookup["user_id"] = request.user.id
    order = get_object_or_404(Order, **lookup)
    is_pagseguro = order.pagseguro_redirect
    is_paypal = order.paypal_redirect_token
    if 'none' not in is_pagseguro:
        return redirect(str(is_pagseguro))
    elif 'none' not in is_paypal:
        return paypal_redirect(request,order)
    else:
        return redirect("/store/execute?orderid=%s" % lookup["id"])

def payment_slip(request):
    orderid = request.GET['id']
    order = Order.objects.filter(id=orderid)[0]
    send_mail('Pedido de boleto',
              'O pedido de boleto foi solicitado ao Efforia para o pedido %s. Em instantes você estará recebendo pelo e-mail. Aguarde instruções.' % order.id,
              'oi@efforia.com.br',
              [order.billing_detail_email,'contato@efforia.com.br'],
              fail_silently=False)
    context = { "order": order }
    resp = render(request,"shop/slip_confirmation.html",context)
    return resp

def payment_bank(request):
    orderid = request.GET['order_id']
    order = Order.objects.filter(id=orderid)[0]
    context = {
        "order": order,
        "agency": settings.BANK_AGENCY,
        "account": settings.BANK_ACCOUNT,
        "socname": settings.BANK_SOCIALNAME
    }
    resp = render(request,"shop/bank_confirmation.html",context)
    return resp

def payment_execute(request, template="shop/payment_confirmation.html"):
    order = None
    lookup = {}
    if request.GET.has_key('token'):
        paypal_api()
        token = request.GET['token']
        payer_id = request.GET['PayerID']
        order = get_object_or_404(Order, paypal_redirect_token=token)
        payment = Payment.find(order.transaction_id)
        payment.execute({ "payer_id": payer_id })
    elif request.GET.has_key('transaction_id'):
        api = pagseguro_api()
        email = api.data['email']
        token = api.data['token']
        transaction = request.GET['transaction_id']
        url = api.config.TRANSACTION_URL % transaction
        resp = urllib2.urlopen("%s?email=%s&token=%s" % (url,email,token)).read()
        lookup["id"] = ETree.fromstring(resp).findall("reference")[0].text
        print ETree.fromstring(resp).findall("reference")[0].text
        if not request.user.is_authenticated():
            lookup["key"] = request.session.session_key
        if not request.user.is_staff:
            lookup["user_id"] = request.user.id
        order = get_object_or_404(Order, **lookup)
        order.transaction_id = transaction
    elif request.GET.has_key('orderid'):
        return redirect("/store/bank?order_id=%s" % request.GET['orderid'])
    order.status = 2
    order.save()
    context = { "order" : order }
    response = render(request, template, context)
    return response
license: lgpl-3.0
hash: -6,523,896,887,667,410,000
line_mean: 39.445652
line_max: 196
alpha_frac: 0.71083
autogenerated: false
ratio: 3.180342
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: saga-project/bliss
path: bliss/plugins/local/localjob.py
copies: 1
size: 11546
content:
# -*- coding: utf-8 -*- # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 __author__ = "Ole Christian Weidner" __copyright__ = "Copyright 2011-2012, Ole Christian Weidner" __license__ = "MIT" from bliss.interface import JobPluginInterface from bliss.plugins.local.process import LocalJobProcess import bliss.saga class LocalJobPlugin(JobPluginInterface): '''Implements a job plugin that can submit jobs to the local machine''' ######################################## ## class BookKeeper: '''Keeps track of job and service objects''' def __init__(self, parent): self.objects = {} self.processes = {} self.parent = parent def add_service_object(self, service_obj): self.objects[hex(id(service_obj))] = {'instance' : service_obj, 'jobs' : []} def del_service_obj(self, service_obj): try: self.objects.remove((hex(id(service_obj)))) except Exception: pass def add_job_object(self, job_obj, service_obj): service_id = hex(id(service_obj)) job_id = hex(id(job_obj)) try: self.objects[service_id]['jobs'].append(job_obj) self.processes[job_id] = LocalJobProcess(jobdescription=job_obj.get_description(), plugin=self.parent) except Exception, ex: self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess, "Can't register job: %s" % (ex)) def del_job_object(self, job_obj): pass def get_service_for_job(self, job_obj): '''Return the service object the job is registered with''' for key in self.objects.keys(): if job_obj in self.objects[key]['jobs']: return self.objects[key]['instance'] self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess, "INTERNAL ERROR: Job object %s is not known by this plugin" % (job)) def get_job_for_jobid(self, service_obj, job_id): '''Return the job object associated with the given job id''' for job in self.list_jobs_for_service(service_obj): proc = self.get_process_for_job(job) if proc.getpid(str(service_obj._url)) == job_id: return job self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess, "Job ID not known by this plugin.") def list_jobs_for_service(self, service_obj): '''List all jobs that are registered with the given service''' service_id = hex(id(service_obj)) return self.objects[service_id]['jobs'] def get_process_for_job(self, job_obj): '''Return the local process object for a given job''' try: return self.processes[hex(id(job_obj))] except Exception, ex: self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess, "INTERNAL ERROR: Job object %s is not associated with a process" % (job_obj)) ## ######################################## ## Step 1: Define adaptor name. Convention is: ## saga.plugin.<package>.<name> _name = 'saga.plugin.job.local' ## Step 2: Define supported url schemas ## _schemas = ['fork'] ## Step 3: Define apis supported by this adaptor ## _apis = ['saga.job'] def __init__(self, url): '''Class constructor''' JobPluginInterface.__init__(self, name=self._name, schemas=self._schemas) self.bookkeeper = self.BookKeeper(self) @classmethod def sanity_check(self): '''Implements interface from _PluginBase''' ## Step 3: Implement sanity_check. This method is called *once* on ## Bliss startup. Here you should check if everything this ## adaptor needs is available, e.g., certain command line tools, ## python modules and so on. ## try: import subprocess except Exception, ex: print "module missing -- plugin disabled. (NEEDS LOGGING SUPPORT)" return False return True def get_runtime_info(self): '''Implements interface from _PluginBase''' #str = "Plugin: %s. 
Registered job.service objects: %s.\n%s".format( # self.name, len(self.objects), repr(self.objects)) #return str def register_service_object(self, service_obj): '''Implements interface from _JobPluginBase''' ## Step 4: Implement register_service_object. This method is called if ## a service object is instantiated with a url schema that matches ## this adaptor. You can still reject it by throwing an exception. if service_obj._url.host != "localhost": self.log_error_and_raise(bliss.saga.Error.BadParameter, "Only 'localhost' can be used as hostname") self.bookkeeper.add_service_object(service_obj) self.log_info("Registered new service object %s" % (repr(service_obj))) def unregister_service_object(self, service_obj): '''Implements interface from _JobPluginBase''' ## Step 5: Implement unregister_service_object. This method is called if ## a service object associated with this plugin is deleted. You ## shouldn't throw an exception here, since this method is called ## by the destructor! self.bookkeeper.del_service_object(service_obj) self.log_info("Unegistered new service object %s" % (repr(service_obj))) #def register_job_object(self, job_obj, service_obj): # '''Implements interface from _JobPluginBase''' # ## Step 6: Implement register_job_object. This method is called if # ## a job object is instantiated via the service.create_job() call. # ## You can still reject it by throwing an exception. # self.bookkeeper.add_job_object(job_obj, service_obj) # self.log_info("Registered new job object %s" % (repr(job_obj))) def unregister_job_object(self, job_obj): '''Implements interface from _JobPluginBase''' self.bookkeeper.del_job_object(job_obj) self.log_info("Unegisteredjob object %s" % (repr(job_obj))) def service_create_job(self, service_obj, job_description): '''Implements interface from _JobPluginBase. This method is called for saga.Service.create_job(). 
''' if job_description.executable is None: self.log_error_and_raise(bliss.saga.Error.BadParameter, "No executable defined in job description") try: job = bliss.saga.job.Job() job._Job__init_from_service(service_obj=service_obj, job_desc=job_description) #self.bookkeeper.add_job_object_to_service(job, service_obj, # bliss.saga.job.JobID(service_obj._url, None)) self.bookkeeper.add_job_object(job, service_obj) return job except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't create a new job because: %s " % (str(ex))) def service_list(self, service_obj): '''Implements interface from _JobPluginBase''' ## Step 76: Implement service_list_jobs() try: return self.bookkeeper.list_jobs_for_service(service_obj) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't retreive job list because: %s " % (str(ex))) def service_get_job(self, service_obj, job_id): '''Implements interface from _JobPluginBase''' ## Step 76: Implement service_get_job() try: return self.bookkeeper.get_job_for_jobid(service_obj, job_id) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job list because: %s " % (str(ex))) def job_get_state(self, job): '''Implements interface from _JobPluginBase''' try: service = self.bookkeeper.get_service_for_job(job) return self.bookkeeper.get_process_for_job(job).getstate() except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job state because: %s " % (str(ex))) def job_get_job_id(self, job): '''Implements interface from _JobPluginBase''' try: service = self.bookkeeper.get_service_for_job(job) return self.bookkeeper.get_process_for_job(job).getpid(str(service._url)) #self.log_info("Started local process: %s %s" % (job.get_description().executable, job.get_description().arguments)) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job id because: %s " % (str(ex))) def job_run(self, job): '''Implements interface from _JobPluginBase''' ## Step X: implement job.run() if job.get_description().executable is None: self.log_error_and_raise(bliss.saga.Error.BadParameter, "No executable defined in job description") try: service = self.bookkeeper.get_service_for_job(job) self.bookkeeper.get_process_for_job(job).run(job.get_description()) #self.log_info("Started local process: %s %s" % (job.get_description().executable, job.get_description().arguments)) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't run job because: %s " % (str(ex))) def job_cancel(self, job): '''Implements interface from _JobPluginBase''' ## Step X: implement job.cancel() try: self.bookkeeper.get_process_for_job(job).terminate() self.log_info("Terminated local process: %s %s" % (job.get_description().executable, job.get_description().arguments)) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't cancel job because: %s (already finished?)" % (str(ex))) def job_wait(self, job, timeout): '''Implements interface from _JobPluginBase''' ## Step X: implement job.wait() try: service = self.bookkeeper.get_service_for_job(job) self.bookkeeper.get_process_for_job(job).wait(timeout) except Exception, ex: self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't wait for the job because: %s " % (str(ex))) def job_get_exitcode(self, job_obj): '''Implements interface from _JobPluginBase''' try: service = self.bookkeeper.get_service_for_job(job_obj) #process = self.bookkeeper.get_process_for_job(job_obj) 
#jobstate = process.getstate()
            #if jobstate != bliss.saga.Job.Done and jobstate != bliss.saga.job.Failed:
            #    self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get the job's exitcode. Job must be in 'Done' or 'Failed' state.")
            #else:
            return self.bookkeeper.get_process_for_job(job_obj).get_exitcode()
        except Exception, ex:
            self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get exitcode for job because: %s " % (str(ex)))
mit
-7,737,438,792,153,225,000
43.237548
146
0.594838
false
3.774436
false
false
false
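For illustration, here is a minimal, self-contained sketch of the id-keyed bookkeeping pattern that LocalJobPlugin.BookKeeper above implements. MiniBookKeeper and FakeProcess are hypothetical stand-ins invented for this sketch; they are not part of the Bliss API.

class FakeProcess(object):
    # Hypothetical stand-in for LocalJobProcess: holds a description and a state.
    def __init__(self, description):
        self.description = description
        self.state = 'New'

class MiniBookKeeper(object):
    # Maps hex(id(service)) -> list of jobs, and hex(id(job)) -> process wrapper.
    def __init__(self):
        self.services = {}
        self.processes = {}

    def add_service(self, service):
        self.services[hex(id(service))] = []

    def add_job(self, job, service, description):
        self.services[hex(id(service))].append(job)
        self.processes[hex(id(job))] = FakeProcess(description)

    def jobs_for_service(self, service):
        return self.services[hex(id(service))]

    def process_for_job(self, job):
        return self.processes[hex(id(job))]

# Usage, with plain objects standing in for service/job instances:
service, job = object(), object()
keeper = MiniBookKeeper()
keeper.add_service(service)
keeper.add_job(job, service, description={'executable': '/bin/date'})
assert keeper.jobs_for_service(service) == [job]
assert keeper.process_for_job(job).state == 'New'

Keying on hex(id(...)) works here because the registry keeps a reference to every registered object, so the id stays unique for the object's lifetime.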
Jyrsa/hoppy.fi
hoppy/settings.py
1
3844
""" Django settings for hoppy project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'my name is my passport, verify me' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition DEFAULT_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) THIRD_PARTY_APPS = ( 'south', 'autoslug', 'huey.djhuey', 'tastypie', ) LOCAL_APPS = ( 'beerstatus', ) INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'hoppy.urls' WSGI_APPLICATION = 'hoppy.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' HUEY = { 'backend': 'huey.backends.redis_backend', 'name': 'hoppy-connection', 'connection': {'host': 'localhost', 'port':6379}, 'always_eager': False, 'consumer_options': {'workers': 4}, } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'syslog': { 'level':'INFO', 'class':'logging.handlers.SysLogHandler', 'address': '/dev/log', }, 'console':{ 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, 'null': { 'level': 'DEBUG', 'class': 'logging.NullHandler', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'huey.consumer': { 'handlers': ['syslog', 'console'], 'level': 'DEBUG', 'propagate': True, } } } #having a local_settings isn't mandatory but #if one exists, it overrides stuff try: from local_settings import * except ImportError: pass
mit
-4,624,239,657,169,039,000
22.728395
95
0.601197
false
3.526606
false
false
false
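The settings.py above ends by importing an optional local_settings module whose names override anything defined before it. A hypothetical local_settings.py for a production host might look like the following; every value is illustrative and not taken from the project.

import os

# Hypothetical production overrides; adjust for the actual deployment.
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['hoppy.example.org']

# Keep the real secret key out of version control.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'override-me')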
dreadrel/UWF_2014_spring_COP3990C-2507
notebooks/scripts/book_code/code/timeseqs.py
1
1033
# File timeseqs.py "Test the relative speed of iteration tool alternatives." import sys, timer # Import timer functions reps = 10000 repslist = list(range(reps)) # Hoist out, list in both 2.X/3.X def forLoop(): res = [] for x in repslist: res.append(abs(x)) return res def listComp(): return [abs(x) for x in repslist] def mapCall(): return list(map(abs, repslist)) # Use list() here in 3.X only! # return map(abs, repslist) def genExpr(): return list(abs(x) for x in repslist) # list() required to force results def genFunc(): def gen(): for x in repslist: yield abs(x) return list(gen()) # list() required to force results print(sys.version) for test in (forLoop, listComp, mapCall, genExpr, genFunc): (bestof, (total, result)) = timer.bestoftotal(5, 1000, test) print ('%-9s: %.5f => [%s...%s]' % (test.__name__, bestof, result[0], result[-1]))
apache-2.0
-2,903,239,872,491,046,000
29.382353
83
0.568248
false
3.364821
false
false
false
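timeseqs.py above imports a non-standard timer module and unpacks timer.bestoftotal(5, 1000, test) as (bestof, (total, result)). The sketch below is one possible implementation matching that interface; it is a guess at the helper's shape, not the original module.

from timeit import default_timer as clock   # available on both 2.X and 3.X

def total(reps, func, *args, **kwargs):
    # Time reps calls of func; return (elapsed seconds, last result).
    start = clock()
    for _ in range(reps):
        result = func(*args, **kwargs)
    return (clock() - start, result)

def bestoftotal(best_reps, total_reps, func, *args, **kwargs):
    # Best of best_reps runs of total(); returns (best total time, (best total time, result)).
    best = None
    for _ in range(best_reps):
        candidate = total(total_reps, func, *args, **kwargs)
        if best is None or candidate[0] < best[0]:
            best = candidate
    return (best[0], best)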
mdavid/cherokee-webserver-svnclone
admin/util.py
1
8071
# -*- coding: utf-8 -*- # # Cherokee-admin # # Authors: # Alvaro Lopez Ortega <alvaro@alobbs.com> # # Copyright (C) 2001-2010 Alvaro Lopez Ortega # # This program is free software; you can redistribute it and/or # modify it under the terms of version 2 of the GNU General Public # License as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # import os import sys import glob import socket import CTK # # Strings # def bool_to_active (b): return (_('Inactive'), _('Active'))[bool(b)] def bool_to_onoff (b): return (_('Off'), _('On'))[bool(b)] def bool_to_yesno (b): return (_('No'), _('Yes'))[bool(b)] # # Virtual Server # def cfg_vsrv_get_next(): """ Get the prefix of the next vserver """ tmp = [int(x) for x in CTK.cfg.keys("vserver")] tmp.sort() next = str(tmp[-1] + 10) return "vserver!%s" % (next) def cfg_vsrv_rule_get_next (pre): """ Get the prefix of the next rule of a vserver """ tmp = [int(x) for x in CTK.cfg.keys("%s!rule"%(pre))] tmp.sort() if tmp: next = tmp[-1] + 100 else: next = 100 return (next, "%s!rule!%d" % (pre, next)) def cfg_vsrv_rule_find_extension (pre, extension): """Find an extension rule in a virtual server """ for r in CTK.cfg.keys("%s!rule"%(pre)): p = "%s!rule!%s" % (pre, r) if CTK.cfg.get_val ("%s!match"%(p)) == "extensions": if extension in CTK.cfg.get_val ("%s!match!extensions"%(p)): return p def cfg_vsrv_rule_find_regexp (pre, regexp): """Find a regular expresion rule in a virtual server """ for r in CTK.cfg.keys("%s!rule"%(pre)): p = "%s!rule!%s" % (pre, r) if CTK.cfg.get_val ("%s!match"%(p)) == "request": if regexp == CTK.cfg.get_val ("%s!match!request"%(p)): return p # # Information Sources # def cfg_source_get_next (): tmp = [int(x) for x in CTK.cfg.keys("source")] if not tmp: return (1, "source!1") tmp.sort() next = tmp[-1] + 10 return (next, "source!%d" % (next)) def cfg_source_find_interpreter (in_interpreter = None, in_nick = None): for i in CTK.cfg.keys("source"): if CTK.cfg.get_val("source!%s!type"%(i)) != 'interpreter': continue if (in_interpreter and in_interpreter in CTK.cfg.get_val("source!%s!interpreter"%(i))): return "source!%s" % (i) if (in_nick and in_nick in CTK.cfg.get_val("source!%s!nick"%(i))): return "source!%s" % (i) def cfg_source_find_empty_port (n_ports=1): ports = [] for i in CTK.cfg.keys("source"): host = CTK.cfg.get_val ("source!%s!host"%(i)) if not host: continue colon = host.rfind(':') if colon < 0: continue port = int (host[colon+1:]) if port < 1024: continue ports.append (port) pport = 1025 for x in ports: if pport + n_ports < x: return pport assert (False) def cfg_source_find_free_port (host_name='localhost'): """Return a port not currently running anything""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((host_name, 0)) addr, port = s.getsockname() s.close() return port def cfg_source_get_localhost_addr (): x, x, addrs = socket.gethostbyname_ex('localhost') if addrs: return addrs[0] return None def cfg_get_surrounding_repls (macro, value, n_minus=9, n_plus=9): replacements = {} tmp = value.split('!') pre = '!'.join(tmp[:-1]) num = int(tmp[-1]) for n in range(n_minus): 
replacements['%s_minus%d'%(macro,n+1)] = '%s!%d' %(pre, num-(n+1)) for n in range(n_plus): replacements['%s_plus%d'%(macro,n+1)] = '%s!%d' %(pre, num+(n+1)) return replacements # # Version strings management # def version_to_int (v): num = 0 tmp = v.split('.') if len(tmp) >= 3: num += int(tmp[2]) * (10**3) if len(tmp) >= 2: num += int(tmp[1]) * (10**6) if len(tmp) >= 1: num += int(tmp[0]) * (10**9) return num def version_cmp (x, y): xp = x.split('b') yp = y.split('b') if len(xp) > 1: x_ver = version_to_int(xp[0]) x_beta = xp[1] else: x_ver = version_to_int(xp[0]) x_beta = None if len(yp) > 1: y_ver = version_to_int(yp[0]) y_beta = yp[1] else: y_ver = version_to_int(yp[0]) y_beta = None if x_ver == y_ver: if not x_beta and not y_beta: return 0 if not y_beta: return -1 if not x_beta: return 1 return cmp(int(x_beta),int(y_beta)) elif x_ver > y_ver: return 1 return -1 # # Paths # def path_find_binary (executable, extra_dirs=[], custom_test=None): """Find an executable. It checks 'extra_dirs' and the PATH. The 'executable' parameter can be either a string or a list. """ assert (type(executable) in [str, list]) dirs = extra_dirs env_path = os.getenv("PATH") if env_path: dirs += filter (lambda x: x, env_path.split(":")) for dir in dirs: if type(executable) == str: tmp = os.path.join (dir, executable) if os.path.exists (tmp): if custom_test: if not custom_test(tmp): continue return tmp elif type(executable) == list: for n in executable: tmp = os.path.join (dir, n) if os.path.exists (tmp): if custom_test: if not custom_test(tmp): continue return tmp def path_find_w_default (path_list, default=''): """Find a path. It checks a list of paths (that can contain wildcards), if none exists default is returned. """ for path in path_list: if '*' in path or '?' in path: to_check = glob.glob (path) else: to_check = [path] for p in to_check: if os.path.exists (p): return p return default # # OS # def os_get_document_root(): if sys.platform == 'darwin': return "/Library/WebServer/Documents" elif sys.platform == 'linux2': if os.path.exists ("/etc/redhat-release"): return '/var/www' elif os.path.exists ("/etc/fedora-release"): return '/var/www' elif os.path.exists ("/etc/SuSE-release"): return '/srv/www/htdocs' elif os.path.exists ("/etc/debian_version"): return '/var/www' elif os.path.exists ("/etc/gentoo-release"): return '/var/www' elif os.path.exists ("/etc/slackware-version"): return '/var/www' return '/var/www' return '' # # Misc # def split_list (value): ids = [] for t1 in value.split(','): for t2 in t1.split(' '): id = t2.strip() if not id: continue ids.append(id) return ids def lists_differ (a, b): """Compare lists disregarding order""" if len(a) != len(b): return True if bool (set(a)-set(b)): return True if bool (set(b)-set(a)): return True return False def get_real_path (name, nochroot=False): """Get real path accounting for chrooted environments""" chroot = CTK.cfg.get_val('server!chroot') if chroot and not nochroot: fullname = os.path.normpath (chroot + os.path.sep + name) else: fullname = name return fullname
gpl-2.0
7,725,153,230,824,267,000
24.86859
76
0.548755
false
3.315941
false
false
false
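A short usage sketch of the version and list helpers defined in util.py above; the expected values follow directly from the function definitions, and the import line assumes the module is importable as util.

from util import version_to_int, version_cmp, split_list, lists_differ

assert version_to_int('1.2.3') == 1002003000      # 1*10**9 + 2*10**6 + 3*10**3
assert version_cmp('1.2.10', '1.2.9') == 1        # numeric, not lexicographic
assert version_cmp('1.0.1b2', '1.0.1b10') == -1   # beta suffixes compare as ints
assert split_list('a, b  c,,d') == ['a', 'b', 'c', 'd']
assert not lists_differ([1, 2, 3], [3, 2, 1])     # order is ignored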
mjs7231/python-plexapi
plexapi/library.py
1
60945
# -*- coding: utf-8 -*- from urllib.parse import quote, quote_plus, unquote, urlencode from plexapi import X_PLEX_CONTAINER_SIZE, log, utils from plexapi.base import PlexObject from plexapi.exceptions import BadRequest, NotFound from plexapi.media import MediaTag from plexapi.settings import Setting class Library(PlexObject): """ Represents a PlexServer library. This contains all sections of media defined in your Plex server including video, shows and audio. Attributes: key (str): '/library' identifier (str): Unknown ('com.plexapp.plugins.library'). mediaTagVersion (str): Unknown (/system/bundle/media/flags/) server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to. title1 (str): 'Plex Library' (not sure how useful this is). title2 (str): Second title (this is blank on my setup). """ key = '/library' def _loadData(self, data): self._data = data self._sectionsByID = {} # cached Section UUIDs self.identifier = data.attrib.get('identifier') self.mediaTagVersion = data.attrib.get('mediaTagVersion') self.title1 = data.attrib.get('title1') self.title2 = data.attrib.get('title2') def sections(self): """ Returns a list of all media sections in this library. Library sections may be any of :class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`, :class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`. """ key = '/library/sections' sections = [] for elem in self._server.query(key): for cls in (MovieSection, ShowSection, MusicSection, PhotoSection): if elem.attrib.get('type') == cls.TYPE: section = cls(self._server, elem, key) self._sectionsByID[section.key] = section sections.append(section) return sections def section(self, title=None): """ Returns the :class:`~plexapi.library.LibrarySection` that matches the specified title. Parameters: title (str): Title of the section to return. """ for section in self.sections(): if section.title.lower() == title.lower(): return section raise NotFound('Invalid library section: %s' % title) def sectionByID(self, sectionID): """ Returns the :class:`~plexapi.library.LibrarySection` that matches the specified sectionID. Parameters: sectionID (str): ID of the section to return. """ if not self._sectionsByID or sectionID not in self._sectionsByID: self.sections() return self._sectionsByID[sectionID] def all(self, **kwargs): """ Returns a list of all media from all library sections. This may be a very large dataset to retrieve. """ items = [] for section in self.sections(): for item in section.all(**kwargs): items.append(item) return items def onDeck(self): """ Returns a list of all media items on deck. """ return self.fetchItems('/library/onDeck') def recentlyAdded(self): """ Returns a list of all media items recently added. """ return self.fetchItems('/library/recentlyAdded') def search(self, title=None, libtype=None, **kwargs): """ Searching within a library section is much more powerful. It seems certain attributes on the media objects can be targeted to filter this search down a bit, but I havent found the documentation for it. Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items such as actor=<id> seem to work, but require you already know the id of the actor. TLDR: This is untested but seems to work. Use library section search when you can. 
""" args = {} if title: args['title'] = title if libtype: args['type'] = utils.searchType(libtype) for attr, value in kwargs.items(): args[attr] = value key = '/library/all%s' % utils.joinArgs(args) return self.fetchItems(key) def cleanBundles(self): """ Poster images and other metadata for items in your library are kept in "bundle" packages. When you remove items from your library, these bundles aren't immediately removed. Removing these old bundles can reduce the size of your install. By default, your server will automatically clean up old bundles once a week as part of Scheduled Tasks. """ # TODO: Should this check the response for success or the correct mediaprefix? self._server.query('/library/clean/bundles') def emptyTrash(self): """ If a library has items in the Library Trash, use this option to empty the Trash. """ for section in self.sections(): section.emptyTrash() def optimize(self): """ The Optimize option cleans up the server database from unused or fragmented data. For example, if you have deleted or added an entire library or many items in a library, you may like to optimize the database. """ self._server.query('/library/optimize') def update(self): """ Scan this library for new items.""" self._server.query('/library/sections/all/refresh') def cancelUpdate(self): """ Cancel a library update. """ key = '/library/sections/all/refresh' self._server.query(key, method=self._server._session.delete) def refresh(self): """ Forces a download of fresh media information from the internet. This can take a long time. Any locked fields are not modified. """ self._server.query('/library/sections/all/refresh?force=1') def deleteMediaPreviews(self): """ Delete the preview thumbnails for the all sections. This cannot be undone. Recreating media preview files can take hours or even days. """ for section in self.sections(): section.deleteMediaPreviews() def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs): """ Simplified add for the most common options. Parameters: name (str): Name of the library agent (str): Example com.plexapp.agents.imdb type (str): movie, show, # check me location (str): /path/to/files language (str): Two letter language fx en kwargs (dict): Advanced options should be passed as a dict. where the id is the key. **Photo Preferences** * **agent** (str): com.plexapp.agents.none * **enableAutoPhotoTags** (bool): Tag photos. Default value false. * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Photo Scanner **Movie Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner **IMDB Movie Options** (com.plexapp.agents.imdb) * **title** (bool): Localized titles. Default value false. * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **only_trailers** (bool): Skip extras which aren't trailers. Default value false. * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false. * **native_subs** (bool): Include extras with subtitles in Library language. 
Default value false. * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **ratings** (int): Ratings Source, Default value 0 Possible options: 0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database. * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria, 3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica, 11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela. * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **usage** (bool): Send anonymous usage data to Plex. Default value true. **TheMovieDB Movie Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **country** (int): Country (used for release date and content rating). Default value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. **Show Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first. * **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Series Scanner **TheTVDB Show Options** (com.plexapp.agents.thetvdb) * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **native_subs** (bool): Include extras with subtitles in Library language. Default value false. **TheMovieDB Show Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. 
* **country** (int): Country (used for release date and content rating). Default value 47 options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. **Other Video Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner **IMDB Other Video Options** (com.plexapp.agents.imdb) * **title** (bool): Localized titles. Default value false. * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **only_trailers** (bool): Skip extras which aren't trailers. Default value false. * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false. * **native_subs** (bool): Include extras with subtitles in Library language. Default value false. * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **ratings** (int): Ratings Source Default value 0 Possible options: 0:Rotten Tomatoes,1:IMDb,2:The Movie Database. * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria, 3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica, 11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela. * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **usage** (bool): Send anonymous usage data to Plex. Default value true. **TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **country** (int): Country (used for release date and content rating). 
Default value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. """ part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % ( quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location)) # noqa E126 if kwargs: part += urlencode(kwargs) return self._server.query(part, method=self._server._session.post) def history(self, maxresults=9999999, mindate=None): """ Get Play History for all library Sections for the owner. Parameters: maxresults (int): Only return the specified number of results (optional). mindate (datetime): Min datetime to return results from. """ hist = [] for section in self.sections(): hist.extend(section.history(maxresults=maxresults, mindate=mindate)) return hist class LibrarySection(PlexObject): """ Base class for a single library section. Attributes: ALLOWED_FILTERS (tuple): () ALLOWED_SORT (tuple): () BOOLEAN_FILTERS (tuple<str>): ('unwatched', 'duplicate') server (:class:`~plexapi.server.PlexServer`): Server this client is connected to. initpath (str): Path requested when building this object. agent (str): Unknown (com.plexapp.agents.imdb, etc) allowSync (bool): True if you allow syncing content from this section. art (str): Wallpaper artwork used to respresent this section. composite (str): Composit image used to represent this section. createdAt (datetime): Datetime this library section was created. filters (str): Unknown key (str): Key (or ID) of this library section. language (str): Language represented in this section (en, xn, etc). locations (str): Paths on disk where section content is stored. refreshing (str): True if this section is currently being refreshed. scanner (str): Internal scanner used to find media (Plex Movie Scanner, Plex Premium Music Scanner, etc.) thumb (str): Thumbnail image used to represent this section. title (str): Title of this section. type (str): Type of content section represents (movie, artist, photo, show). updatedAt (datetime): Datetime this library section was last updated. 
uuid (str): Unique id for this section (32258d7c-3e6c-4ac5-98ad-bad7a3b78c63) totalSize (int): Total number of item in the library """ ALLOWED_FILTERS = () ALLOWED_SORT = () BOOLEAN_FILTERS = ('unwatched', 'duplicate') def _loadData(self, data): self._data = data self.agent = data.attrib.get('agent') self.allowSync = utils.cast(bool, data.attrib.get('allowSync')) self.art = data.attrib.get('art') self.composite = data.attrib.get('composite') self.createdAt = utils.toDatetime(data.attrib.get('createdAt')) self.filters = data.attrib.get('filters') self.key = data.attrib.get('key') # invalid key from plex self.language = data.attrib.get('language') self.locations = self.listAttrs(data, 'path', etag='Location') self.refreshing = utils.cast(bool, data.attrib.get('refreshing')) self.scanner = data.attrib.get('scanner') self.thumb = data.attrib.get('thumb') self.title = data.attrib.get('title') self.type = data.attrib.get('type') self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt')) self.uuid = data.attrib.get('uuid') # Private attrs as we dont want a reload. self._total_size = None def fetchItems(self, ekey, cls=None, container_start=None, container_size=None, **kwargs): """ Load the specified key to find and build all items with the specified tag and attrs. See :func:`~plexapi.base.PlexObject.fetchItem` for more details on how this is used. Parameters: container_start (None, int): offset to get a subset of the data container_size (None, int): How many items in data """ url_kw = {} if container_start is not None: url_kw["X-Plex-Container-Start"] = container_start if container_size is not None: url_kw["X-Plex-Container-Size"] = container_size if ekey is None: raise BadRequest('ekey was not provided') data = self._server.query(ekey, params=url_kw) if '/all' in ekey: # totalSize is only included in the xml response # if container size is used. total_size = data.attrib.get("totalSize") or data.attrib.get("size") self._total_size = utils.cast(int, total_size) items = self.findItems(data, cls, ekey, **kwargs) librarySectionID = data.attrib.get('librarySectionID') if librarySectionID: for item in items: item.librarySectionID = librarySectionID return items @property def totalSize(self): if self._total_size is None: part = '/library/sections/%s/all?X-Plex-Container-Start=0&X-Plex-Container-Size=1' % self.key data = self._server.query(part) self._total_size = int(data.attrib.get("totalSize")) return self._total_size def delete(self): """ Delete a library section. """ try: return self._server.query('/library/sections/%s' % self.key, method=self._server._session.delete) except BadRequest: # pragma: no cover msg = 'Failed to delete library %s' % self.key msg += 'You may need to allow this permission in your Plex settings.' log.error(msg) raise def reload(self, key=None): return self._server.library.section(self.title) def edit(self, agent=None, **kwargs): """ Edit a library (Note: agent is required). See :class:`~plexapi.library.Library` for example usage. Parameters: kwargs (dict): Dict of settings to edit. """ if not agent: agent = self.agent part = '/library/sections/%s?agent=%s&%s' % (self.key, agent, urlencode(kwargs)) self._server.query(part, method=self._server._session.put) # Reload this way since the self.key dont have a full path, but is simply a id. for s in self._server.library.sections(): if s.key == self.key: return s def get(self, title): """ Returns the media item with the specified title. Parameters: title (str): Title of the item to return. 
""" key = '/library/sections/%s/all?title=%s' % (self.key, quote(title, safe='')) return self.fetchItem(key, title__iexact=title) def all(self, sort=None, **kwargs): """ Returns a list of media from this library section. Parameters: sort (string): The sort string """ sortStr = '' if sort is not None: sortStr = '?sort=' + sort key = '/library/sections/%s/all%s' % (self.key, sortStr) return self.fetchItems(key, **kwargs) def agents(self): """ Returns a list of available `:class:`~plexapi.media.Agent` for this library section. """ return self._server.agents(utils.searchType(self.type)) def settings(self): """ Returns a list of all library settings. """ key = '/library/sections/%s/prefs' % self.key data = self._server.query(key) return self.findItems(data, cls=Setting) def onDeck(self): """ Returns a list of media items on deck from this library section. """ key = '/library/sections/%s/onDeck' % self.key return self.fetchItems(key) def recentlyAdded(self, maxresults=50): """ Returns a list of media items recently added from this library section. Parameters: maxresults (int): Max number of items to return (default 50). """ return self.search(sort='addedAt:desc', maxresults=maxresults) def analyze(self): """ Run an analysis on all of the items in this library section. See See :func:`~plexapi.base.PlexPartialObject.analyze` for more details. """ key = '/library/sections/%s/analyze' % self.key self._server.query(key, method=self._server._session.put) def emptyTrash(self): """ If a section has items in the Trash, use this option to empty the Trash. """ key = '/library/sections/%s/emptyTrash' % self.key self._server.query(key, method=self._server._session.put) def update(self): """ Scan this section for new media. """ key = '/library/sections/%s/refresh' % self.key self._server.query(key) def cancelUpdate(self): """ Cancel update of this Library Section. """ key = '/library/sections/%s/refresh' % self.key self._server.query(key, method=self._server._session.delete) def refresh(self): """ Forces a download of fresh media information from the internet. This can take a long time. Any locked fields are not modified. """ key = '/library/sections/%s/refresh?force=1' % self.key self._server.query(key) def deleteMediaPreviews(self): """ Delete the preview thumbnails for items in this library. This cannot be undone. Recreating media preview files can take hours or even days. """ key = '/library/sections/%s/indexes' % self.key self._server.query(key, method=self._server._session.delete) def listChoices(self, category, libtype=None, **kwargs): """ Returns a list of :class:`~plexapi.library.FilterChoice` objects for the specified category and libtype. kwargs can be any of the same kwargs in :func:`plexapi.library.LibraySection.search()` to help narrow down the choices to only those that matter in your current context. Parameters: category (str): Category to list choices for (genre, contentRating, etc). libtype (int): Library type of item filter. **kwargs (dict): Additional kwargs to narrow down the choices. Raises: :class:`plexapi.exceptions.BadRequest`: Cannot include kwarg equal to specified category. """ # TODO: Should this be moved to base? 
if category in kwargs: raise BadRequest('Cannot include kwarg equal to specified category: %s' % category) args = {} for subcategory, value in kwargs.items(): args[category] = self._cleanSearchFilter(subcategory, value) if libtype is not None: args['type'] = utils.searchType(libtype) key = '/library/sections/%s/%s%s' % (self.key, category, utils.joinArgs(args)) return self.fetchItems(key, cls=FilterChoice) def search(self, title=None, sort=None, maxresults=None, libtype=None, container_start=0, container_size=X_PLEX_CONTAINER_SIZE, **kwargs): """ Search the library. The http requests will be batched in container_size. If you're only looking for the first <num> results, it would be wise to set the maxresults option to that amount so this functions doesn't iterate over all results on the server. Parameters: title (str): General string query to search for (optional). sort (str): column:dir; column can be any of {addedAt, originallyAvailableAt, lastViewedAt, titleSort, rating, mediaHeight, duration}. dir can be asc or desc (optional). maxresults (int): Only return the specified number of results (optional). libtype (str): Filter results to a spcifiec libtype (movie, show, episode, artist, album, track; optional). container_start (int): default 0 container_size (int): default X_PLEX_CONTAINER_SIZE in your config file. **kwargs (dict): Any of the available filters for the current library section. Partial string matches allowed. Multiple matches OR together. Negative filtering also possible, just add an exclamation mark to the end of filter name, e.g. `resolution!=1x1`. * unwatched: Display or hide unwatched content (True, False). [all] * duplicate: Display or hide duplicate items (True, False). [movie] * actor: List of actors to search ([actor_or_id, ...]). [movie] * collection: List of collections to search within ([collection_or_id, ...]). [all] * contentRating: List of content ratings to search within ([rating_or_key, ...]). [movie,tv] * country: List of countries to search within ([country_or_key, ...]). [movie,music] * decade: List of decades to search within ([yyy0, ...]). [movie] * director: List of directors to search ([director_or_id, ...]). [movie] * genre: List Genres to search within ([genere_or_id, ...]). [all] * network: List of TV networks to search within ([resolution_or_key, ...]). [tv] * resolution: List of video resolutions to search within ([resolution_or_key, ...]). [movie] * studio: List of studios to search within ([studio_or_key, ...]). [music] * year: List of years to search within ([yyyy, ...]). [all] Raises: :class:`plexapi.exceptions.BadRequest`: when applying unknown filter """ # cleanup the core arguments args = {} for category, value in kwargs.items(): args[category] = self._cleanSearchFilter(category, value, libtype) if title is not None: args['title'] = title if sort is not None: args['sort'] = self._cleanSearchSort(sort) if libtype is not None: args['type'] = utils.searchType(libtype) results = [] subresults = [] offset = container_start if maxresults is not None: container_size = min(container_size, maxresults) while True: key = '/library/sections/%s/all%s' % (self.key, utils.joinArgs(args)) subresults = self.fetchItems(key, container_start=container_start, container_size=container_size) if not len(subresults): if offset > self.totalSize: log.info("container_start is higher then the number of items in the library") break results.extend(subresults) # self.totalSize is not used as a condition in the while loop as # this require a additional http request. 
# self.totalSize is updated from .fetchItems wanted_number_of_items = self.totalSize - offset if maxresults is not None: wanted_number_of_items = min(maxresults, wanted_number_of_items) container_size = min(container_size, maxresults - len(results)) if wanted_number_of_items <= len(results): break container_start += container_size return results def _cleanSearchFilter(self, category, value, libtype=None): # check a few things before we begin if category.endswith('!'): if category[:-1] not in self.ALLOWED_FILTERS: raise BadRequest('Unknown filter category: %s' % category[:-1]) elif category not in self.ALLOWED_FILTERS: raise BadRequest('Unknown filter category: %s' % category) if category in self.BOOLEAN_FILTERS: return '1' if value else '0' if not isinstance(value, (list, tuple)): value = [value] # convert list of values to list of keys or ids result = set() choices = self.listChoices(category, libtype) lookup = {c.title.lower(): unquote(unquote(c.key)) for c in choices} allowed = set(c.key for c in choices) for item in value: item = str((item.id or item.tag) if isinstance(item, MediaTag) else item).lower() # find most logical choice(s) to use in url if item in allowed: result.add(item); continue if item in lookup: result.add(lookup[item]); continue matches = [k for t, k in lookup.items() if item in t] if matches: map(result.add, matches); continue # nothing matched; use raw item value log.debug('Filter value not listed, using raw item value: %s' % item) result.add(item) return ','.join(result) def _cleanSearchSort(self, sort): sort = '%s:asc' % sort if ':' not in sort else sort scol, sdir = sort.lower().split(':') lookup = {s.lower(): s for s in self.ALLOWED_SORT} if scol not in lookup: raise BadRequest('Unknown sort column: %s' % scol) if sdir not in ('asc', 'desc'): raise BadRequest('Unknown sort dir: %s' % sdir) return '%s:%s' % (lookup[scol], sdir) def sync(self, policy, mediaSettings, client=None, clientId=None, title=None, sort=None, libtype=None, **kwargs): """ Add current library section as sync item for specified device. See description of :func:`~plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.myplex.MyPlexAccount.sync()` for possible exceptions. Parameters: policy (:class:`plexapi.sync.Policy`): policy of syncing the media (how many items to sync and process watched media or not), generated automatically when method called on specific LibrarySection object. mediaSettings (:class:`plexapi.sync.MediaSettings`): Transcoding settings used for the media, generated automatically when method called on specific LibrarySection object. client (:class:`plexapi.myplex.MyPlexDevice`): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`. clientId (str): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`. title (str): descriptive title for the new :class:`plexapi.sync.SyncItem`, if empty the value would be generated from metadata of current media. sort (str): formatted as `column:dir`; column can be any of {`addedAt`, `originallyAvailableAt`, `lastViewedAt`, `titleSort`, `rating`, `mediaHeight`, `duration`}. dir can be `asc` or `desc`. libtype (str): Filter results to a specific libtype (`movie`, `show`, `episode`, `artist`, `album`, `track`). Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Raises: :class:`plexapi.exceptions.BadRequest`: when the library is not allowed to sync Example: .. 
code-block:: python from plexapi import myplex from plexapi.sync import Policy, MediaSettings, VIDEO_QUALITY_3_MBPS_720p c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Movies') policy = Policy('count', unwatched=True, value=1) media_settings = MediaSettings.create(VIDEO_QUALITY_3_MBPS_720p) section.sync(target, policy, media_settings, title='Next best movie', sort='rating:desc') """ from plexapi.sync import SyncItem if not self.allowSync: raise BadRequest('The requested library is not allowed to sync') args = {} for category, value in kwargs.items(): args[category] = self._cleanSearchFilter(category, value, libtype) if sort is not None: args['sort'] = self._cleanSearchSort(sort) if libtype is not None: args['type'] = utils.searchType(libtype) myplex = self._server.myPlexAccount() sync_item = SyncItem(self._server, None) sync_item.title = title if title else self.title sync_item.rootTitle = self.title sync_item.contentType = self.CONTENT_TYPE sync_item.metadataType = self.METADATA_TYPE sync_item.machineIdentifier = self._server.machineIdentifier key = '/library/sections/%s/all' % self.key sync_item.location = 'library://%s/directory/%s' % (self.uuid, quote_plus(key + utils.joinArgs(args))) sync_item.policy = policy sync_item.mediaSettings = mediaSettings return myplex.sync(client=client, clientId=clientId, sync_item=sync_item) def history(self, maxresults=9999999, mindate=None): """ Get Play History for this library Section for the owner. Parameters: maxresults (int): Only return the specified number of results (optional). mindate (datetime): Min datetime to return results from. """ return self._server.history(maxresults=maxresults, mindate=mindate, librarySectionID=self.key, accountID=1) class MovieSection(LibrarySection): """ Represents a :class:`~plexapi.library.LibrarySection` section containing movies. Attributes: ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched', 'duplicate', 'year', 'decade', 'genre', 'contentRating', 'collection', 'director', 'actor', 'country', 'studio', 'resolution', 'guid', 'label') ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt', 'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating', 'mediaHeight', 'duration') TAG (str): 'Directory' TYPE (str): 'movie' """ ALLOWED_FILTERS = ('unwatched', 'duplicate', 'year', 'decade', 'genre', 'contentRating', 'collection', 'director', 'actor', 'country', 'studio', 'resolution', 'guid', 'label', 'writer', 'producer', 'subtitleLanguage', 'audioLanguage', 'lastViewedAt', 'viewCount', 'addedAt') ALLOWED_SORT = ('addedAt', 'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating', 'mediaHeight', 'duration') TAG = 'Directory' TYPE = 'movie' METADATA_TYPE = 'movie' CONTENT_TYPE = 'video' def collection(self, **kwargs): """ Returns a list of collections from this library section. """ return self.search(libtype='collection', **kwargs) def sync(self, videoQuality, limit=None, unwatched=False, **kwargs): """ Add current Movie library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in :mod:`plexapi.sync` module. 
limit (int): maximum count of movies to sync, unlimited if `None`. unwatched (bool): if `True` watched videos wouldn't be synced. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. code-block:: python from plexapi import myplex from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Movies') section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True, title='Next best movie', sort='rating:desc') """ from plexapi.sync import Policy, MediaSettings kwargs['mediaSettings'] = MediaSettings.createVideo(videoQuality) kwargs['policy'] = Policy.create(limit, unwatched) return super(MovieSection, self).sync(**kwargs) class ShowSection(LibrarySection): """ Represents a :class:`~plexapi.library.LibrarySection` section containing tv shows. Attributes: ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched', 'year', 'genre', 'contentRating', 'network', 'collection', 'guid', 'label') ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt', 'lastViewedAt', 'originallyAvailableAt', 'titleSort', 'rating', 'unwatched') TAG (str): 'Directory' TYPE (str): 'show' """ ALLOWED_FILTERS = ('unwatched', 'year', 'genre', 'contentRating', 'network', 'collection', 'guid', 'duplicate', 'label', 'show.title', 'show.year', 'show.userRating', 'show.viewCount', 'show.lastViewedAt', 'show.actor', 'show.addedAt', 'episode.title', 'episode.originallyAvailableAt', 'episode.resolution', 'episode.subtitleLanguage', 'episode.unwatched', 'episode.addedAt', 'episode.userRating', 'episode.viewCount', 'episode.lastViewedAt') ALLOWED_SORT = ('addedAt', 'lastViewedAt', 'originallyAvailableAt', 'titleSort', 'rating', 'unwatched') TAG = 'Directory' TYPE = 'show' METADATA_TYPE = 'episode' CONTENT_TYPE = 'video' def searchShows(self, **kwargs): """ Search for a show. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='show', **kwargs) def searchEpisodes(self, **kwargs): """ Search for an episode. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='episode', **kwargs) def recentlyAdded(self, libtype='episode', maxresults=50): """ Returns a list of recently added episodes from this library section. Parameters: maxresults (int): Max number of items to return (default 50). """ return self.search(sort='addedAt:desc', libtype=libtype, maxresults=maxresults) def collection(self, **kwargs): """ Returns a list of collections from this library section. """ return self.search(libtype='collection', **kwargs) def sync(self, videoQuality, limit=None, unwatched=False, **kwargs): """ Add current Show library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in :mod:`plexapi.sync` module. limit (int): maximum count of episodes to sync, unlimited if `None`. unwatched (bool): if `True` watched videos wouldn't be synced. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. 
code-block:: python from plexapi import myplex from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('TV-Shows') section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True, title='Next unwatched episode') """ from plexapi.sync import Policy, MediaSettings kwargs['mediaSettings'] = MediaSettings.createVideo(videoQuality) kwargs['policy'] = Policy.create(limit, unwatched) return super(ShowSection, self).sync(**kwargs) class MusicSection(LibrarySection): """ Represents a :class:`~plexapi.library.LibrarySection` section containing music artists. Attributes: ALLOWED_FILTERS (list<str>): List of allowed search filters. ('genre', 'country', 'collection') ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt', 'lastViewedAt', 'viewCount', 'titleSort') TAG (str): 'Directory' TYPE (str): 'artist' """ ALLOWED_FILTERS = ('genre', 'country', 'collection', 'mood', 'year', 'track.userRating', 'artist.title', 'artist.userRating', 'artist.genre', 'artist.country', 'artist.collection', 'artist.addedAt', 'album.title', 'album.userRating', 'album.genre', 'album.decade', 'album.collection', 'album.viewCount', 'album.lastViewedAt', 'album.studio', 'album.addedAt', 'track.title', 'track.userRating', 'track.viewCount', 'track.lastViewedAt', 'track.skipCount', 'track.lastSkippedAt') ALLOWED_SORT = ('addedAt', 'lastViewedAt', 'viewCount', 'titleSort', 'userRating') TAG = 'Directory' TYPE = 'artist' CONTENT_TYPE = 'audio' METADATA_TYPE = 'track' def albums(self): """ Returns a list of :class:`~plexapi.audio.Album` objects in this section. """ key = '/library/sections/%s/albums' % self.key return self.fetchItems(key) def searchArtists(self, **kwargs): """ Search for an artist. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='artist', **kwargs) def searchAlbums(self, **kwargs): """ Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='album', **kwargs) def searchTracks(self, **kwargs): """ Search for a track. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='track', **kwargs) def collection(self, **kwargs): """ Returns a list of collections from this library section. """ return self.search(libtype='collection', **kwargs) def sync(self, bitrate, limit=None, **kwargs): """ Add current Music library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: bitrate (int): maximum bitrate for synchronized music, better use one of MUSIC_BITRATE_* values from the module :mod:`plexapi.sync`. limit (int): maximum count of tracks to sync, unlimited if `None`. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. 
code-block:: python from plexapi import myplex from plexapi.sync import AUDIO_BITRATE_320_KBPS c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Music') section.sync(AUDIO_BITRATE_320_KBPS, client=target, limit=100, sort='addedAt:desc', title='New music') """ from plexapi.sync import Policy, MediaSettings kwargs['mediaSettings'] = MediaSettings.createMusic(bitrate) kwargs['policy'] = Policy.create(limit) return super(MusicSection, self).sync(**kwargs) class PhotoSection(LibrarySection): """ Represents a :class:`~plexapi.library.LibrarySection` section containing photos. Attributes: ALLOWED_FILTERS (list<str>): List of allowed search filters. ('all', 'iso', 'make', 'lens', 'aperture', 'exposure', 'device', 'resolution') ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt') TAG (str): 'Directory' TYPE (str): 'photo' """ ALLOWED_FILTERS = ('all', 'iso', 'make', 'lens', 'aperture', 'exposure', 'device', 'resolution', 'place', 'originallyAvailableAt', 'addedAt', 'title', 'userRating', 'tag', 'year') ALLOWED_SORT = ('addedAt',) TAG = 'Directory' TYPE = 'photo' CONTENT_TYPE = 'photo' METADATA_TYPE = 'photo' def searchAlbums(self, title, **kwargs): """ Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='photoalbum', title=title, **kwargs) def searchPhotos(self, title, **kwargs): """ Search for a photo. See :func:`~plexapi.library.LibrarySection.search()` for usage. """ return self.search(libtype='photo', title=title, **kwargs) def sync(self, resolution, limit=None, **kwargs): """ Add current Music library section as sync item for specified device. See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions. Parameters: resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the module :mod:`plexapi.sync`. limit (int): maximum count of tracks to sync, unlimited if `None`. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem. Example: .. code-block:: python from plexapi import myplex from plexapi.sync import PHOTO_QUALITY_HIGH c = myplex.MyPlexAccount() target = c.device('Plex Client') sync_items_wd = c.syncItems(target.clientIdentifier) srv = c.resource('Server Name').connect() section = srv.library.section('Photos') section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc', title='Fresh photos') """ from plexapi.sync import Policy, MediaSettings kwargs['mediaSettings'] = MediaSettings.createPhoto(resolution) kwargs['policy'] = Policy.create(limit) return super(PhotoSection, self).sync(**kwargs) class FilterChoice(PlexObject): """ Represents a single filter choice. These objects are gathered when using filters while searching for library items and is the object returned in the result set of :func:`~plexapi.library.LibrarySection.listChoices()`. Attributes: TAG (str): 'Directory' server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to. initpath (str): Relative path requested when retrieving specified `data` (optional). fastKey (str): API path to quickly list all items in this filter (/library/sections/<section>/all?genre=<key>) key (str): Short key (id) of this filter option (used ad <key> in fastKey above). 
thumb (str): Thumbnail used to represent this filter option. title (str): Human readable name for this filter option. type (str): Filter type (genre, contentRating, etc). """ TAG = 'Directory' def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.fastKey = data.attrib.get('fastKey') self.key = data.attrib.get('key') self.thumb = data.attrib.get('thumb') self.title = data.attrib.get('title') self.type = data.attrib.get('type') @utils.registerPlexObject class Hub(PlexObject): """ Represents a single Hub (or category) in the PlexServer search. Attributes: TAG (str): 'Hub' hubIdentifier (str): Unknown. size (int): Number of items found. title (str): Title of this Hub. type (str): Type of items in the Hub. items (str): List of items in the Hub. """ TAG = 'Hub' def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.hubIdentifier = data.attrib.get('hubIdentifier') self.size = utils.cast(int, data.attrib.get('size')) self.title = data.attrib.get('title') self.type = data.attrib.get('type') self.key = data.attrib.get('key') self.items = self.findItems(data) def __len__(self): return self.size @utils.registerPlexObject class Collections(PlexObject): TAG = 'Directory' TYPE = 'collection' _include = "?includeExternalMedia=1&includePreferences=1" def _loadData(self, data): self.ratingKey = utils.cast(int, data.attrib.get('ratingKey')) self._details_key = "/library/metadata/%s%s" % (self.ratingKey, self._include) self.key = data.attrib.get('key') self.type = data.attrib.get('type') self.title = data.attrib.get('title') self.subtype = data.attrib.get('subtype') self.summary = data.attrib.get('summary') self.index = utils.cast(int, data.attrib.get('index')) self.thumb = data.attrib.get('thumb') self.addedAt = utils.toDatetime(data.attrib.get('addedAt')) self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt')) self.childCount = utils.cast(int, data.attrib.get('childCount')) self.minYear = utils.cast(int, data.attrib.get('minYear')) self.maxYear = utils.cast(int, data.attrib.get('maxYear')) self.collectionMode = data.attrib.get('collectionMode') self.collectionSort = data.attrib.get('collectionSort') @property def children(self): return self.fetchItems(self.key) def __len__(self): return self.childCount def delete(self): part = '/library/metadata/%s' % self.ratingKey return self._server.query(part, method=self._server._session.delete) def modeUpdate(self, mode=None): """ Update Collection Mode Parameters: mode: default (Library default) hide (Hide Collection) hideItems (Hide Items in this Collection) showItems (Show this Collection and its Items) Example: collection = 'plexapi.library.Collections' collection.updateMode(mode="hide") """ mode_dict = {'default': '-2', 'hide': '0', 'hideItems': '1', 'showItems': '2'} key = mode_dict.get(mode) if key is None: raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, list(mode_dict))) part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, key) return self._server.query(part, method=self._server._session.put) def sortUpdate(self, sort=None): """ Update Collection Sorting Parameters: sort: realease (Order Collection by realease dates) alpha (Order Collection Alphabetically) Example: colleciton = 'plexapi.library.Collections' collection.updateSort(mode="alpha") """ sort_dict = {'release': '0', 'alpha': '1'} key = sort_dict.get(sort) if key is None: raise BadRequest('Unknown sort dir: %s. 
Options: %s' % (sort, list(sort_dict)))
        part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, key)
        return self._server.query(part, method=self._server._session.put)

    def posters(self):
        """ Returns list of available :class:`~plexapi.media.Poster` objects for this collection. """
        return self.fetchItems('/library/metadata/%s/posters' % self.ratingKey)

    def uploadPoster(self, url=None, filepath=None):
        """ Upload a poster for this collection from a url or a local filepath. """
        if url:
            key = '/library/metadata/%s/posters?url=%s' % (self.ratingKey, quote_plus(url))
            self._server.query(key, method=self._server._session.post)
        elif filepath:
            key = '/library/metadata/%s/posters?' % self.ratingKey
            # Read the image and post it as the request body.
            with open(filepath, 'rb') as poster_file:
                data = poster_file.read()
            self._server.query(key, method=self._server._session.post, data=data)

    def setPoster(self, poster):
        """ Set the given :class:`~plexapi.media.Poster` as this collection's active poster. """
        poster.select()

    def arts(self):
        """ Returns list of available background art :class:`~plexapi.media.Poster` objects. """
        return self.fetchItems('/library/metadata/%s/arts' % self.ratingKey)

    def uploadArt(self, url=None, filepath=None):
        """ Upload background art for this collection from a url or a local filepath. """
        if url:
            key = '/library/metadata/%s/arts?url=%s' % (self.ratingKey, quote_plus(url))
            self._server.query(key, method=self._server._session.post)
        elif filepath:
            key = '/library/metadata/%s/arts?' % self.ratingKey
            # Read the image and post it as the request body.
            with open(filepath, 'rb') as art_file:
                data = art_file.read()
            self._server.query(key, method=self._server._session.post, data=data)

    def setArt(self, art):
        """ Set the given :class:`~plexapi.media.Poster` as this collection's active background art. """
        art.select()

    # def edit(self, **kwargs):
    #     TODO
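# --- Hedged usage sketch (illustrative only, not called anywhere) ---
# Shows how the Collections helpers above might be driven end to end. The server
# URL, token, section name, collection title and poster path are placeholder
# values made up for this example.
def _example_collection_tweaks():  # pragma: no cover
    from plexapi.server import PlexServer
    plex = PlexServer('http://localhost:32400', token='<PLEX-TOKEN>')
    music = plex.library.section('Music')           # a MusicSection as defined above
    for collection in music.collection(title='Favourites'):
        collection.modeUpdate(mode='hide')           # hide it from library views
        collection.sortUpdate(sort='alpha')          # order its children alphabetically
        collection.uploadPoster(filepath='/tmp/poster.jpg')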
bsd-3-clause
3,471,805,347,811,345,400
49.618771
127
0.599278
false
4.05759
false
false
false
Martin09/E-BeamPatterns
100 Wafers - 1cm Squares/Multi-Use Pattern/v1.4/MembraneDesign_100Wafer_v1.4.py
1
20307
# -*- coding: utf-8 -*- """ Created on Fri Dec 18 14:11:31 2015 @author: Martin Friedl """ import itertools from datetime import date from random import choice as random_choice import numpy as np from Patterns.GrowthTheoryCell import make_theory_cell from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br from Patterns.QuantumPlayground_100_v1 import make_qp from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path from gdsCAD_py3.shapes import Box, Rectangle, Label from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line WAFER_ID = '000050254318SL' # CHANGE THIS FOR EACH DIFFERENT WAFER PATTERN = 'SQ1.4' putOnWafer = True # Output full wafer or just a single pattern? HighDensity = False # High density of triangles? glbAlignmentMarks = False tDicingMarks = 10. # Dicing mark line thickness (um) rotAngle = 0. # Rotation angle of the membranes wafer_r = 25e3 waferVer = '100 Membranes Multi-Use v1.4'.format(int(wafer_r / 1000)) waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y") # Layers l_smBeam = 0 l_lgBeam = 1 l_drawing = 100 # %% Wafer template for MBE growth class MBE100Wafer(Wafer_GridStyle): """ A 2" wafer divided into square cells """ def __init__(self, name, cells=None): Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.) # The placement of the wafer alignment markers am_x = 1.5e4 am_y = 1.5e4 self.align_pts = np.array([am_x, am_y]) self.align_pts = np.vstack((self.align_pts, self.align_pts * (-1, 1))) # Reflect about y-axis self.align_pts = np.vstack((self.align_pts, self.align_pts * (1, -1))) # Reflect about x-axis self.wafer_r = 25e3 self.block_size = np.array([10e3, 10e3]) self._place_blocks(radius=self.wafer_r + 5e3) # if glbAlignmentMarks: # self.add_aligment_marks(l_lgBeam) # self.add_orientation_text(l_lgBeam) # self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks self.add_blocks() self.add_wafer_outline(layers=l_drawing) self.add_dashed_dicing_marks(layers=[l_lgBeam]) self.add_subdicing_marks(200, 5, layers=[l_lgBeam]) self.add_block_labels(l_lgBeam, quasi_unique_labels=True) self.add_prealignment_markers(layers=[l_lgBeam]) self.add_tem_membranes([0.02, 0.04, 0.06, 0.08], 500, 1, l_smBeam) self.add_theory_cells() self.add_chip_labels() # self.add_blockLabels(l_lgBeam) # self.add_cellLabels(l_lgBeam) bottom = np.array([0, -self.wafer_r * 0.9]) # top = np.array([0, -1]) * bottom self.add_waferLabel(waferLabel, l_drawing, pos=bottom) def add_block_labels(self, layers, quasi_unique_labels=False): if type(layers) is not list: layers = [layers] txtSize = 800 if quasi_unique_labels: unique_label_string = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890' possible_labels = ["".join(x) for x in itertools.product(unique_label_string, repeat=2)] blockids_set = set() while len(blockids_set) < len(self.blocks): blockids_set.add(random_choice(possible_labels)) blockids = list(blockids_set) for i, block in enumerate(self.blocks): blocklabel = Cell('LBL_B_' + blockids[i]) for l in layers: txt = Label(blockids[i], txtSize, layer=l) bbox = txt.bounding_box offset = (0, 0) txt.translate(-np.mean(bbox, 0)) # Center text around origin txt.translate(offset) # Translate it to bottom of wafer blocklabel.add(txt) block.add(blocklabel, origin=(self.block_size[0] / 2., self.block_size[1] / 2.)) else: for (i, pt) in enumerate(self.block_pts): origin = (pt + np.array([0.5, 0.5])) * self.block_size blk_lbl = 
self.blockcols[pt[0]] + self.blockrows[pt[1]] for l in layers: txt = Label(blk_lbl, txtSize, layer=l_lgBeam) bbox = txt.bounding_box offset = np.array(pt) txt.translate(-np.mean(bbox, 0)) # Center text around origin lbl_cell = Cell("lbl_" + blk_lbl) lbl_cell.add(txt) origin += np.array([0, 2000]) # Translate it up by 2mm self.add(lbl_cell, origin=origin) def add_dashed_dicing_marks(self, layers): if type(layers) is not list: layers = [layers] width = 10. / 2 dashlength = 2000 r = self.wafer_r rng = np.floor(self.wafer_r / self.block_size).astype(int) dmarks = Cell('DIC_MRKS') for l in layers: for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]: y = np.sqrt(r ** 2 - x ** 2) vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l) dmarks.add(vm) for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]: x = np.sqrt(r ** 2 - y ** 2) hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l) dmarks.add(hm) self.add(dmarks) def add_subdicing_marks(self, length, width, layers): if type(layers) is not list: layers = [layers] for l in layers: mark_cell = Cell("SubdicingMark") line = Path([[0, 0], [0, length]], width=width, layer=l) mark_cell.add(line) for block in self.blocks: block.add(mark_cell, origin=(self.block_size[0] / 2., 0), rotation=0) block.add(mark_cell, origin=(0, self.block_size[1] / 2.), rotation=-90) block.add(mark_cell, origin=(self.block_size[0], self.block_size[1] / 2.), rotation=90) block.add(mark_cell, origin=(self.block_size[0] / 2., self.block_size[1]), rotation=180) def add_prealignment_markers(self, layers, mrkr_size=7): if mrkr_size % 2 == 0: # Number is even, but we need odd numbers mrkr_size += 1 if type(layers) is not list: layers = [layers] for l in layers: rect_size = 10. # 10 um large PAMM rectangles marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l) marker = Cell('10umMarker') marker.add(marker_rect) # Make one arm of the PAMM array marker_arm = Cell('PAMM_Arm') # Define the positions of the markers, they increase in spacing by 1 um each time: mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)] for pos in mrkr_positions: marker_arm.add(marker, origin=[pos, 0]) # Build the final PAMM Marker pamm_cell = Cell('PAMM_Marker') pamm_cell.add(marker) # Center marker pamm_cell.add(marker_arm) # Right arm pamm_cell.add(marker_arm, rotation=180) # Left arm pamm_cell.add(marker_arm, rotation=90) # Top arm pamm_cell.add(marker_arm, rotation=-90) # Bottom arm for pos in mrkr_positions: pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90) pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90) # Make the 4 tick marks that mark the center of the array h = 30. w = 100. tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l) tick_mrk_cell = Cell("TickMark") tick_mrk_cell.add(tick_mrk) pos = mrkr_positions[-1] + 75 + w / 2. 
pamm_cell.add(tick_mrk_cell, origin=[pos, 0]) pamm_cell.add(tick_mrk_cell, origin=[-pos, 0]) pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90) pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(pamm_cell, origin=(center_x + 2000, center_y)) block.add(pamm_cell, origin=(center_x - 2000, center_y)) def add_tem_membranes(self, widths, length, pitch, layer): tem_membranes = Cell('TEM_Membranes') n = 4 curr_y = 0 for width in widths: membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer) membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000)) membrane_cell.add(membrane) membrane_array = CellArray(membrane_cell, 1, n, (0, pitch)) membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000)) membrane_array_cell.add(membrane_array) tem_membranes.add(membrane_array_cell, origin=(0, curr_y)) curr_y += n * pitch n2 = 3 tem_membranes2 = Cell('Many_TEM_Membranes') tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch))) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(tem_membranes2, origin=(center_x, center_y + 2000)) block.add(tem_membranes2, origin=(center_x, center_y + 1500), rotation=45) def add_theory_cells(self): theory_cells = Cell('TheoryCells') theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0)) theory_cells.add(make_theory_cell_3br(), origin=(0, 0)) theory_cells.add(make_theory_cell_4br(), origin=(400, 0)) theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-500, -400), rotation=45) theory_cells.add(make_theory_cell_3br(), origin=(-50, -400), rotation=45) theory_cells.add(make_theory_cell_4br(), origin=(370, -400), rotation=45) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(theory_cells, origin=(center_x, center_y - 1700)) def add_chip_labels(self): wafer_lbl = PATTERN + '\n' + WAFER_ID text = Label(wafer_lbl, 20., layer=l_lgBeam) text.translate(tuple(np.array(-text.bounding_box.mean(0)))) # Center justify label chip_lbl_cell = Cell('chip_label') chip_lbl_cell.add(text) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(chip_lbl_cell, origin=(center_x, center_y - 2850)) class Frame(Cell): """ Make a frame for writing to with ebeam lithography Params: -name of the frame, just like when naming a cell -size: the size of the frame as an array [xsize,ysize] """ def __init__(self, name, size, border_layers): if not (type(border_layers) == list): border_layers = [border_layers] Cell.__init__(self, name) self.size_x, self.size_y = size # Create the border of the cell for l in border_layers: self.border = Box( (-self.size_x / 2., -self.size_y / 2.), (self.size_x / 2., self.size_y / 2.), 1, layer=l) self.add(self.border) # Add border to the frame self.align_markers = None def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False): if not (type(layers) == list): layers = [layers] top_mk_cell = Cell('AlignmentMark') for l in layers: if not joy_markers: am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l) rect_mk_cell = Cell("RectMarker") rect_mk_cell.add(am0) top_mk_cell.add(rect_mk_cell) elif joy_markers: crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)] crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist()))) am0 = Boundary(crosspts, layer=l) # Create gdsCAD shape joy_mk_cell = Cell("JOYMarker") joy_mk_cell.add(am0) top_mk_cell.add(joy_mk_cell) if camps_markers: emw 
= 20. # 20 um e-beam marker width camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l) camps_mk_cell = Cell("CAMPSMarker") camps_mk_cell.add(camps_mk) top_mk_cell.add(camps_mk_cell, origin=[100., 100.]) top_mk_cell.add(camps_mk_cell, origin=[100., -100.]) top_mk_cell.add(camps_mk_cell, origin=[-100., 100.]) top_mk_cell.add(camps_mk_cell, origin=[-100., -100.]) self.align_markers = Cell("AlignMarkers") self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1])) self.add(self.align_markers) def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle, array_height, array_width, array_spacing, layers): if not (type(layers) == list): layers = [layers] if not (type(_pitches) == list): _pitches = [_pitches] if not (type(_lengths) == list): _lengths = [_lengths] if not (type(_widths) == list): _widths = [_widths] manyslits = i = j = None for l in layers: i = -1 j = -1 manyslits = Cell("SlitArray") pitch = _pitches[0] for length in _lengths: j += 1 i = -1 for width in _widths: # for pitch in pitches: i += 1 if i % 3 == 0: j += 1 # Move to array to next line i = 0 # Restart at left nx = int(array_width / (length + spacing)) ny = int(array_height / pitch) # Define the slits slit = Cell("Slits") rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l) slit.add(rect) slits = CellArray(slit, nx, ny, (length + spacing, pitch)) slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch / 2.)) slit_array = Cell("SlitArray") slit_array.add(slits) text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, layer=l) lbl_vertical_offset = 1.35 if j % 2 == 0: text.translate( tuple(np.array(-text.bounding_box.mean(0)) + np.array(( 0, -array_height / lbl_vertical_offset)))) # Center justify label else: text.translate( tuple(np.array(-text.bounding_box.mean(0)) + np.array(( 0, array_height / lbl_vertical_offset)))) # Center justify label slit_array.add(text) manyslits.add(slit_array, origin=((array_width + array_spacing) * i, ( array_height + 2. * array_spacing) * j - array_spacing / 2.)) # This is an ugly hack to center rotated slits, should fix this properly... if rot_angle == 45: # TODO: fix this ugly thing hacky_offset_x = 200 hacky_offset_y = -25 elif rot_angle == 90: hacky_offset_x = 356 hacky_offset_y = 96.5 else: hacky_offset_x = 0 hacky_offset_y = 0 self.add(manyslits, origin=(-i * (array_width + array_spacing) / 2 + hacky_offset_x, -(j + 1.5) * (array_height + array_spacing) / 2 + hacky_offset_y), rotation=rot_angle) # %%Create the pattern that we want to write lgField = Frame("LargeField", (2000., 2000.), []) # Create the large write field lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True) # Define parameters that we will use for the slits widths = [0.01, 0.015, 0.020, 0.030, 0.040, 0.050] pitches = [2.0, 4.0] length = 20. smFrameSize = 400 slitColumnSpacing = 3. 
# Create the smaller write field and corresponding markers smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), []) smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 0, 100, 100, 30, l_smBeam) smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), []) smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 45, 100, 100, 30, l_smBeam) smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), []) smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, length, 0, 100, 100, 30, l_smBeam) smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), []) smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField4.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 90, 100, 100, 30, l_smBeam) quantum_playground = make_qp() centerAlignField = Frame("CenterAlignField", (smFrameSize, smFrameSize), []) centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) centerLeftAlignField = Frame("CenterLeftAlignField", (smFrameSize, smFrameSize), []) centerLeftAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) centerLeftAlignField.add(quantum_playground) centerRightAlignField = Frame("CenterRightAlignField", (smFrameSize, smFrameSize), []) centerRightAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) centerRightAlignField.add(quantum_playground, rotation=45) # Add everything together to a top cell topCell = Cell("TopCell") topCell.add(lgField) smFrameSpacing = 400 # Spacing between the three small frames dx = smFrameSpacing + smFrameSize dy = smFrameSpacing + smFrameSize topCell.add(smField1, origin=(-dx / 2., dy / 2.)) topCell.add(smField2, origin=(dx / 2., dy / 2.)) topCell.add(smField3, origin=(-dx / 2., -dy / 2.)) topCell.add(smField4, origin=(dx / 2., -dy / 2.)) topCell.add(centerLeftAlignField, origin=(-dx / 2, 0.)) topCell.add(centerRightAlignField, origin=(dx / 2, 0.)) topCell.add(centerAlignField, origin=(0., 0.)) topCell.spacing = np.array([4000., 4000.]) # %%Create the layout and output GDS file layout = Layout('LIBRARY') if putOnWafer: # Fit as many patterns on a 2inch wafer as possible wafer = MBE100Wafer('MembranesWafer', cells=[topCell]) layout.add(wafer) # layout.show() else: # Only output a single copy of the pattern (not on a wafer) layout.add(topCell) layout.show() filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks) filename = filestring.replace(' ', '_') + '.gds' layout.save(filename) cell_layout = Layout('LIBRARY') cell_layout.add(wafer.blocks[0]) cell_layout.save(filestring.replace(' ', '_') + '_block' + '.gds') # Output up chip for doing aligned jobs layout_field = Layout('LIBRARY') layout_field.add(topCell) layout_field.save(filestring.replace(' ', '_') + '_2mmField.gds')
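# --- Hedged helper sketch (not part of the original script) ---
# When iterating on slit parameters it can be handy to inspect a single small
# write field without regenerating the whole wafer. This reuses only Layout and
# the Frame objects defined above; the output filename is a placeholder.
def preview_frame(frame, filename='frame_preview.gds'):
    """Save one Frame (e.g. smField1) into its own GDS file for quick inspection."""
    preview = Layout('PREVIEW')
    preview.add(frame)
    preview.save(filename)
    return filename

# Example: preview_frame(smField1)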
gpl-3.0
-19,201,809,848,424,624
42.859611
114
0.566406
false
3.239273
false
false
false
Zero-Projects/Mozart
mozart/core/validators.py
1
2098
#!/usr/bin/python # -*- coding: utf-8 -*- from django import forms from django.utils.text import slugify from django.contrib.auth import authenticate from mozart.core.messages import custom_error_messages, media_messages def eval_blank(data): if str(data).isspace(): raise forms.ValidationError(custom_error_messages['blank'], code='blank') return data def eval_iexact(data, model, field, label): original = data model_name = (model._meta.verbose_name).lower() field_label = (model._meta.get_field(label).verbose_name).lower() lookup = '%s__iexact' % field if field == 'slug': data = slugify(data) lookup = field try: model.objects.get(**{lookup: data}) except model.DoesNotExist: return original raise forms.ValidationError(custom_error_messages['unique'], code='unique', params={'model_name': model_name, 'field_label': field_label}) def eval_matching(data_1, data_2): if data_1 != data_2: raise forms.ValidationError(custom_error_messages['mismatch'],) return data_1 and data_2 def eval_password(username, password): user_cache = authenticate(username=username, password=password) if user_cache is None: raise forms.ValidationError(custom_error_messages['incorrect_password']) return username and password # Media Validators def eval_audio(data): file_type = str(data.content_type) if file_type == 'audio/mp3': return data raise forms.ValidationError(media_messages['invalid_audio'],) def eval_image(data): file_type = str(data.content_type) if file_type == 'image/jpeg' or file_type == 'image/bmp' \ or file_type == 'image/png': return data raise forms.ValidationError(media_messages['invalid_image'],) def eval_general(data): file_type = str(data.content_type) if file_type == 'image/jpeg' or file_type == 'image/bmp' \ or file_type == 'image/png' or file_type == 'audio/mp3': return data raise forms.ValidationError(media_messages['invalid_archive'],)
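# --- Hedged usage sketch (illustrative; this would normally live in a forms.py) ---
# Shows how the validators above are meant to be called from a Django form's
# clean_* hooks. ExampleRegistrationForm and its field names are made up for this
# example; only the eval_* helpers come from this module.
class ExampleRegistrationForm(forms.Form):
    username = forms.CharField()
    password1 = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)

    def clean_username(self):
        from django.contrib.auth import get_user_model
        # Reject whitespace-only input, then enforce a case-insensitive unique check.
        data = eval_blank(self.cleaned_data['username'])
        return eval_iexact(data, get_user_model(), 'username', 'username')

    def clean(self):
        cleaned_data = super(ExampleRegistrationForm, self).clean()
        # Both password fields must match.
        eval_matching(cleaned_data.get('password1'), cleaned_data.get('password2'))
        return cleaned_data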
bsd-3-clause
-6,490,742,904,031,472,000
29.405797
94
0.662536
false
3.726465
false
false
false
exaile/exaile
plugins/daapclient/test.py
1
1484
# This file contains some code to test the DAAPClient as stand-alone application. import sys import logging from .client import DAAPClient log = logging.getLogger(__name__) def main(): connection = DAAPClient() if len(sys.argv) > 1: host = sys.argv[1] else: host = "localhost" if len(sys.argv) > 2: port = sys.argv[2] else: port = 3689 logging.basicConfig( level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s' ) try: # do everything in a big try, so we can disconnect at the end connection.connect(host, port) # auth isn't supported yet. Just log in session = connection.login() library = session.library() log.debug("Library name is `%r`", library.name) tracks = library.tracks() # demo - save the first track to disk # print("Saving %s by %s to disk as 'track.mp3'"%(tracks[0].name, tracks[0].artist)) # tracks[0].save("track.mp3") if len(tracks) > 0: tracks[0].atom.printTree() else: print('No Tracks') session.update() print(session.revision) finally: # this here, so we logout even if there's an error somewhere, # or itunes will eventually refuse more connections. print("--------------") try: session.logout() except Exception: pass if __name__ == '__main__': main()
gpl-2.0
1,291,806,273,974,802,000
23.733333
92
0.566712
false
3.895013
false
false
false
hannahborje/myTodoList
todoView.py
1
1345
from flask import request, jsonify, render_template from todoModel import TodoModel import flask.views import json RETRIEVE_DEFAULT_NR = 5 # Render template for main.html class TodoView(flask.views.MethodView): def get(self): return render_template('main.html') # Add todo (item) and if it is checked or not (value=false) class TodoAdd(flask.views.MethodView): def post(self): args = json.loads(request.data) TodoModel.add_todo(args['item'], args['value']) return jsonify({ 'success': True }) # When a todo is checked - change its value (true or false) class TodoAddValue(flask.views.MethodView): def post(self): args = json.loads(request.data) print("Changed done value to:", args) TodoModel.add_value(args['id'], args['value']) return jsonify({'success' : True}) # Retrieves all the todos from the database, including id and value class TodoRetrieve(flask.views.MethodView): def get(self, n): try: n = int(n) except ValueError: n = RETRIEVE_DEFAULT_NR if n <= 0: n = RETRIEVE_DEFAULT_NR todoList = TodoModel.retrieve_todos(n) return jsonify({ 'success': True, 'todoList': [{ 'id': item[0], 'text':item[1], 'value':item[2] } for item in todoList] })
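# --- Hedged wiring sketch (illustrative only) ---
# The MethodView classes above still have to be registered against URL rules on
# the Flask app object; this shows one plausible mapping. The URL paths and
# endpoint names are assumptions, not taken from the original project.
def register_todo_views(app):
    """Attach the todo views to a Flask application instance."""
    app.add_url_rule('/', view_func=TodoView.as_view('main'))
    app.add_url_rule('/todo/add', view_func=TodoAdd.as_view('todo_add'), methods=['POST'])
    app.add_url_rule('/todo/value', view_func=TodoAddValue.as_view('todo_value'), methods=['POST'])
    app.add_url_rule('/todo/list/<n>', view_func=TodoRetrieve.as_view('todo_list'), methods=['GET'])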
mit
6,281,056,490,317,521,000
31.02381
97
0.62974
false
3.558201
false
false
false
tranquilit/WAPT
waptsetupgui/deb/createdeb.py
1
10151
#!/usr/bin/python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------- # This file is part of WAPT # Copyright (C) 2013-2014 Tranquil IT Systems http://www.tranquil.it # WAPT aims to help Windows systems administrators to deploy # setup and update applications on users PC. # # WAPT is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # WAPT is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with WAPT. If not, see <http://www.gnu.org/licenses/>. # # ----------------------------------------------------------------------- from __future__ import print_function import sys import os import platform import logging import re import types import shutil import subprocess import argparse import stat import glob import jinja2 from git import Repo makepath = os.path.join from shutil import copyfile def run(*args, **kwargs): return subprocess.check_output(*args, shell=True, **kwargs) def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) def mkdir(path): if not os.path.isdir(path): os.makedirs(path) def debian_major(): return platform.linux_distribution()[1].split('.')[0] def get_distrib(): return platform.linux_distribution()[0].lower() def git_hash(): r = Repo('.',search_parent_directories=True) return '%s' % (r.active_branch.object.name_rev[:8],) def dev_revision(): return '%s' % (git_hash()) def setloglevel(alogger,loglevel): """set loglevel as string""" if loglevel in ('debug','warning','info','error','critical'): numeric_level = getattr(logging, loglevel.upper(), None) if not isinstance(numeric_level, int): raise ValueError('Invalid log level: %s' % loglevel) alogger.setLevel(numeric_level) def rsync(src, dst, excludes=[]): excludes_list = ['*.pyc','*~','.svn','deb','.git','.gitignore'] excludes_list.extend(excludes) rsync_source = src rsync_destination = dst rsync_options = ['-a','--stats'] for x in excludes_list: rsync_options.extend(['--exclude',x]) rsync_command = ['/usr/bin/rsync'] + rsync_options + [rsync_source,rsync_destination] eprint(rsync_command) return subprocess.check_output(rsync_command) def add_symlink(link_target,link_name): if link_target.startswith('/'): link_target = link_target[1:] relative_link_target_path = os.path.join('builddir',link_target) eprint("adding symlink %s -> %s" % (link_name, relative_link_target_path )) mkdir(os.path.dirname(relative_link_target_path)) if not os.path.exists(relative_link_target_path): cmd = 'ln -s %s %s ' % (relative_link_target_path,link_name) eprint(cmd) eprint(subprocess.check_output(cmd)) class Version(object): """Version object of form 0.0.0 can compare with respect to natural numbering and not alphabetical Args: version (str) : version string member_count (int) : number of version memebers to take in account. If actual members in version is less, add missing memeber with 0 value If actual members count is higher, removes last ones. >>> Version('0.10.2') > Version('0.2.5') True >>> Version('0.1.2') < Version('0.2.5') True >>> Version('0.1.2') == Version('0.1.2') True >>> Version('7') < Version('7.1') True .. 
versionchanged:: 1.6.2.5 truncate version members list to members_count if provided. """ def __init__(self,version,members_count=None): if version is None: version = '' assert isinstance(version,types.ModuleType) or isinstance(version,bytes) or isinstance(version,bytes) or isinstance(version,Version) if isinstance(version,types.ModuleType): self.versionstring = getattr(version,'__version__',None) elif isinstance(version,Version): self.versionstring = getattr(version,'versionstring',None) else: self.versionstring = version self.members = [ v.strip() for v in self.versionstring.split('.')] self.members_count = members_count if members_count is not None: if len(self.members)<members_count: self.members.extend(['0'] * (members_count-len(self.members))) else: self.members = self.members[0:members_count] def __cmp__(self,aversion): def nat_cmp(a, b): a = a or '' b = b or '' def convert(text): if text.isdigit(): return int(text) else: return text.lower() def alphanum_key(key): return [convert(c) for c in re.split('([0-9]+)', key)] return cmp(alphanum_key(a), alphanum_key(b)) if not isinstance(aversion,Version): aversion = Version(aversion,self.members_count) for i in range(0,max([len(self.members),len(aversion.members)])): if i<len(self.members): i1 = self.members[i] else: i1 = '' if i<len(aversion.members): i2 = aversion.members[i] else: i2='' v = nat_cmp(i1,i2) if v: return v return 0 def __str__(self): return '.'.join(self.members) def __repr__(self): return "Version('{}')".format('.'.join(self.members)) current_path = os.path.realpath(__file__) wapt_source_dir = os.path.abspath(os.path.join(os.path.dirname(current_path),'../..')) parser = argparse.ArgumentParser(u'Build a Debian package with already compiled executables in root directory.') parser.add_argument('-l', '--loglevel', help='Change log level (error, warning, info, debug...)') parser.add_argument('-r', '--revision',default=dev_revision(), help='revision to append to package version') options = parser.parse_args() logger = logging.getLogger() logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s') if options.loglevel is not None: setloglevel(logger,options.loglevel) if platform.system() != 'Linux': logger.error("This script should be used on Debian Linux") sys.exit(1) ######################################### BDIR = './builddir/' dict_exe = { 'WAPTSELF':'waptself.bin', 'WAPTEXIT':'waptexit.bin', } WAPTEDITION=os.environ.get('WAPTEDITION','community') ######################################### logger.debug('Getting version from waptutils') for line in open(os.path.join(wapt_source_dir,"waptutils.py")): if line.strip().startswith('__version__'): wapt_version = str(Version(line.split('=')[1].strip().replace('"', '').replace("'", ''),3)) if not wapt_version: eprint(u'version not found in %s/config.py' % os.path.abspath('..')) sys.exit(1) r = Repo('.',search_parent_directories=True) rev_count = '%04d' % (r.active_branch.commit.count(),) wapt_version = wapt_version +'.'+rev_count if options.revision: full_version = wapt_version + '-' + options.revision else: full_version = wapt_version logger.info('Create templates for control and postinst') jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('./debian/')) template_control = jinja_env.get_template('control.tmpl') template_vars = { 'version': wapt_version, 'description': 'WAPT Agent executables for Debian/Ubuntu\n', } render_control = template_control.render(template_vars) if os.path.exists(BDIR): shutil.rmtree(BDIR) os.makedirs(os.path.join(BDIR,'DEBIAN')) with 
open(os.path.join(BDIR,'DEBIAN','control'),'w') as f_control:
    f_control.write(render_control)

shutil.copy('./debian/postinst',os.path.join(BDIR,'DEBIAN','postinst'))
shutil.copy('./debian/postrm',os.path.join(BDIR,'DEBIAN','postrm'))

dir_desktop = os.path.join(BDIR,'opt/wapt')
os.makedirs(dir_desktop)
shutil.copy('../common/waptexit.desktop',os.path.join(dir_desktop,'tis-waptexit.desktop'))
shutil.copy('../common/waptself.desktop',os.path.join(dir_desktop,'tis-waptself.desktop'))

translation_path = '../../languages'
translation_path_deb = makepath(BDIR,'opt/wapt/languages')
files_translation = glob.glob(makepath(translation_path,'waptself*')) + glob.glob(makepath(translation_path,'waptexit*'))
os.makedirs(translation_path_deb)
# Copy waptself/waptexit translation files into the package tree
for file in files_translation:
    shutil.copy2(file,translation_path_deb)

if WAPTEDITION.lower()=='community':
    waptself_png = '../common/waptself-community.png'
    waptexit_png = '../common/waptexit-community.png'
else:
    waptself_png = '../common/waptself-enterprise.png'
    waptexit_png = '../common/waptexit-enterprise.png'

os.makedirs(os.path.join(BDIR,'opt/wapt/icons'))
icons_to_convert=[(waptself_png,makepath(BDIR,'opt/wapt/icons/waptself-%s.png')),(waptexit_png,makepath(BDIR,'opt/wapt/icons/waptexit-%s.png'))]
for icon in icons_to_convert:
    for size in ["16","32","64","128"]:
        run("convert %s -resize %sx%s %s" % (icon[0],size,size,icon[1] % size))

os.chmod(os.path.join(BDIR,'DEBIAN/'), 0755)
os.chmod(os.path.join(BDIR,'DEBIAN','postinst'), 0755)
os.chmod(os.path.join(BDIR,'DEBIAN','postrm'), 0755)

# creates package file structure
opt_wapt = os.path.join(BDIR,'opt/wapt')
mkdir(opt_wapt)

for afile in dict_exe.keys():
    os.chmod(dict_exe[afile],0755)
    shutil.copy(dict_exe[afile],opt_wapt)

# build
if WAPTEDITION=='enterprise':
    package_filename = 'tis-waptagent-gui-enterprise-%s.deb' % (full_version)
else:
    package_filename = 'tis-waptagent-gui-%s.deb' % (full_version)

eprint(subprocess.check_output(['dpkg-deb', '--build', BDIR, package_filename]))
print(package_filename)
gpl-3.0
-6,633,762,645,145,644,000
34.003448
144
0.63442
false
3.465688
false
false
false
GoogleCloudDataproc/cloud-dataproc
codelabs/spark-bigquery/counts_by_subreddit.py
1
3261
# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script accompanies this codelab: # https://codelabs.developers.google.com/codelabs/pyspark-bigquery/ # This script outputs subreddits counts for a given set of years and month # This data comes from BigQuery via the dataset "fh-bigquery.reddit_comments" # These allow us to create a schema for our data from pyspark.sql.types import StructField, StructType, StringType, LongType # A Spark Session is how we interact with Spark SQL to create Dataframes from pyspark.sql import SparkSession # This will help catch some PySpark errors from py4j.protocol import Py4JJavaError # Create a SparkSession under the name "reddit". Viewable via the Spark UI spark = SparkSession.builder.appName("reddit").getOrCreate() # Create a two column schema consisting of a string and a long integer fields = [StructField("subreddit", StringType(), True), StructField("count", LongType(), True)] schema = StructType(fields) # Create an empty DataFrame. We will continuously union our output with this subreddit_counts = spark.createDataFrame([], schema) # Establish a set of years and months to iterate over years = ['2017', '2018', '2019'] months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'] # Keep track of all tables accessed via the job tables_read = [] for year in years: for month in months: # In the form of <project-id>.<dataset>.<table> table = f"fh-bigquery.reddit_posts.{year}_{month}" # If the table doesn't exist we will simply continue and not # log it into our "tables_read" list try: table_df = (spark.read.format('bigquery').option('table', table) .load()) tables_read.append(table) except Py4JJavaError as e: if f"Table {table} not found" in str(e): continue else: raise # We perform a group-by on subreddit, aggregating by the count and then # unioning the output to our base dataframe subreddit_counts = ( table_df .groupBy("subreddit") .count() .union(subreddit_counts) ) print("The following list of tables will be accounted for in our analysis:") for table in tables_read: print(table) # From our base table, we perform a group-by, summing over the counts. # We then rename the column and sort in descending order both for readability. # show() will collect the table into memory output the table to std out. ( subreddit_counts .groupBy("subreddit") .sum("count") .withColumnRenamed("sum(count)", "count") .sort("count", ascending=False) .show() )
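# --- Hedged extension sketch (not part of the codelab script) ---
# If the aggregated counts should be persisted rather than only printed, the same
# spark-bigquery connector used above for reading can write the DataFrame back
# out. The destination table and the temporary GCS bucket names are placeholders.
def save_counts_to_bigquery(counts_df,
                            table="my_project.my_dataset.subreddit_counts",
                            bucket="my-temp-bucket"):
    """Write an aggregated subreddit-counts DataFrame to a BigQuery table."""
    (counts_df
        .write
        .format("bigquery")
        .option("table", table)
        .option("temporaryGcsBucket", bucket)
        .save())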
apache-2.0
19,847,593,516,167,104
35.640449
79
0.680773
false
3.933655
false
false
false
kingtaurus/cs224d
assignment1/tensorflow_word2vec.py
1
3188
import os import math import random import collections import numpy as np import tensorflow as tf import cs224d.data_utils as data_utils from tensorflow.models.embedding import gen_word2vec as word2vec class Options(object): def __init__(self): #Model Options self.emb_dim = 20 self.train_data = None self.num_samples = 20 self.learning_rate = 1.0 self.epochs_to_train = 5 self.batch_size = 64 self.window_size = 5 self.min_count = 3 class Word2Vec(object): """Word2Vec model (skipgram) """ def __init__(self, options, session): self._options = options self._session = session self._word2id = {} self._id2word = [] self.build_graph() self.build_eval_graph() self.save_vocab() self._read_dataset() def _read_dataset(self): # dataset = data_utils.StanfordSentiment() # #print(dataset.sent_labels()[0:100]) # #print(dataset.getSplitSentences(0)[0:100]) # #this is the labels vector :) # #sentences = np.from_iter(dataset.sentences(), dtype="int32") # self._word2id = dataset.tokens() # print(self._word2id["UNK"]) # ids = [self._word2id.get(w) for w in self._word2id.keys()] # print(ids) pass def forward(self, examples, labels): return None,None def nce_loss(self, true_logits, sampled_logits): opts = self._options true_xent = tf.nn.sigmoid_cross_entropy_with_logits(true_logits, tf.ones_like(true_logits)) sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(sampled_logits, tf.zeros_like(sampled_logits)) nce_loss_tensor = (tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent)) / opts.batch_size return nce_loss_tensor def build_graph(self): opts = self._options (words, counts, words_per_epoch, self._epoch, self._words, examples, labels) = word2vec.skipgram(filename="text8", batch_size=opt.batch_size, window_size=opt.window_size, min_count=opt.min_count, subsample=0) (opts.vocab_words, opts.vocab_counts, opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch]) opts.vocab_size = len(opts.vocab_words) print("Data file: ", opts.train_data) print("Vocab size: ", opts.vocab_size - 1, " + UNK") print("Words per epoch: ", opts.words_per_epoch) self._examples = examples self._labels = labels self._id2word = opts.vocab_words for i, w in enumerate(self._id2word): self._word2id[w] = i true_logits, sampled_logits = self.forward(examples, labels) loss = self.nce_loss(true_logits, sampled_logits) tf.scalar_summary("NCE loss", loss) self._loss = loss self.optimize(loss) def build_eval_graph(self): pass def save_vocab(self): pass if __name__ == "__main__": opt = Options() session = tf.Session() model = Word2Vec(opt, session)
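# --- Hedged sketch of the missing pieces (assumptions, not the author's code) ---
# As written, Word2Vec.forward() returns (None, None) and optimize() is never
# defined, so build_graph() cannot run to completion. The two functions below
# show one plausible skip-gram/NCE wiring, modelled loosely on the classic
# TensorFlow word2vec example; they could be bound onto the class with
# Word2Vec.forward = forward and Word2Vec.optimize = optimize.
def forward(self, examples, labels):
    """Return (true_logits, sampled_logits) for a batch of (center, context) ids."""
    opts = self._options
    init_width = 0.5 / opts.emb_dim
    # Input embeddings and softmax weights/biases.
    emb = tf.Variable(
        tf.random_uniform([opts.vocab_size, opts.emb_dim], -init_width, init_width),
        name="emb")
    sm_w_t = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="sm_w_t")
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")

    # Negative samples drawn from the (distorted) unigram distribution.
    labels_matrix = tf.reshape(tf.cast(labels, tf.int64), [opts.batch_size, 1])
    sampled_ids, _, _ = tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist())

    example_emb = tf.nn.embedding_lookup(emb, examples)
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    true_b = tf.nn.embedding_lookup(sm_b, labels)
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

    # Logits for the true context words and for the negative samples.
    true_logits = tf.reduce_sum(example_emb * true_w, 1) + true_b
    sampled_logits = tf.matmul(example_emb, sampled_w, transpose_b=True) + sampled_b
    return true_logits, sampled_logits


def optimize(self, loss):
    """Minimise the NCE loss with plain SGD at the configured learning rate."""
    optimizer = tf.train.GradientDescentOptimizer(self._options.learning_rate)
    self._train = optimizer.minimize(loss)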
mit
-6,612,433,178,655,619,000
33.27957
109
0.584065
false
3.582022
false
false
false
chubbymaggie/barf-project
barf/utils/reil.py
1
7524
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from barf.analysis.basicblock import CFGRecoverer from barf.analysis.basicblock import ControlFlowGraph from barf.analysis.basicblock import RecursiveDescent from barf.arch.x86.x86base import X86ArchitectureInformation from barf.arch.x86.x86disassembler import X86Disassembler from barf.arch.x86.x86translator import X86Translator from barf.core.reil import ReilContainer from barf.core.reil import ReilSequence from barf.core.reil import split_address class ReilContainerBuilder(object): def __init__(self, binary): self.__binary = binary self.__arch_mode = self.__binary.architecture_mode self.__arch = X86ArchitectureInformation(self.__arch_mode) self.__disassembler = X86Disassembler(architecture_mode=self.__arch_mode) self.__translator = X86Translator(architecture_mode=self.__arch_mode) self.__bb_builder = CFGRecoverer(RecursiveDescent(self.__disassembler, self.__binary.text_section, self.__translator, self.__arch)) def build(self, functions): reil_container = ReilContainer() for _, start, end in functions: bbs, _ = self.__bb_builder.build(start, end) cfg = ControlFlowGraph(bbs) reil_container = self.__translate_cfg(cfg, reil_container=reil_container) return reil_container # Auxiliary methods # ======================================================================== # def __translate_cfg(self, cfg, reil_container=None): if not reil_container: reil_container = ReilContainer() asm_instrs = [] for bb in cfg.basic_blocks: for dual_instr in bb: asm_instrs += [dual_instr.asm_instr] reil_container = self.__translate(asm_instrs, reil_container) return reil_container def __translate(self, asm_instrs, reil_container): asm_instr_last = None instr_seq_prev = None for asm_instr in asm_instrs: instr_seq = ReilSequence() for reil_instr in self.__translator.translate(asm_instr): instr_seq.append(reil_instr) if instr_seq_prev: instr_seq_prev.next_sequence_address = instr_seq.address reil_container.add(instr_seq) instr_seq_prev = instr_seq if instr_seq_prev: if asm_instr_last: instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8 return reil_container class ReilContainerEx(object): def __init__(self, binary, symbols): self.__binary = binary self.__arch_mode = 
self.__binary.architecture_mode self.__arch = X86ArchitectureInformation(self.__arch_mode) self.__disassembler = X86Disassembler(architecture_mode=self.__arch_mode) self.__translator = X86Translator(architecture_mode=self.__arch_mode) self.__bb_builder = CFGRecoverer(RecursiveDescent(self.__disassembler, self.__binary.text_section, self.__translator, self.__arch)) self.__container = {} self.__symbols = symbols self.__symbols_by_addr = {} for name, start, end in symbols: self.__symbols_by_addr[start] = (name, end) # Auxiliary methods # ======================================================================== # def __translate_cfg(self, cfg, reil_container=None): if not reil_container: reil_container = ReilContainer() asm_instrs = [] for bb in cfg.basic_blocks: for dual_instr in bb: asm_instrs += [dual_instr.asm_instr] reil_container = self.__translate(asm_instrs, reil_container) return reil_container def __translate(self, asm_instrs, reil_container): asm_instr_last = None instr_seq_prev = None for asm_instr in asm_instrs: instr_seq = ReilSequence() for reil_instr in self.__translator.translate(asm_instr): instr_seq.append(reil_instr) if instr_seq_prev: instr_seq_prev.next_sequence_address = instr_seq.address reil_container.add(instr_seq) instr_seq_prev = instr_seq if instr_seq_prev: if asm_instr_last: instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8 return reil_container def add(self, sequence): base_addr, _ = split_address(sequence.address) if base_addr in self.__container.keys(): raise Exception("Invalid sequence") else: self.__container[base_addr] = sequence def fetch(self, address): base_addr, index = split_address(address) if base_addr not in self.__container.keys(): self.__resolve_address(base_addr) return self.__container[base_addr].get(index) def get_next_address(self, address): base_addr, index = split_address(address) if base_addr not in self.__container.keys(): raise Exception("Invalid address.") addr = address if index < len(self.__container[base_addr]) - 1: addr += 1 else: addr = self.__container[base_addr].next_sequence_address return addr def dump(self): for base_addr in sorted(self.__container.keys()): self.__container[base_addr].dump() print("-" * 80) def __iter__(self): for addr in sorted(self.__container.keys()): for instr in self.__container[addr]: yield instr def __resolve_address(self, address): if address not in self.__symbols_by_addr: # print("Not symbol : {:#010x}".format(address)) raise Exception("Symbol not found!") name, end = self.__symbols_by_addr[address] # print("Resolving {:s} @ {:#010x}".format(name, address)) cfg = ControlFlowGraph(self.__bb_builder.build(address, end)) _ = self.__translate_cfg(cfg, reil_container=self)
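# --- Hedged usage sketch (illustrative only) ---
# Both helpers above expect a loaded binary object exposing `architecture_mode`
# and `text_section`, plus an iterable of (name, start, end) symbol tuples; how
# those are obtained depends on barf's loader and is not shown here. The eager
# builder translates every range up front, while ReilContainerEx resolves a
# symbol's CFG lazily the first time an address inside it is fetched.
def translate_functions(binary, symbols, eager=True):
    """Return a REIL container covering the given symbol ranges."""
    if eager:
        return ReilContainerBuilder(binary).build(symbols)
    return ReilContainerEx(binary, symbols)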
bsd-2-clause
-6,574,008,995,850,594,000
34.828571
106
0.624535
false
4.125
false
false
false
Antergos/Cnchi
src/installation/post_fstab.py
1
9489
#!/usr/bin/env python # -*- coding: utf-8 -*- # # post_fstab.py # # Copyright © 2013-2018 Antergos # # This file is part of Cnchi. # # Cnchi is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Cnchi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Cnchi; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """ Create /etc/fstab file """ import logging import os import re import parted3.fs_module as fs class PostFstab(): """ Setup /etc/fstab """ DEST_DIR = '/install' def __init__(self, method, mount_devices, fs_devices, ssd, settings): """ Init class properties """ self.method = method self.mount_devices = mount_devices self.fs_devices = fs_devices self.ssd = ssd self.zfs = settings.get('zfs') self.use_lvm = settings.get('use_lvm') self.use_luks = settings.get('use_luks') self.luks_root_password = settings.get('luks_root_password') self.root_uuid = None def get_swap_fstab_line(self, uuid, partition_path): """ Create swap line for fstab """ # If using a TRIM supported SSD, # discard is a valid mount option for swap if partition_path in self.ssd: opts = "defaults,discard" else: opts = "defaults" if self.zfs: # We can't use UUID with zfs, so we will use device name txt = "{0} swap swap {1} 0 0".format(partition_path, opts) else: txt = "UUID={0} swap swap {1} 0 0".format(uuid, opts) return txt @staticmethod def add_vol_to_crypttab(vol_name, uuid, keyfile='none'): """ Modify the crypttab file """ crypttab_path = os.path.join(PostFstab.DEST_DIR, 'etc/crypttab') os.chmod(crypttab_path, 0o666) with open(crypttab_path, 'a') as crypttab_file: line = "{0} /dev/disk/by-uuid/{1} {2} luks\n" line = line.format(vol_name, uuid, keyfile) crypttab_file.write(line) logging.debug("Added %s to crypttab", line) os.chmod(crypttab_path, 0o600) @staticmethod def get_device_fstab_line(partition_path, mount_point, myfmt, opts='defaults', chk='0'): """ Create fstab line """ txt = "{0} {1} {2} {3} 0 {4}" txt = txt.format(partition_path, mount_point, myfmt, opts, chk) logging.debug("Added %s to fstab", txt) return txt @staticmethod def get_uuid_fstab_line(uuid, mount_point, myfmt, opts='defaults', chk='0'): """ Create fstab line """ txt = "UUID={0} {1} {2} {3} 0 {4}" txt = txt.format(uuid, mount_point, myfmt, opts, chk) logging.debug("Added %s to fstab", txt) return txt @staticmethod def get_mount_options(myfmt, is_ssd): """ Adds mount options depending on filesystem """ opts = "" if not is_ssd: if "btrfs" in myfmt: opts = "defaults,relatime,space_cache,autodefrag,inode_cache" elif "f2fs" in myfmt: opts = "defaults,noatime" elif "ext3" in myfmt or "ext4" in myfmt: opts = "defaults,relatime,data=ordered" else: opts = "defaults,relatime" else: # As of linux kernel version 3.7, the following # filesystems support TRIM: ext4, btrfs, JFS, and XFS. 
if myfmt in ["ext4", "jfs", "xfs"]: opts = "defaults,noatime,discard" elif myfmt == "btrfs": opts = ("defaults,noatime,compress=lzo,ssd,discard," "space_cache,autodefrag,inode_cache") else: opts = "defaults,noatime" return opts def run(self): """ Create /etc/fstab file """ all_lines = [ "# /etc/fstab: static file system information.", "#", "# Use 'blkid' to print the universally unique identifier for a", "# device; this may be used with UUID= as a more robust way to name devices", "# that works even if disks are added and removed. See fstab(5).", "#", "# <file system> <mount point> <type> <options> <dump> <pass>", "#"] # Use lsblk to be able to match LUKS UUID with mapper UUID pknames = fs.get_pknames() for mount_point in self.mount_devices: partition_path = self.mount_devices[mount_point] uuid = fs.get_uuid(partition_path) if uuid == "": logging.warning( "Can't get %s partition UUID. It won't be added to fstab", partition_path) continue if partition_path in self.fs_devices: myfmt = self.fs_devices[partition_path] else: # It hasn't any filesystem defined, skip it. continue # Take care of swap partitions if "swap" in myfmt: txt = self.get_swap_fstab_line(uuid, partition_path) all_lines.append(txt) logging.debug("Added %s to fstab", txt) continue # Fix for home + luks, no lvm (from Automatic Install) if ("/home" in mount_point and self.method == "automatic" and self.use_luks and not self.use_lvm and '/dev/mapper' in partition_path): keyfile = '/etc/luks-keys/home' if self.luks_root_password: # Use password and not a keyfile keyfile = 'none' vol_name = partition_path[len("/dev/mapper/"):] self.add_vol_to_crypttab(vol_name, uuid, keyfile) # Add cryptAntergosHome line to fstab txt = self.get_device_fstab_line(partition_path, mount_point, myfmt) all_lines.append(txt) continue # Add all LUKS partitions from Advanced Install (except root). if (self.method == 'advanced' and mount_point is not '/' and self.use_luks and '/dev/mapper' in partition_path): # As the mapper with the filesystem will have a different UUID # than the partition it is encrypted in, we have to take care # of this here. Then we will be able to add it to crypttab try: vol_name = partition_path[len("/dev/mapper/"):] luks_partition_path = "/dev/" + pknames[vol_name] except KeyError: logging.error( "Can't find the PKNAME value of %s", partition_path) continue luks_uuid = fs.get_uuid(luks_partition_path) if luks_uuid: self.add_vol_to_crypttab(vol_name, luks_uuid) else: logging.error( "Can't add luks uuid to crypttab for %s partition", luks_partition_path) continue # Finally, the fstab line to mount the unencrypted file system # if a mount point has been specified by the user if mount_point: txt = self.get_device_fstab_line(partition_path, mount_point, myfmt) all_lines.append(txt) continue # Avoid adding a partition to fstab when it has no mount point # (swap has been checked above) if mount_point == "": continue # fstab uses vfat to mount fat16 and fat32 partitions if "fat" in myfmt: myfmt = 'vfat' # Create mount point on destination system if it yet doesn't exist full_path = os.path.join(PostFstab.DEST_DIR, mount_point) os.makedirs(full_path, mode=0o755, exist_ok=True) # Is ssd ? 
# Device list example: {'/dev/sdb': False, '/dev/sda': True} txt = "Device list : {0}".format(self.ssd) logging.debug(txt) device = re.sub("[0-9]+$", "", partition_path) is_ssd = self.ssd.get(device) txt = "Device: {0}, SSD: {1}".format(device, is_ssd) logging.debug(txt) # Get mount options opts = self.get_mount_options(myfmt, is_ssd) chk = '0' if mount_point == "/": if myfmt not in ['btrfs', 'f2fs']: chk = '1' self.root_uuid = uuid txt = self.get_uuid_fstab_line(uuid, mount_point, myfmt, opts, chk) all_lines.append(txt) full_text = '\n'.join(all_lines) + '\n' fstab_path = os.path.join(PostFstab.DEST_DIR, 'etc/fstab') with open(fstab_path, 'w') as fstab_file: fstab_file.write(full_text)
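# --- Hedged illustration (never called by the installer) ---
# A small sanity check of the static helpers above: pick mount options for a
# filesystem/SSD combination and render the resulting fstab line. The UUID is a
# made-up placeholder.
def _example_fstab_line():
    opts = PostFstab.get_mount_options('ext4', is_ssd=True)
    # Expected result: "UUID=abcd1234-... / ext4 defaults,noatime,discard 0 1"
    return PostFstab.get_uuid_fstab_line(
        'abcd1234-0000-0000-0000-000000000000', '/', 'ext4', opts, '1')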
gpl-3.0
5,706,897,448,647,539,000
37.569106
92
0.541315
false
3.824264
false
false
false
bjoernricks/python-quilt
quilt/cli/delete.py
1
2083
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80: # python-quilt - A Python implementation of the quilt patch system # # Copyright (C) 2012 - 2017 Björn Ricks <bjoern.ricks@gmail.com> # # See LICENSE comming with the source of python-quilt for details. from quilt.cli.meta import Command from quilt.cli.parser import Argument, OptionArgument from quilt.delete import Delete class DeleteCommand(Command): name = "delete" help = "Remove the specified or topmost patch from the series file." remove = OptionArgument("-r", action="store_true", dest="remove", default=False, help="Remove the deleted patch file from the " "patches directory as well.") backup = OptionArgument("--backup", action="store_true", default=False, dest="backup", help="Rename the patch file to patch~ rather than " "deleting it. Ignored if not used with `-r'.") next = OptionArgument("-n", action="store_true", dest="next", help="Delete the next unapplied patch, " "rather than the specified or topmost patch.") patch = Argument(nargs="?") def run(self, args): delete = Delete(self.get_cwd(), self.get_pc_dir(), self.get_patches_dir()) delete.deleted_patch.connect(self.deleted_patch) delete.deleting_patch.connect(self.deleting_patch) if args.next and args.patch: self.exit_error("-n parameter doesn't take an argument") if args.next: delete.delete_next(args.remove, args.backup) else: delete.delete_patch(args.patch, args.remove, args.backup) def deleted_patch(self, patch): print("Removed patch %s" % patch.get_name()) def deleting_patch(self, patch, applied): if applied: print("Removing currently applied patch %s" % patch.get_name()) else: print("Removing patch %s" % patch.get_name())
mit
-4,401,374,766,035,893,000
38.283019
79
0.59414
false
4.14741
false
false
false
kbknapp/Platypus-cad
platypi/ppmodules/Apps/Testing/Network.py
1
2926
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Python 3.x
#
# ppctl_cadnetwork v0.1
#   * Displays information about the network setup to the PiFaceCAD
#   * Requires:
#     * ifconfig (for subnet mask)
#     * grep (for subnet mask)
#     * awk (for subnet mask)
#     * ip (for default gw)
#
# Changelog
#   * v0.1
#     * Initial Release
#
import pifacecad
import socket      # For: IP, Hostname
import subprocess  # For: Default GW, Subnet Mask

_ROCKER_RIGHT = 7
_ROCKER_LEFT = 6
_ROCKER_PUSH = 5

_curr_index = 0
_cad = None
_listener = None
_orig_listener = None
_orig_screen = ""

_screens = [["IP:", ""], ["Subnet Mask:", ""], ["Default GW:", ""], ["Hostname", ""], ["Quit?", ""]]


def _write_screen():
    _cad.lcd.clear()
    if _screens[_curr_index][1] == "":
        _do_action()
    _cad.lcd.write("%s\n%s" % (_screens[_curr_index][0], _screens[_curr_index][1]))


def _next(event=None):
    global _curr_index
    if _curr_index == len(_screens) - 1:
        _curr_index = 0
    else:
        _curr_index += 1
    _write_screen()


def _previous(event=None):
    global _curr_index
    if _curr_index == 0:
        _curr_index = len(_screens) - 1
    else:
        _curr_index -= 1
    _write_screen()


def _do_action(event=None):
    if _curr_index == 0:
        # Get IP
        _screens[0][1] = socket.gethostbyname(socket.gethostname())
    elif _curr_index == 1:
        # Get Subnet Mask
        _screens[1][1] = subprocess.check_output("ifconfig eth0 | grep netmask | awk '{print $4}'", shell=True).decode("utf-8")
    elif _curr_index == 2:
        # Get Default GW
        _screens[2][1] = subprocess.check_output("ip route show | grep via | awk '{print $3}'", shell=True).decode("utf-8")
    elif _curr_index == 3:
        # Get hostname
        _screens[3][1] = socket.gethostname()
    else:
        # Quit
        _listener.deactivate()
        _cad.lcd.clear()
        if _orig_screen != "" and _orig_listener is not None:
            _cad.lcd.write(_orig_screen)
            _orig_listener.activate()


def _register_buttons():
    global _listener
    _listener = pifacecad.SwitchEventListener(chip=_cad)
    # Add rocker->right (switch 7) to 'next'
    _listener.register(_ROCKER_RIGHT, pifacecad.IODIR_FALLING_EDGE, _next)
    # Add rocker->left (switch 6) to 'previous'
    _listener.register(_ROCKER_LEFT, pifacecad.IODIR_FALLING_EDGE, _previous)
    # Add rocker->push (switch 5) to 'do action'
    _listener.register(_ROCKER_PUSH, pifacecad.IODIR_FALLING_EDGE, _do_action)
    _listener.activate()


def start_module(cad, listener, screen):
    global _cad
    global _orig_listener
    global _orig_screen
    _cad = cad
    _orig_listener = listener
    _orig_screen = screen
    _cad.lcd.clear()
    _cad.lcd.blink_off()
    _cad.lcd.cursor_off()
    if _screens[0][1] == "":
        _do_action()
    _cad.lcd.write("%s\n%s" % (_screens[0][0], _screens[0][1]))
    _register_buttons()


if __name__ == "__main__":
    # Called directly, must initialize CAD
    _cad = pifacecad.PiFaceCAD()
    _cad.lcd.blink_off()
    _cad.lcd.cursor_off()
    _cad.lcd.backlight_off()
    if _screens[0][1] == "":
        _do_action()
    _cad.lcd.write("%s\n%s" % (_screens[0][0], _screens[0][1]))
    _register_buttons()
gpl-2.0
-4,029,454,239,713,564,000
22.796748
121
0.638414
false
2.533333
false
false
false
tomassurin/codility
Lesson 01/01 - tapes.py
1
1696
# A non-empty zero-indexed array A consisting of N integers is given. Array A represents numbers on a tape. # Any integer P, such that 0 < P < N, splits this tape into two non-empty parts: A[0], A[1], ..., A[P − 1] and A[P], A[P + 1], ..., A[N − 1]. # The difference between the two parts is the value of: |(A[0] + A[1] + ... + A[P − 1]) − (A[P] + A[P + 1] + ... + A[N − 1])| # In other words, it is the absolute difference between the sum of the first part and the sum of the second part. # For example, consider array A such that: # A[0] = 3 # A[1] = 1 # A[2] = 2 # A[3] = 4 # A[4] = 3 # We can split this tape in four places: # P = 1, difference = |3 − 10| = 7 # P = 2, difference = |4 − 9| = 5 # P = 3, difference = |6 − 7| = 1 # P = 4, difference = |10 − 3| = 7 # Write a function: # def solution(A) # that, given a non-empty zero-indexed array A of N integers, returns the minimal difference that can be achieved. # For example, given: # A[0] = 3 # A[1] = 1 # A[2] = 2 # A[3] = 4 # A[4] = 3 # the function should return 1, as explained above. # Assume that: # N is an integer within the range [2..100,000]; # each element of array A is an integer within the range [−1,000..1,000]. # Complexity: # expected worst-case time complexity is O(N); # expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments). # Elements of input arrays can be modified. def solution(A): differences = [] sumAll = sum(A) partSum = 0 for i in xrange(0, len(A)): partSum += A[i] differences.append(abs(partSum - (sumAll - partSum))) return min(differences)
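The list-based solution above meets the expected O(N) time bound. As an illustrative aside only (not part of the original submission), the same idea can be expressed with a running prefix sum and O(1) extra space:

def solution_running(A):
    total = sum(A)
    left = 0
    best = None
    for p in range(1, len(A)):           # 0 < P < N
        left += A[p - 1]                 # sum of A[0..P-1]
        diff = abs(2 * left - total)     # |left - (total - left)|
        best = diff if best is None else min(best, diff)
    return best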
unlicense
710,252,307,385,933,200
29.490909
141
0.614558
false
2.756579
false
false
false
sternoru/goscalecms
goscale/plugins/presentations/cms_plugins.py
1
2031
from goscale.cms_plugins import GoscaleCMSPluginBase from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext_lazy as _ from django.conf import settings import models GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES = getattr(settings, 'GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES', ( ('presentation.html', _('Presentation')), )) + getattr(settings, 'GOSCALE_PRESENTATIONS_CUSTOM_PLUGIN_TEMPLATES', ()) class GooglePresentationPlugin(GoscaleCMSPluginBase): """ Google Presentation plugin for GoScale """ model = models.GooglePresentation name = _("Google Presentation") plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0] fieldsets = [ [_('Presentation options'), { 'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'delay', 'autoplay', 'loop'] }] ] plugin_pool.register_plugin(GooglePresentationPlugin) class SlidesharePlugin(GoscaleCMSPluginBase): """ Slideshare Presentation plugin for GoScale """ model = models.Slideshare name = _("Slideshare Presentation") plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0] fieldsets = [ [_('Presentation options'), { 'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'start', 'without_related_content'] }] ] plugin_pool.register_plugin(SlidesharePlugin) class SpeakerdeckPlugin(GoscaleCMSPluginBase): """ Speakerdeck Presentation plugin for GoScale """ model = models.Speakerdeck name = _("Speakerdeck Presentation") plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0] fieldsets = [ [_('Presentation options'), { 'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'start'] }] ] plugin_pool.register_plugin(SpeakerdeckPlugin)
bsd-3-clause
7,149,152,504,235,631,000
32.866667
110
0.687839
false
3.686025
false
false
false
guohongze/adminset
branches/migrations/0001_initial.py
1
4299
# -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-04-18 05:56 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('appconf', '0001_initial'), ] operations = [ migrations.CreateModel( name='Branch', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, unique=True, verbose_name='\u5206\u652f\u673a\u6784')), ('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u529e\u516c\u5730\u5740')), ('telephone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u8054\u7cfb\u7535\u8bdd')), ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5907\u6ce8')), ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u8d1f\u8d23\u4eba')), ], ), migrations.CreateModel( name='Region', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, unique=True, verbose_name='\u884c\u653f\u533a\u57df')), ('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u529e\u516c\u5730\u5740')), ('telephone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u8054\u7cfb\u7535\u8bdd')), ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5907\u6ce8\u4fe1\u606f')), ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u8d1f\u8d23\u4eba')), ], ), migrations.CreateModel( name='Resource', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sn', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u7f16\u7801')), ('name', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u540d\u79f0')), ('spec', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u89c4\u683c')), ('budget', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u9884\u7b97\u91d1\u989d')), ('paid', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u91d1\u989d')), ('contract', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u7f16\u53f7')), ('contract_start', models.DateField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u5f00\u59cb')), ('contract_end', models.DateField(blank=True, null=True, verbose_name='\u5408\u540c\u7ed3\u675f')), ('supplier', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u4f9b\u5e94\u5546\u540d')), ('service_phone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u670d\u52a1\u7535\u8bdd')), ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u8bf4\u660e')), ('branch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='branches.Branch', verbose_name='\u6240\u5c5e\u673a\u6784')), ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u4f9b\u5e94\u5546\u8054\u7cfb\u4eba')), ], ), migrations.AddField( model_name='branch', name='region', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, to='branches.Region', verbose_name='\u6240\u5c5e\u5927\u533a'), ), ]
gpl-2.0
-442,874,068,296,152,400
66.171875
190
0.626192
false
3.193908
false
false
false
erickt/hue
desktop/libs/hadoop/src/hadoop/cluster.py
1
7182
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging from hadoop import conf from hadoop.fs import webhdfs, LocalSubFileSystem from hadoop.job_tracker import LiveJobTracker from desktop.lib.paths import get_build_dir LOG = logging.getLogger(__name__) FS_CACHE = None MR_CACHE = None MR_NAME_CACHE = 'default' def _make_filesystem(identifier): choice = os.getenv("FB_FS") if choice == "testing": path = os.path.join(get_build_dir(), "fs") if not os.path.isdir(path): LOG.warning(("Could not find fs directory: %s. Perhaps you need to run manage.py filebrowser_test_setup?") % path) return LocalSubFileSystem(path) else: cluster_conf = conf.HDFS_CLUSTERS[identifier] return webhdfs.WebHdfs.from_config(cluster_conf) def _make_mrcluster(identifier): cluster_conf = conf.MR_CLUSTERS[identifier] return LiveJobTracker.from_conf(cluster_conf) def get_hdfs(identifier="default"): global FS_CACHE get_all_hdfs() return FS_CACHE[identifier] def get_defaultfs(): fs = get_hdfs() if fs.logical_name: return fs.logical_name else: return fs.fs_defaultfs def get_all_hdfs(): global FS_CACHE if FS_CACHE is not None: return FS_CACHE FS_CACHE = {} for identifier in conf.HDFS_CLUSTERS.keys(): FS_CACHE[identifier] = _make_filesystem(identifier) return FS_CACHE def get_default_mrcluster(): """ Get the default JT (not necessarily HA). """ global MR_CACHE global MR_NAME_CACHE try: all_mrclusters() return MR_CACHE.get(MR_NAME_CACHE) except KeyError: # Return an arbitrary cluster candidates = all_mrclusters() if candidates: return candidates.values()[0] return None def get_default_yarncluster(): """ Get the default RM (not necessarily HA). """ global MR_NAME_CACHE try: return conf.YARN_CLUSTERS[MR_NAME_CACHE] except KeyError: return get_yarn() def get_next_ha_mrcluster(): """ Return the next available JT instance and cache its name. This method currently works for distincting between active/standby JT as a standby JT does not respond. A cleaner but more complicated way would be to do something like the MRHAAdmin tool and org.apache.hadoop.ha.HAServiceStatus#getServiceStatus(). 
""" global MR_NAME_CACHE candidates = all_mrclusters() has_ha = sum([conf.MR_CLUSTERS[name].SUBMIT_TO.get() for name in conf.MR_CLUSTERS.keys()]) >= 2 current_user = get_default_mrcluster().user for name in conf.MR_CLUSTERS.keys(): config = conf.MR_CLUSTERS[name] if config.SUBMIT_TO.get(): jt = candidates[name] if has_ha: try: jt.setuser(current_user) status = jt.cluster_status() if status.stateAsString == 'RUNNING': MR_NAME_CACHE = name LOG.warn('Picking HA JobTracker: %s' % name) return (config, jt) else: LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status)) except Exception, ex: LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex)) else: return (config, jt) return None def get_mrcluster(identifier="default"): global MR_CACHE all_mrclusters() return MR_CACHE[identifier] def all_mrclusters(): global MR_CACHE if MR_CACHE is not None: return MR_CACHE MR_CACHE = {} for identifier in conf.MR_CLUSTERS.keys(): MR_CACHE[identifier] = _make_mrcluster(identifier) return MR_CACHE def get_yarn(): global MR_NAME_CACHE if MR_NAME_CACHE in conf.YARN_CLUSTERS and conf.YARN_CLUSTERS[MR_NAME_CACHE].SUBMIT_TO.get(): return conf.YARN_CLUSTERS[MR_NAME_CACHE] for name in conf.YARN_CLUSTERS.keys(): yarn = conf.YARN_CLUSTERS[name] if yarn.SUBMIT_TO.get(): return yarn def get_next_ha_yarncluster(): """ Return the next available YARN RM instance and cache its name. """ from hadoop.yarn.resource_manager_api import ResourceManagerApi global MR_NAME_CACHE has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in conf.YARN_CLUSTERS.keys()]) >= 2 for name in conf.YARN_CLUSTERS.keys(): config = conf.YARN_CLUSTERS[name] if config.SUBMIT_TO.get(): rm = ResourceManagerApi(config.RESOURCE_MANAGER_API_URL.get(), config.SECURITY_ENABLED.get(), config.SSL_CERT_CA_VERIFY.get()) if has_ha: try: cluster_info = rm.cluster() if cluster_info['clusterInfo']['haState'] == 'ACTIVE': MR_NAME_CACHE = name LOG.warn('Picking RM HA: %s' % name) from hadoop.yarn import resource_manager_api resource_manager_api._api_cache = None # Reset cache from hadoop.yarn import mapreduce_api mapreduce_api._api_cache = None return (config, rm) else: LOG.info('RM %s is not RUNNING, skipping it: %s' % (name, cluster_info)) except Exception, ex: LOG.info('RM %s is not available, skipping it: %s' % (name, ex)) else: return (config, rm) return None def get_cluster_for_job_submission(): """ Check the 'submit_to' for each MR/Yarn cluster, and return the config section of first one that enables submission. Support MR1/MR2 HA. """ yarn = get_next_ha_yarncluster() if yarn: return yarn mr = get_next_ha_mrcluster() if mr is not None: return mr return None def get_cluster_conf_for_job_submission(): cluster = get_cluster_for_job_submission() if cluster: config, rm = cluster return config else: return None def get_cluster_addr_for_job_submission(): """ Check the 'submit_to' for each MR/Yarn cluster, and return the logical name or host:port of first one that enables submission. """ if is_yarn(): if get_yarn().LOGICAL_NAME.get(): return get_yarn().LOGICAL_NAME.get() conf = get_cluster_conf_for_job_submission() if conf is None: return None return "%s:%s" % (conf.HOST.get(), conf.PORT.get()) def is_yarn(): return get_yarn() is not None def clear_caches(): """ Clears cluster's internal caches. Returns something that can be given back to restore_caches. 
""" global FS_CACHE, MR_CACHE old = FS_CACHE, MR_CACHE FS_CACHE, MR_CACHE = None, None return old def restore_caches(old): """ Restores caches from the result of a previous clear_caches call. """ global FS_CACHE, MR_CACHE FS_CACHE, MR_CACHE = old
apache-2.0
8,905,411,804,505,308,000
26
132
0.673629
false
3.39414
true
false
false
bobobox/ansible
lib/ansible/cli/doc.py
1
13761
# (c) 2014, James Tanner <tanner.jc@gmail.com> # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import traceback import textwrap from ansible.compat.six import iteritems, string_types from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins import module_loader, action_loader from ansible.cli import CLI from ansible.utils import module_docs try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class DocCLI(CLI): """ Vault command line class """ def __init__(self, args): super(DocCLI, self).__init__(args) self.module_list = [] def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [options] [module...]', epilog='Show Ansible module documentation', module_opts=True, ) self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available modules') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_modules', help='Show documentation for all modules') super(DocCLI, self).parse() display.verbosity = self.options.verbosity def run(self): super(DocCLI, self).run() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) # list modules if self.options.list_dir: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.pager(self.get_module_list_text()) return 0 # process all modules if self.options.all_modules: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.args = sorted(set(self.module_list) - module_docs.BLACKLIST_MODULES) if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line module list text = '' for module in self.args: try: # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True) if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue if any(filename.endswith(x) for x in C.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0)) except: display.vvv(traceback.format_exc()) display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) continue if doc is not None: # is there corresponding action plugin? 
if module in action_loader: doc['action'] = True else: doc['action'] = False all_keys = [] for (k,v) in iteritems(doc['options']): all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = filename doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs doc['metadata'] = metadata if self.options.show_snippet: text += self.get_snippet_text(doc) else: text += self.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. raise AnsibleError("Parsing produced an empty object.") except Exception as e: display.vvv(traceback.format_exc()) raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) if text: self.pager(text) return 0 def find_modules(self, path): for module in os.listdir(path): full_path = '/'.join([path, module]) if module.startswith('.'): continue elif os.path.isdir(full_path): continue elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module.startswith('__'): continue elif module in C.IGNORE_FILES: continue elif module.startswith('_'): if os.path.islink(full_path): # avoids aliases continue module = os.path.splitext(module)[0] # removes the extension module = module.lstrip('_') # remove underscore from deprecated modules self.module_list.append(module) def get_module_list_text(self): columns = display.columns displace = max(len(x) for x in self.module_list) linelimit = columns - displace - 5 text = [] deprecated = [] for module in sorted(set(self.module_list)): if module in module_docs.BLACKLIST_MODULES: continue # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True) if filename is None: continue if filename.endswith(".ps1"): continue if os.path.isdir(filename): continue try: doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename) desc = self.tty_ify(doc.get('short_description', '?')).strip() if len(desc) > linelimit: desc = desc[:linelimit] + '...' 
if module.startswith('_'): # Handle deprecated deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module) if len(deprecated) > 0: text.append("\nDEPRECATED:") text.extend(deprecated) return "\n".join(text) @staticmethod def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in finder._get_paths(): if i not in ret: ret.append(i) return os.pathsep.join(ret) def get_snippet_text(self, doc): text = [] desc = CLI.tty_ify(doc['short_description']) text.append("- name: %s" % (desc)) text.append(" action: %s" % (doc['module'])) pad = 31 subdent = " " * pad limit = display.columns - pad for o in sorted(doc['options'].keys()): opt = doc['options'][o] desc = CLI.tty_ify(" ".join(opt['description'])) required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: s = o + "=" else: s = o text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent))) text.append('') return "\n".join(text) def get_man_text(self, doc): opt_indent=" " text = [] text.append("> %s (%s)\n" % (doc['module'].upper(), doc['filename'])) pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) if isinstance(doc['description'], list): desc = " ".join(doc['description']) else: desc = doc['description'] text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" ")) # FUTURE: move deprecation to metadata-only if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0: text.append("DEPRECATED: \n%s\n" % doc['deprecated']) if doc['metadata'] and isinstance(doc['metadata'], dict): text.append("Metadata:") for k in doc['metadata']: if isinstance(k, list): text.append("\t%s: %s\n" % (k.capitalize(), ", ".join(doc['metadata'][k]))) else: text.append("\t%s: %s\n" % (k.capitalize(), doc['metadata'][k])) if 'action' in doc and doc['action']: text.append(" * note: %s\n" % "This module has a corresponding action plugin.") if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") for o in sorted(doc['option_keys']): opt = doc['options'][o] required = opt.get('required', False) if not isinstance(required, bool): raise("Incorrect value for 'Required', a boolean is needed.: %s" % required) if required: opt_leadin = "=" else: opt_leadin = "-" text.append("%s %s" % (opt_leadin, o)) if isinstance(opt['description'], list): for entry in opt['description']: text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) else: text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) choices = '' if 'choices' in opt: choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")" default = '' if 'default' in opt or not required: default = "[Default: " + str(opt.get('default', '(null)')) + "]" text.append(textwrap.fill(CLI.tty_ify(choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0: text.append("Notes:") for note in doc['notes']: text.append(textwrap.fill(CLI.tty_ify(note), limit-6, 
initial_indent=" * ", subsequent_indent=opt_indent)) if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: req = ", ".join(doc['requirements']) text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent)) if 'examples' in doc and len(doc['examples']) > 0: text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) for ex in doc['examples']: text.append("%s\n" % (ex['code'])) if 'plainexamples' in doc and doc['plainexamples'] is not None: text.append("EXAMPLES:") text.append(doc['plainexamples']) if 'returndocs' in doc and doc['returndocs'] is not None: text.append("RETURN VALUES:") text.append(doc['returndocs']) text.append('') maintainers = set() if 'author' in doc: if isinstance(doc['author'], string_types): maintainers.add(doc['author']) else: maintainers.update(doc['author']) if 'maintainers' in doc: if isinstance(doc['maintainers'], string_types): maintainers.add(doc['author']) else: maintainers.update(doc['author']) text.append('MAINTAINERS: ' + ', '.join(maintainers)) text.append('') return "\n".join(text)
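A minimal sketch of driving DocCLI directly, using only the options defined in parse() above (-l/--list, -s/--snippet, -a/--all); the module name 'copy' is just an example, and the ansible-doc script is the usual entry point rather than this direct invocation.

from ansible.cli.doc import DocCLI

cli = DocCLI(['ansible-doc', '-s', 'copy'])  # playbook snippet for one module
cli.parse()
cli.run()

listing = DocCLI(['ansible-doc', '--list'])  # list all available modules
listing.parse()
listing.run()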
gpl-3.0
4,870,792,144,390,071,000
38.205128
153
0.548943
false
4.282913
false
false
false
TUDelftGeodesy/Doris
doris_stack/functions/baselines.py
1
3319
import os
import numpy as np
import matplotlib.pyplot as plt
import warnings
from shutil import copyfile
from doris.doris_stack.main_code.resdata import ResData
import datetime
import subprocess


def baselines(dir_in, inputfile, start_date='2014-01-01', end_date='2018-01-01', doris=''):
    # This function calculates the baselines and plots a baseline plot.

    # Define doris path
    if not doris:
        # doris_path is expected to be provided by the surrounding package configuration;
        # pass the doris executable explicitly to avoid depending on it.
        doris = doris_path

    if not os.path.exists(dir_in):
        warnings.warn('The input directory does not exist!')
        return
    os.chdir(dir_in)

    process_folder = os.path.join(dir_in, 'baseline_process')
    if not os.path.exists(process_folder):
        os.mkdir(process_folder)
    os.chdir(process_folder)

    try:
        first = np.datetime64(start_date)
        last = np.datetime64(end_date)
    except:
        warnings.warn('Input dates could not be converted, use "yyyy-mm-dd"')
        return

    # Search for folders and take only the first burst.
    folders = next(os.walk(dir_in))[1]
    folders = sorted(folders)

    # Initialize... (Search for folders / resfiles / dates)
    n = 0
    res = []
    date = []

    for fold in folders:
        # Select only the folders which have a name like yyyymmdd and fall within the time span
        if len(fold) == 8:
            # define date of folder
            date_prod = np.datetime64((fold[:4] + '-' + fold[4:6] + '-' + fold[6:]))

            if date_prod >= first and date_prod <= last:
                # Select the first swath
                date_fold = os.path.join(dir_in, fold)
                swath_fold = os.path.join(date_fold, next(os.walk(date_fold))[1][0])

                # Select the first burst
                prod_files = next(os.walk(swath_fold))[2]
                for file in prod_files:
                    if file.endswith('1.res'):
                        res.extend([os.path.join(swath_fold, file)])
                        date.extend([date_prod])
                        n = n + 1
                        break

    # Now create a set of baselines
    baselines = np.zeros([len(res), len(res)])
    resfiles = dict()

    # First create the ifgs.res files and store the data in a res data class.
    master = res[0]
    copyfile(master, os.path.join(process_folder, 'master.res'))

    for resultfile, dat in zip(res, date):
        copyfile(resultfile, os.path.join(process_folder, 'slave.res'))
        subprocess.call([doris + ' ' + inputfile], shell=True)
        dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
        resfiles[dat] = ResData(type='interferogram', filename='ifgs.res')
        resfiles[dat].read()
        os.remove(os.path.join(process_folder, 'ifgs.res'))

    # Then gather the baselines
    for dat, j in zip(date, range(len(date))):
        dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
        baselines[j, 0] = resfiles[dat].processes['coarse_orbits']['Bperp'][1]

    # Create figure of baselines.
    days = (date[0] - date).astype(float)
    plt.figure(111)
    plt.plot(baselines[:, 0], days, marker='o')

    # Annotate
    for dat, x, y in zip(date, baselines[:, 0], days):
        dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
        plt.annotate(
            dat,
            xy=(x, y), xytext=(0, 0),
            textcoords='offset points', size=8)

    plt.savefig('baseline_plot.pdf')
gpl-3.0
-2,185,828,952,843,984,600
32.525253
87
0.587828
false
3.51589
false
false
false
rolker/cesium-tools
srtm2qmesh.py
1
7012
#!/usr/bin/env python import sys import os import math import json import scipy.io.netcdf import quantized_mesh_tile.global_geodetic import quantized_mesh_tile.terrain # https://pypi.python.org/pypi/quantized-mesh-tile/ # pip install quantized-mesh-tile class Grd: def __init__(self,fname,tileSize): self.ncfile = scipy.io.netcdf.netcdf_file(fname) self.xcount = self.ncfile.dimensions['lon'] self.ycount = self.ncfile.dimensions['lat'] self.latVar = self.ncfile.variables['lat'] self.lonVar = self.ncfile.variables['lon'] self.zVar = self.ncfile.variables['z'] self.minx = self.lonVar[0] self.miny = self.latVar[0] self.maxx = self.lonVar[-1] self.maxy = self.latVar[-1] self.dx = (self.maxx-self.minx)/(self.xcount-1.0) self.dy = (self.maxy-self.miny)/(self.ycount-1.0) self.maxZoom = int(math.log(180/(self.dy*tileSize),2)) def getPointAtIndex(self,xi,yi): if xi < 0 or yi < 0 or xi >= self.xcount or yi >= self.ycount: return None lat = self.latVar[int(yi)] lon = self.lonVar[int(xi)] d = self.zVar[int(yi),int(xi)] return Point(lat,lon,d) def interpolatePointAtIndex(self,xi,yi,interpolateX=False,interpolateY=False,verbose=False): if (not interpolateX and not interpolateY): return self.getPointAtIndex(xi,yi) if xi < 0 or yi < 0 or xi >= self.xcount or yi >= self.ycount: return None xi0 = int(xi) xi1 = min(xi0+1,self.xcount-1) xp = xi-xi0 yi0 = int(yi) yi1 = min(yi0+1,self.ycount-1) yp = yi-yi0 lon0 = self.lonVar[xi0] lon1 = self.lonVar[xi1] lon = lon0*(1.0-xp)+lon1*xp lat0 = self.latVar[yi0] lat1 = self.latVar[yi1] lat = lat0*(1.0-yp)+lat1*yp d00 = self.zVar[yi0,xi0] d01 = self.zVar[yi1,xi0] d10 = self.zVar[yi0,xi1] d11 = self.zVar[yi1,xi1] d0 = d00*(1.0-yp)+d01*yp d1 = d10*(1.0-yp)+d11*yp d = d0*(1.0-xp)+d1*xp if verbose: print 'ds:',d00,d01,d10,d11,'d:',d,'xp:',xp,'yp:',yp, return Point(lat,lon,d) def __str__(self): return 'size: '+str(self.xcount)+'x'+str(self.ycount)+' bounds: '+str(self.minx)+','+str(self.miny)+' - '+str(self.maxx)+','+str(self.maxy)+' dx,dy:'+str(self.dx)+','+str(self.dy)+' max zoom: '+str(self.maxZoom) class Point: def __init__(self,lat,lon,height=None): self.lat = lat self.lon = lon self.height = height def __str__(self): return 'lat: '+str(self.lat)+', lon: '+str(self.lon)+', height: '+str(self.height) def __repr__(self): return '('+self.__str__()+')' def asTriple(self): h = self.height if math.isnan(h): h = 0.0 return (self.lon,self.lat,h) def createTile(x,y,level,params,base,maps=None): print geodetic.TileBounds(x,y,level) fname = os.path.join(params['outputDirectory'],str(level)+'/'+str(x)+'/'+str(y)+'.terrain') print '\t',fname dn = os.path.dirname(fname) if not os.path.isdir(dn): os.makedirs(os.path.dirname(fname)) if os.path.isfile(fname): os.remove(fname) b = geodetic.TileBounds(x,y,level) m = base if level >= base.maxZoom: m = maps[0] xStep = ((b[2]-b[0])/params['tileSize'])/m.dx yStep = ((b[3]-b[1])/params['tileSize'])/m.dy print '\txStep:',xStep,'yStep:',yStep xi = (b[0]-m.minx)/m.dx yi = (b[1]-m.miny)/m.dy print '\txi,yi:',xi,yi print '\t',m.getPointAtIndex(xi,yi) print '\tinterp\t',m.interpolatePointAtIndex(xi,yi,True,True,True) sys.stdout.flush() triangles = [] verticies = [] for j in range(params['tileSize']): if j == 0: yedge0 = True else: yedge0 = False if j == (params['tileSize']-1): yedge1 = True else: yedge1 = False for i in range(params['tileSize']): if i == 0: xedge0 = True else: xedge0 = False if i == (params['tileSize']-1): xedge1 = True else: xedge1 = False if i < (params['tileSize']) and j < (params['tileSize']): t1 = 
m.interpolatePointAtIndex(xi+i*xStep,yi+j*yStep,xedge0,yedge0) t2 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+j*yStep,xedge1,yedge0) t3 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1) if t1 is not None and t2 is not None and t3 is not None: triangles.append((t1.asTriple(),t2.asTriple(),t3.asTriple())) t1 = m.interpolatePointAtIndex(xi+i*xStep,yi+j*yStep,xedge0,yedge0) t2 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1) t3 = m.interpolatePointAtIndex(xi+i*xStep,yi+(j+1)*yStep,xedge0,yedge1) if t1 is not None and t2 is not None and t3 is not None: triangles.append((t1.asTriple(),t2.asTriple(),t3.asTriple())) if i == (params['tileSize']-1) and j == (params['tileSize']-1): print '\tget\t',m.getPointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep) print '\tinterp\t',m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1,True) if len(triangles): tile = quantized_mesh_tile.encode(triangles,bounds=geodetic.TileBounds(x,y,level),hasLighting=True) tile.toFile(fname) if len(sys.argv) != 2: print 'Usage: base2qmesh params.json' sys.exit(1) params = json.load(open(sys.argv[1])) print params geodetic = quantized_mesh_tile.global_geodetic.GlobalGeodetic(True) base = Grd(params['basemap'],params['tileSize']) print base maxLevel = params['baseLevels'] maps = [] for m in params['maps']: print m maps.append(Grd(m,params['tileSize'])) maxLevel = max(maxLevel,maps[-1].maxZoom) print maps[-1] layer = {'tilesjon':'2.1.0', 'format':'quantized-mesh-1.0', 'scheme':'tms', 'minzoom':0, 'tiles':('{z}/{x}/{y}.terrain',), 'available':[] } for level in range(maxLevel): layer['maxzoom']=level factor = 2**level print level,factor sys.stdout.flush() if level < params['baseLevels']: for x in range(2*factor): for y in range(factor): createTile(x,y,level,params,base) else: x0,y0= geodetic.LonLatToTile(maps[0].minx,maps[0].miny,level) x1,y1= geodetic.LonLatToTile(maps[0].maxx,maps[0].maxy,level) print 'level:',level,'indecies:',x0,y0,'-',x1,y1 for x in range(x0,x1+1): for y in range(y0,y1+1): createTile(x,y,level,params,base,maps) open(os.path.join(params['outputDirectory'],'layer.json'),'w').write(json.dumps(layer))
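The script reads its configuration from the params.json passed on the command line. The keys below are exactly the ones looked up in the code above ('basemap', 'maps', 'tileSize', 'baseLevels', 'outputDirectory'); the file names and numbers are placeholders.

import json

params = {
    "basemap": "etopo1.grd",          # coarse global grid, used below 'baseLevels'
    "maps": ["coastal_highres.grd"],  # finer grids used at deeper zoom levels
    "tileSize": 64,                   # samples per tile edge
    "baseLevels": 4,                  # zoom levels rendered from the basemap
    "outputDirectory": "tiles"
}
with open("params.json", "w") as handle:
    json.dump(params, handle, indent=2)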
mit
2,825,510,702,830,970,000
34.065
219
0.566315
false
2.930213
false
false
false
LaryLoose/laryloose.xbmc-addons
plugin.video.szenestreams/default.py
1
7913
#!/usr/bin/python # -*- coding: utf-8 -*- import urllib,urllib2,re,xbmcaddon,xbmcplugin,xbmcgui,xbmc,HTMLParser from stream import * htmlparser = HTMLParser.HTMLParser() pluginhandle = int(sys.argv[1]) itemcnt = 0 baseurl = 'http://www.szene-streams.com' settings = xbmcaddon.Addon(id='plugin.video.szene-streams') maxitems = (int(settings.getSetting("items_per_page"))+1)*10 filterUnknownHoster = settings.getSetting("filterUnknownHoster") == 'true' forceMovieViewMode = settings.getSetting("forceMovieViewMode") == 'true' movieViewMode = str(settings.getSetting("movieViewMode")) dbg = False def CATEGORIES(): data = getUrl(baseurl) cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I) addDir('Letzte Updates', baseurl + '/publ/?page1', 1, '', True) addDir('Suche...', baseurl + '/publ', 4, '', True) addDir('Serien', baseurl + '/load', 0, '', True) for (url, num, name) in cats: if 'http:' not in url: url = baseurl + url addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True) xbmc.executebuiltin("Container.SetViewMode(400)") def SERIES(url): data = getUrl(url) cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I) addDir('Letzte Updates', baseurl + '/load/0-1', 1, '', True) for (url, num, name) in cats: if 'http:' not in url: url = baseurl + url addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True) xbmc.executebuiltin("Container.SetViewMode(400)") def INDEX(url, search=None): global itemcnt if (dbg): print url data = getUrl(url, search) movies = re.findall('<div class="ImgWrapNews">[^<]*<a[^<]*<img[^>]*src="([^"]*.[jpg|png])"[^>]*alt="([^"]*)"[^>]*>.*?class="[^"]*entryLink[^"]*".*?href="([^"]*)"', data, re.S|re.I) if movies: for (m_image, m_title, m_url) in movies: if 'http:' not in m_url: m_url = baseurl + m_url addDir(clean(m_title), m_url, 2, m_image, True) itemcnt = itemcnt + 1 nextPage = re.findall('<a class="swchItem"[^>]*onclick="spages\(\'(\d+)\'[^>]*?"[^>]*><span>&raquo;</span>', data, re.S) if nextPage: if '?page' in url: nextPageUrl = re.sub('\?page[\d]+$', '?page' + nextPage[0], url) elif re.search('[\d]+-[\d]+$', url): nextPageUrl = re.sub('-[\d]+$', '-' + nextPage[0], url) else: nextPageUrl = url + "-" + nextPage[0] if itemcnt >= maxitems: addDir('Weiter >>', nextPageUrl, 1, '', True) else: INDEX(nextPageUrl) if forceMovieViewMode: xbmc.executebuiltin("Container.SetViewMode(" + movieViewMode + ")") def VIDEOLINKS(url, image): data = getUrl(url) streams = [] raw = re.findall('(<fieldset[^>]*>[^<]*<legend>.*?</fieldset>)', data, re.S) if raw: for each in raw: if "Film Tipps" in each: continue series = re.findall('<div class="spoiler"><font[^>]*><b[^>]*>(.+?)</b>(.*?)<input', each, re.S|re.I) if not series: series = re.findall('<legend>(.+?)</legend>[^<]*<div class="spoiler">(.*?)<input', each, re.S|re.I) if not series: series = re.findall('<legend>(.+?)</legend>.*?(<iframe.*?</iframe>|<a[^>]*href=".+"[^>]*>).*', each, re.S|re.I) if series: for ser in series: for (s, n) in re.findall('<a[^>]*href="([^"]+)"[^>]*>([^<]*)<', each, re.S|re.I): if dbg: print 'ser1' if ser: n = clean(ser[1]) + ' ' + extractFilename(s) n = clean(n) if n else extractFilename(s) if n: streams += [(n, s)] for s in re.findall('<iframe[^>]*src="([^"]*)"[^>]*>', each, re.S|re.I): if dbg: print 'ser2' if ser: n = clean(ser[1]) if not n: n = 'unknown' if n: streams 
+= [(n, s)] elif re.match('.*?iframe.*?src.*', each, re.S|re.I): if dbg: print 'nonser1' streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?<iframe.*?src=["|\'](.*?)["|\']', each, re.S|re.I) else: if dbg: print 'nonser2' streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?</font>.*?target="_blank" href=["|\'](.*?)["|\']', each, re.S|re.I) if streams: for (filename, stream) in streams: stream = cleanURL(stream) if dbg: print "filename: " + str(filename) + ", stream: " + str(stream) hoster = get_stream_link().get_hostername(stream) if filterUnknownHoster and hoster == 'Not Supported': continue entry = '[COLOR=blue](' + hoster + ')[/COLOR] ' + filename addLink(entry, cleanURL(stream), 3, image) def SEARCH(url): keyboard = xbmc.Keyboard('', 'Suche') keyboard.doModal() if keyboard.isConfirmed() and keyboard.getText(): search_string = keyboard.getText() INDEX(url, search_string) def clean(s): try: s = htmlparser.unescape(s) except: print "could not unescape string '%s'"%(s) s = re.sub('<[^>]*>', '', s) s = s.replace('_', ' ') s = re.sub('[ ]+', ' ', s) for hit in set(re.findall("&#\d+;", s)): try: s = s.replace(hit, unichr(int(hit[2:-1]))) except ValueError: pass return s.strip('\n').strip() def cleanURL(s): s = re.sub('<[^>]*>', '', s) s = re.sub('[ ]+', ' ', s) for hit in set(re.findall("&#\d+;", s)): try: s = s.replace(hit, unichr(int(hit[2:-1]))) except ValueError: pass return s.strip('\n').strip() def extractFilename(path): path = re.sub('^.*/', '',clean(path)).replace('.html', '').replace('_', ' ') return re.sub('\.[a-zA-Z]{3}', '', path) def GETLINK(url): stream_url = get_stream_link().get_stream(url) if stream_url: if re.match('^Error: ', stream_url, re.S|re.I): xbmc.executebuiltin("XBMC.Notification(Fehler!, " + re.sub('^Error: ','',stream_url) + ", 4000)") else: listitem = xbmcgui.ListItem(path=stream_url) return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem) def getUrl(url, query=None): req = urllib2.Request(url) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3') if (dbg): print query if query: values = { 'query' : query, 'a' : '2' } response = urllib2.urlopen(req, urllib.urlencode(values)) else: response = urllib2.urlopen(req) data = response.read() response.close() return data def get_params(): param=[] paramstring=sys.argv[2] if len(paramstring)>=2: params=sys.argv[2] cleanedparams=params.replace('?','') if (params[len(params)-1]=='/'): params=params[0:len(params)-2] pairsofparams=cleanedparams.split('&') param={} for i in range(len(pairsofparams)): splitparams={} splitparams=pairsofparams[i].split('=') if (len(splitparams))==2: param[splitparams[0]]=splitparams[1] return param def addLink(name, url, mode, image): u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode) liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=image) liz.setInfo( type="Video", infoLabels={ "Title": name } ) liz.setProperty('IsPlayable', 'true') return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz) def addDir(name, url, mode, image, is_folder=False): u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&image="+urllib.quote_plus(image) liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image) liz.setInfo( type="Video", infoLabels={ "Title": name } ) return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=is_folder) params = get_params() url = mode = image = None 
try: url = urllib.unquote_plus(params["url"]) except: pass try: mode = int(params["mode"]) except: pass try: image = urllib.unquote_plus(params["image"]) except: pass if mode==None or url==None or len(url)<1: CATEGORIES() elif mode==0: SERIES(url) elif mode==1: INDEX(url) elif mode==2: VIDEOLINKS(url, image) elif mode==3: GETLINK(url) elif mode==4: SEARCH(url) xbmcplugin.endOfDirectory(int(sys.argv[1]))
gpl-2.0
3,533,005,184,932,227,000
39.172589
181
0.606091
false
2.801062
false
false
false
ericzundel/pants
src/python/pants/engine/scheduler.py
1
19768
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging import os import threading import time from contextlib import contextmanager from pants.base.specs import (AscendantAddresses, DescendantAddresses, SiblingAddresses, SingleAddress) from pants.build_graph.address import Address from pants.engine.addressable import SubclassesOf from pants.engine.fs import PathGlobs, create_fs_intrinsics, generate_fs_subjects from pants.engine.isolated_process import create_snapshot_intrinsics, create_snapshot_singletons from pants.engine.nodes import Return, Runnable, Throw from pants.engine.rules import NodeBuilder, RulesetValidator from pants.engine.selectors import (Select, SelectDependencies, SelectLiteral, SelectProjection, SelectVariant, constraint_for) from pants.engine.struct import HasProducts, Variants from pants.engine.subsystem.native import (ExternContext, Function, TypeConstraint, TypeId, extern_id_to_str, extern_key_for, extern_project, extern_project_multi, extern_satisfied_by, extern_store_list, extern_val_to_str) from pants.util.objects import datatype logger = logging.getLogger(__name__) class ExecutionRequest(datatype('ExecutionRequest', ['roots'])): """Holds the roots for an execution, which might have been requested by a user. To create an ExecutionRequest, see `LocalScheduler.build_request` (which performs goal translation) or `LocalScheduler.execution_request`. :param roots: Roots for this request. :type roots: list of tuples of subject and product. """ class LocalScheduler(object): """A scheduler that expands a product Graph by executing user defined tasks.""" def __init__(self, goals, tasks, project_tree, native, graph_lock=None): """ :param goals: A dict from a goal name to a product type. A goal is just an alias for a particular (possibly synthetic) product. :param tasks: A set of (output, input selection clause, task function) triples which is used to compute values in the product graph. :param project_tree: An instance of ProjectTree for the current build root. :param native: An instance of engine.subsystem.native.Native. :param graph_lock: A re-entrant lock to use for guarding access to the internal product Graph instance. Defaults to creating a new threading.RLock(). """ self._products_by_goal = goals self._project_tree = project_tree self._native = native self._product_graph_lock = graph_lock or threading.RLock() self._run_count = 0 # Create a handle for the ExternContext (which must be kept alive as long as this object), and # the native Scheduler. self._context = ExternContext() self._context_handle = native.new_handle(self._context) # TODO: The only (?) case where we use inheritance rather than exact type unions. has_products_constraint = TypeConstraint(self._to_id(SubclassesOf(HasProducts))) scheduler = native.lib.scheduler_create(self._context_handle, extern_key_for, extern_id_to_str, extern_val_to_str, extern_satisfied_by, extern_store_list, extern_project, extern_project_multi, self._to_key('name'), self._to_key('products'), self._to_key('default'), self._to_constraint(Address), has_products_constraint, self._to_constraint(Variants)) self._scheduler = native.gc(scheduler, native.lib.scheduler_destroy) self._execution_request = None # Validate and register all provided and intrinsic tasks. 
select_product = lambda product: Select(product) # TODO: This bounding of input Subject types allows for closed-world validation, but is not # strictly necessary for execution. We might eventually be able to remove it by only executing # validation below the execution roots (and thus not considering paths that aren't in use). root_selector_fns = { Address: select_product, AscendantAddresses: select_product, DescendantAddresses: select_product, PathGlobs: select_product, SiblingAddresses: select_product, SingleAddress: select_product, } intrinsics = create_fs_intrinsics(project_tree) + create_snapshot_intrinsics(project_tree) singletons = create_snapshot_singletons(project_tree) node_builder = NodeBuilder.create(tasks, intrinsics, singletons) RulesetValidator(node_builder, goals, root_selector_fns).validate() self._register_tasks(node_builder.tasks) self._register_intrinsics(node_builder.intrinsics) self._register_singletons(node_builder.singletons) def _to_value(self, obj): return self._context.to_value(obj) def _from_value(self, val): return self._context.from_value(val) def _to_id(self, typ): return self._context.to_id(typ) def _to_key(self, obj): return self._context.to_key(obj) def _from_id(self, cdata): return self._context.from_id(cdata) def _from_key(self, cdata): return self._context.from_key(cdata) def _to_constraint(self, type_or_constraint): return TypeConstraint(self._to_id(constraint_for(type_or_constraint))) def _register_singletons(self, singletons): """Register the given singletons dict. Singleton tasks are those that are the default for a particular type(product). Like intrinsics, singleton tasks create Runnables that are not cacheable. """ for product_type, rule in singletons.items(): self._native.lib.singleton_task_add(self._scheduler, Function(self._to_id(rule.func)), self._to_constraint(product_type)) def _register_intrinsics(self, intrinsics): """Register the given intrinsics dict. Intrinsic tasks are those that are the default for a particular type(subject), type(product) pair. By default, intrinsic tasks create Runnables that are not cacheable. """ for (subject_type, product_type), rule in intrinsics.items(): self._native.lib.intrinsic_task_add(self._scheduler, Function(self._to_id(rule.func)), TypeId(self._to_id(subject_type)), self._to_constraint(subject_type), self._to_constraint(product_type)) def _register_tasks(self, tasks): """Register the given tasks dict with the native scheduler.""" registered = set() for output_type, rules in tasks.items(): output_constraint = self._to_constraint(output_type) for rule in rules: # TODO: The task map has heterogeneous keys, so we normalize them to type constraints # and dedupe them before registering to the native engine: # see: https://github.com/pantsbuild/pants/issues/4005 key = (output_constraint, rule) if key in registered: continue registered.add(key) _, input_selects, func = rule.as_triple() self._native.lib.task_add(self._scheduler, Function(self._to_id(func)), output_constraint) for selector in input_selects: selector_type = type(selector) product_constraint = self._to_constraint(selector.product) if selector_type is Select: self._native.lib.task_add_select(self._scheduler, product_constraint) elif selector_type is SelectVariant: self._native.lib.task_add_select_variant(self._scheduler, product_constraint, self._context.utf8_buf(selector.variant_key)) elif selector_type is SelectLiteral: # NB: Intentionally ignores subject parameter to provide a literal subject. 
self._native.lib.task_add_select_literal(self._scheduler, self._to_key(selector.subject), product_constraint) elif selector_type is SelectDependencies: self._native.lib.task_add_select_dependencies(self._scheduler, product_constraint, self._to_constraint(selector.dep_product), self._to_key(selector.field), selector.transitive) elif selector_type is SelectProjection: if len(selector.fields) != 1: raise ValueError("TODO: remove support for projecting multiple fields at once.") field = selector.fields[0] self._native.lib.task_add_select_projection(self._scheduler, self._to_constraint(selector.product), TypeId(self._to_id(selector.projected_subject)), self._to_key(field), self._to_constraint(selector.input_product)) else: raise ValueError('Unrecognized Selector type: {}'.format(selector)) self._native.lib.task_end(self._scheduler) def trace(self, roots): """Yields a stringified 'stacktrace' starting from the given failed root. :param iterable roots: An iterable of the root nodes to begin the trace from. """ return "TODO: Restore trace (see: #4007)." def visualize_graph_to_file(self, filename): """Visualize a graph walk by writing graphviz `dot` output to a file. :param iterable roots: An iterable of the root nodes to begin the graph walk from. :param str filename: The filename to output the graphviz output to. """ with self._product_graph_lock: self._native.lib.graph_visualize(self._scheduler, bytes(filename)) def build_request(self, goals, subjects): """Translate the given goal names into product types, and return an ExecutionRequest. :param goals: The list of goal names supplied on the command line. :type goals: list of string :param subjects: A list of Spec and/or PathGlobs objects. :type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or :class:`pants.engine.fs.PathGlobs` objects. :returns: An ExecutionRequest for the given goals and subjects. """ return self.execution_request([self._products_by_goal[goal_name] for goal_name in goals], subjects) def execution_request(self, products, subjects): """Create and return an ExecutionRequest for the given products and subjects. The resulting ExecutionRequest object will contain keys tied to this scheduler's product Graph, and so it will not be directly usable with other scheduler instances without being re-created. An ExecutionRequest for an Address represents exactly one product output, as does SingleAddress. But we differentiate between them here in order to normalize the output for all Spec objects as "list of product". :param products: A list of product types to request for the roots. :type products: list of types :param subjects: A list of Spec and/or PathGlobs objects. :type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or :class:`pants.engine.fs.PathGlobs` objects. :returns: An ExecutionRequest for the given products and subjects. """ return ExecutionRequest(tuple((s, Select(p)) for s in subjects for p in products)) def selection_request(self, requests): """Create and return an ExecutionRequest for the given (selector, subject) tuples. This method allows users to specify their own selectors. It has the potential to replace execution_request, which is a subset of this method, because it uses default selectors. :param requests: A list of (selector, subject) tuples. :return: An ExecutionRequest for the given selectors and subjects. """ #TODO: Think about how to deprecate the existing execution_request API. 
return ExecutionRequest(tuple((subject, selector) for selector, subject in requests)) @contextmanager def locked(self): with self._product_graph_lock: yield def root_entries(self, execution_request): """Returns the roots for the given ExecutionRequest as a dict of tuples to State.""" with self._product_graph_lock: if self._execution_request is not execution_request: raise AssertionError( "Multiple concurrent executions are not supported! {} vs {}".format( self._execution_request, execution_request)) raw_roots = self._native.gc(self._native.lib.execution_roots(self._scheduler), self._native.lib.nodes_destroy) roots = {} for root, raw_root in zip(execution_request.roots, self._native.unpack(raw_roots.nodes_ptr, raw_roots.nodes_len)): if raw_root.union_tag is 0: state = None elif raw_root.union_tag is 1: state = Return(self._from_value(raw_root.union_return)) elif raw_root.union_tag is 2: state = Throw("Failed") elif raw_root.union_tag is 3: state = Throw("Nooped") else: raise ValueError('Unrecognized State type `{}` on: {}'.format(raw_root.union_tag, raw_root)) roots[root] = state return roots def invalidate_files(self, filenames): """Calls `Graph.invalidate_files()` against an internal product Graph instance.""" subjects = set(generate_fs_subjects(filenames)) subject_keys = list(self._to_key(subject) for subject in subjects) with self._product_graph_lock: invalidated = self._native.lib.graph_invalidate(self._scheduler, subject_keys, len(subject_keys)) logger.debug('invalidated %d nodes for subjects: %s', invalidated, subjects) return invalidated def node_count(self): with self._product_graph_lock: return self._native.lib.graph_len(self._scheduler) def _execution_next(self, completed): # Unzip into two arrays. returns_ids, returns_states, throws_ids = [], [], [] for cid, c in completed: if type(c) is Return: returns_ids.append(cid) returns_states.append(self._to_value(c.value)) elif type(c) is Throw: throws_ids.append(cid) else: raise ValueError("Unexpected `Completed` state from Runnable execution: {}".format(c)) # Run, then collect the outputs from the Scheduler's RawExecution struct. self._native.lib.execution_next(self._scheduler, returns_ids, returns_states, len(returns_ids), throws_ids, len(throws_ids)) def decode_runnable(raw): return ( raw.id, Runnable(self._from_id(raw.func.id_), tuple(self._from_value(arg) for arg in self._native.unpack(raw.args_ptr, raw.args_len)), bool(raw.cacheable)) ) runnables = [decode_runnable(r) for r in self._native.unpack(self._scheduler.execution.runnables_ptr, self._scheduler.execution.runnables_len)] # Rezip from two arrays. return runnables def _execution_add_roots(self, execution_request): if self._execution_request is not None: self._native.lib.execution_reset(self._scheduler) self._execution_request = execution_request for subject, selector in execution_request.roots: if type(selector) is Select: self._native.lib.execution_add_root_select(self._scheduler, self._to_key(subject), self._to_constraint(selector.product)) elif type(selector) is SelectDependencies: self._native.lib.execution_add_root_select_dependencies(self._scheduler, self._to_key(subject), self._to_constraint(selector.product), self._to_constraint(selector.dep_product), self._to_key(selector.field), selector.transitive) else: raise ValueError('Unsupported root selector type: {}'.format(selector)) def schedule(self, execution_request): """Yields batches of Steps until the roots specified by the request have been completed. 
This method should be called by exactly one scheduling thread, but the Step objects returned by this method are intended to be executed in multiple threads, and then satisfied by the scheduling thread. """ with self._product_graph_lock: start_time = time.time() # Reset execution, and add any roots from the request. self._execution_add_roots(execution_request) # Yield nodes that are Runnable, and then compute new ones. completed = [] outstanding_runnable = set() runnable_count, scheduling_iterations = 0, 0 while True: # Call the scheduler to create Runnables for the Engine. runnable = self._execution_next(completed) outstanding_runnable.difference_update(i for i, _ in completed) outstanding_runnable.update(i for i, _ in runnable) if not runnable and not outstanding_runnable: # Finished. break # The double yield here is intentional, and assumes consumption of this generator in # a `for` loop with a `generator.send(completed)` call in the body of the loop. completed = yield runnable yield runnable_count += len(runnable) scheduling_iterations += 1 if self._native.visualize_to_dir is not None: name = 'run.{}.dot'.format(self._run_count) self._run_count += 1 self.visualize_graph_to_file(os.path.join(self._native.visualize_to_dir, name)) logger.debug( 'ran %s scheduling iterations and %s runnables in %f seconds. ' 'there are %s total nodes.', scheduling_iterations, runnable_count, time.time() - start_time, self._native.lib.graph_len(self._scheduler) )
apache-2.0
4,387,969,145,928,732,700
46.864407
120
0.608964
false
4.536026
false
false
false
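The scheduler source in the entry above documents a send()-driven generator protocol for schedule(): each yielded batch of runnables must be answered with a list of (id, state) results via generator.send(). A minimal consumption sketch under that assumption follows; the execute_runnable callable and the way Return/Throw states are produced are stand-ins for whatever the surrounding engine provides, not names confirmed by this entry.

def drive(scheduler, execution_request, execute_runnable):
    # Drive the schedule() generator: pull a batch, run it, send the results back.
    generator = scheduler.schedule(execution_request)
    for batch in generator:
        completed = []
        for entry_id, runnable in batch:
            # execute_runnable is assumed to return a Return(...) or Throw(...) state.
            completed.append((entry_id, execute_runnable(runnable)))
        generator.send(completed)  # answered by the bare `yield` inside schedule()
    # Once the generator is exhausted, the root states can be read back.
    return scheduler.root_entries(execution_request)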
SuperFriendBFG/PyBreakout
Game/Scenes/GameOverScene.py
1
1454
import pygame from Game.Scenes.Scene import Scene from Game.Shared import * from Game import Highscore class GameOverScene(Scene): def __init__(self, game): super(GameOverScene, self).__init__(game) self.__playerName = "" self.__highscoreSprite = pygame.image.load(GameConstants.SPRITE_HIGHSCORE) def render(self): self.getGame().screen.blit(self.__highscoreSprite, (50, 50)) self.clearText() self.addText("Your Name: ", 300, 200, size=30) self.addText(self.__playerName, 420, 200, size=30) super(GameOverScene, self).render() def handleEvents(self, events): super(GameOverScene, self).handleEvents(events) for event in events: if event.type == pygame.QUIT: exit() if event.type == pygame.KEYDOWN: if event.key == pygame.K_RETURN: game = self.getGame() Highscore().add(self.__playerName, game.getScore()) game.reset() game.changeScene(GameConstants.HIGHSCORE_SCENE) elif event.key >= 65 and event.key <= 122: self.__playerName += chr(event.key) if event.key == pygame.K_F1: self.getGame().reset() self.getGame().changeScene(GameConstants.PLAYING_SCENE)
gpl-3.0
4,623,302,732,384,540,000
31.090909
83
0.541265
false
4.027701
false
false
false
BertrandBordage/django-postgrefts
postgrefts/migrations/0001_initial.py
1
2798
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import postgrefts.fields class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='Index', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('language', models.CharField(max_length=5, db_index=True)), ('object_id', models.PositiveIntegerField()), ('url', models.CharField(max_length=300)), ('thumbnail_url', models.CharField(max_length=300, blank=True)), ('boost', models.FloatField(default=1.0)), ('title', models.CharField(max_length=100)), ('body', models.TextField(blank=True)), ('title_search', postgrefts.fields.VectorField(default='', serialize=False, editable=False)), ('body_search', postgrefts.fields.VectorField(default='', serialize=False, editable=False)), ('content_type', models.ForeignKey(to='contenttypes.ContentType')), ], options={ }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='index', unique_together=set([('language', 'content_type', 'object_id')]), ), migrations.RunSQL(""" CREATE EXTENSION IF NOT EXISTS unaccent; CREATE EXTENSION IF NOT EXISTS btree_gin; CREATE TEXT SEARCH CONFIGURATION fr ( COPY = french ); CREATE TEXT SEARCH DICTIONARY fr_stop ( TEMPLATE = simple, StopWords = 'french', Accept = false ); -- myspell-fr must be installed in order to get this dict working. CREATE TEXT SEARCH DICTIONARY fr_ispell ( TEMPLATE = ispell, DictFile = 'fr', AffFile = 'fr' ); CREATE TEXT SEARCH DICTIONARY fr_stem ( TEMPLATE = snowball, Language = 'french' ); ALTER TEXT SEARCH CONFIGURATION fr ALTER MAPPING FOR asciihword, asciiword WITH fr_stop, fr_ispell, simple; ALTER TEXT SEARCH CONFIGURATION fr ALTER MAPPING FOR hword, hword_asciipart, hword_part, word WITH fr_stop, fr_ispell, unaccent, simple; CREATE INDEX content_type_id_title_search ON postgrefts_index USING gin(content_type_id, title_search); CREATE INDEX title_search ON postgrefts_index USING gin(title_search); CREATE INDEX body_search ON postgrefts_index USING gin(body_search); """), ]
bsd-3-clause
-3,996,278,163,791,316,000
42.046154
117
0.572552
false
4.311248
false
false
false
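The migration above creates two tsvector columns (title_search, body_search) with GIN indexes and a custom 'fr' text search configuration. A hedged sketch of how such columns could be queried with plain PostgreSQL full-text operators through Django's raw cursor; the table and column names come from the migration itself, while the helper function below is purely illustrative and not part of postgrefts.

from django.db import connection

def search_titles(terms, config='fr'):
    # Match the GIN-indexed tsvector column against a plain-text query string.
    with connection.cursor() as cursor:
        cursor.execute(
            "SELECT object_id, title FROM postgrefts_index "
            "WHERE title_search @@ plainto_tsquery(%s::regconfig, %s)",
            [config, terms],
        )
        return cursor.fetchall()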
zen4ever/django-dynatree
treewidget/apps/categories/models.py
1
2254
from django.db import models from django_extensions.db.fields import AutoSlugField import mptt from urlparse import urljoin class Category(models.Model): parent = models.ForeignKey('self', null=True, blank=True, related_name='children') name = models.CharField(max_length=50) slug = AutoSlugField(max_length=50, overwrite=True, populate_from='name') url = models.TextField(editable=False) class Meta: verbose_name_plural = "categories" unique_together = (("name", "slug", "parent"), ) ordering = ("tree_id", "lft") def __unicode__(self): return self.url def save(self, force_insert=False, force_update=False, **kwargs): super(Category, self).save( force_insert=force_insert, force_update=force_update, **kwargs) self.update_url() def get_tree(self, *args): """ Return the tree structure for this element """ level_representation = "--" if self.level == 0: node = "| " else: node = "+ " _tree_structure = node + level_representation * self.level return _tree_structure get_tree.short_description = 'tree' def get_repr(self, *args): """ Return the branch representation for this element """ level_representation = "--" if self.level == 0: node = "| " else: node = "+ " _tree_structure = node + level_representation * self.level + ' ' + self.name return _tree_structure get_repr.short_description = 'representation' def tree_order(self): return str(self.tree_id) + str(self.lft) def update_url(self): """ Updates the url for this Category and all children Categories. """ url = urljoin(getattr(self.parent, 'url', '') + '/', self.slug) if url != self.url: self.url = url self.save() for child in self.get_children(): child.update_url() mptt.register(Category, order_insertion_by=['name'])
bsd-2-clause
3,287,626,030,816,427,000
28.657895
84
0.537711
false
4.293333
false
false
false
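A short usage sketch for the Category model above (illustrative only, assuming the app is importable as `categories` and its migrations are applied): slugs are auto-populated from `name`, and `update_url()` cascades the recomputed path to child categories on save.

from categories.models import Category

books = Category.objects.create(name="Books")                 # url derived from the slug
scifi = Category.objects.create(name="Sci-Fi", parent=books)  # url = parent url + "/" + slug

books.name = "Paper Books"
books.save()                                # re-slugs the parent and rewrites child urls too
scifi = Category.objects.get(pk=scifi.pk)   # reload to see the cascaded url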
QuantumTechDevStudio/RUDNEVGAUSS
rms/run_manager_approx2.py
1
3432
# -*- coding: utf-8 -*-
import os
from rms import Run
import time
import math

######################################################################################################################################
#           Program for managing runs. Usage guide                                                                                   #
# 1) Set the number of runs and assign the path to the root directory to a variable                                                  #
# 2) Set the task (for now this only affects the folder hierarchy)                                                                   #
# 3) Set the version (likewise)                                                                                                      #
# 4) Set the name of the computation script                                                                                          #
# 5) If needed (ONLY IF NEEDED), create the base of our folder hierarchy: os.makedirs("./runs/" + task + "/" + version + "/")        #
# 6) In a loop, create a Run object, set the task parameter dictionary (work in progress) and call run.execute()                     #
# 7) Return to the original directory                                                                                                #
# 8) You can add your own extras, but it is better to do that in your own computation script                                         #
######################################################################################################################################

#n_parralel = 5
n_stat = 4

initial_path = os.getcwd()
task = "approx"
version = "main"
program = "approx_script2.py"
os.makedirs("./runs/" + task + "/" + version + "/")

k = 1
a = 0
b = math.pi*2

dim = 1
m = 10
n_sig = 1
n_sig_max = 50
eps = 1e-9
iters = int(1e5)
max_t = 200
while(n_sig <= n_sig_max):
    for i in range(n_stat):
        run = Run(task, version, program)
        model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
        run.feed_model_info(model_dict)
        run.execute()
        os.chdir(initial_path)
    time.sleep(max_t+5)
    n_sig += 1

dim = 2
n_sig = 1
eps = 1e-9
iters = int(1e5)
max_t = 400
while(n_sig <= n_sig_max):
    for i in range(n_stat):
        run = Run(task, version, program)
        model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
        run.feed_model_info(model_dict)
        run.execute()
        os.chdir(initial_path)
    time.sleep(max_t+5)
    n_sig += 1

dim = 3
n_sig = 1
eps = 1e-9
iters = int(1e5)
max_t = 660
while(n_sig <= n_sig_max):
    run = Run(task, version, program)
    model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
    run.feed_model_info(model_dict)
    run.execute()
    os.chdir(initial_path)
    time.sleep(max_t+5)
    n_sig += 1

print(" ---done--- ")
gpl-3.0
-7,805,784,263,390,462,000
32.264368
134
0.442469
false
2.38099
false
false
false
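A minimal sketch following the numbered usage steps in the header comment of the script above, assuming the same Run API shown there (Run(task, version, program), feed_model_info(dict), execute()); the parameter values below are illustrative.

import os
import math
from rms import Run

initial_path = os.getcwd()
task, version, program = "approx", "main", "approx_script2.py"
os.makedirs("./runs/" + task + "/" + version + "/")   # only if the base hierarchy does not exist yet

run = Run(task, version, program)
run.feed_model_info({"a": 0, "b": 2 * math.pi, "m": 10, "k": 1, "n_sig": 1,
                     "dim": 1, "iters": int(1e5), "eps": 1e-9, "max_t": 200})
run.execute()
os.chdir(initial_path)                                # return to the original directory afterwards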
hongzhouye/frankenstein
scf/rhfatom.py
1
6064
""" Spin-restricted Hartree-Fock for atom """ import numpy as np import scipy.linalg as slg from frankenstein import molecule, scf from frankenstein.tools.mol_utils import get_norb_l from frankenstein.tools.scf_utils import get_fock, get_fock_ao_direct, \ get_scf_energy from frankenstein.data.atom_data import get_atomic_number, get_nelec_by_l class RHFATOM(scf.RHF): """Basic class for spin-restricted Hartree-Fock for atoms. Note: The idea is to fix the MO occupation to satisfy the aufbau principle. For degenerate shells (l > 0), the occupation is spherically averaged. The following example illustrates this using nitrogen atom. >>> mfa = RHFATOM("N", "cc-pVDZ") >>> print(mfa.mol.bas_l) <<< [0, 0, 0, 1, 1, 2] >>> print(mfa.mol.bas_pure) <<< [False, False, False, False, False, True] >>> print(mfa.idao_by_l) <<< [[0, 1, 2], [3, 6], [9]] >>> print(mfa.occ_vec) <<< [1. 1. 0. 0.5 0.5 0.5 0. 0. 0. 0. 0. 0. 0. 0. ] """ def __init__(self, atomsymb, basis, **kwargs): Z = get_atomic_number(atomsymb) spin = 2 if Z % 2 else 1 atom = molecule.MOL("{:s} 0 0 0".format(atomsymb), basis, spin=spin, \ verbose=0) scf.RHF.__init__(self, atom, orth_hV=False, max_iter=10, conv=5, \ guess="core", **kwargs) self.norb_by_l = self.mol.get_norb_by_l() self.idao_by_l = self.mol.get_idao_by_l() self.occ_vec = self.get_occ_vec() self.initialize() def initialize(self): # we need re-define how we compute Fock matrices and etc. since we are now working in raw AOs (i.e., non-orthogonal) def __rdm1_builder_ez(mo_coeff): id_occ = self.occ_vec > 0 Cocc = mo_coeff[id_occ] return (Cocc * self.occ_vec[id_occ]) @ Cocc.T # Inp: rdm1 in AO; Out: Fock in AO if self.ao_direct: def __fock_builder_ez(Dao): m = self.mol Iao = np.eye(m.nao) return get_fock_ao_direct(m.h, m.Zs, m.xyzs, m.basis, Iao, Dao) else: def __fock_builder_ez(Dao): m = self.mol return get_fock(m.h, m.V, Dao) def __e_scf_builder_ez(fock, rdm1): return get_scf_energy(self.mol.h, fock, rdm1) self.rdm1_builder_ez = __rdm1_builder_ez self.fock_builder_ez = __fock_builder_ez self.e_scf_builder_ez = __e_scf_builder_ez def get_sphave_occ(self): """Get spherically averaged occupation """ nelec_by_l = get_nelec_by_l(self.mol.atoms[0]) max_l = len(self.norb_by_l) ndocc = [0] * max_l nfocc = [0.] * max_l for l in range(max_l): norb_l = self.norb_by_l[l] ndocc[l] = nelec_by_l[l] // (2 * norb_l) nfocc[l] = (nelec_by_l[l] - ndocc[l]*2*norb_l) / float(norb_l) return ndocc, nfocc def get_occ_vec(self): ndocc, nfocc = self.get_sphave_occ() occ_vec = np.zeros(self.mol.nmo) for l,idao in enumerate(self.idao_by_l): norb_l = self.norb_by_l[l] for m in range(norb_l): occ_vec[np.array(idao[:ndocc[l]], dtype=int)+m] = 1. if len(idao) > ndocc[l]: occ_vec[idao[ndocc[l]]+m] = nfocc[l] * 0.5 return occ_vec def Roothaan_step(self): """Diagonalize the spherically averaged Fock matrix. Note: Since AOs with different l's are orthogonal, this "average and diagonalize" process is performed one l-group at a time, and the final MO coefficient matrix will be block diagonalized. """ mo_energy = np.zeros(self.nao) mo_coeff = np.zeros([self.nao, self.nao]) max_l = len(self.idao_by_l) for l in range(max_l): idao = np.array(self.idao_by_l[l], dtype=int) norb_l = self.norb_by_l[l] # compute spherically averaged Fock matrix for shell with a.m. = l fock_l = 0. ovlp_l = 0. 
for m in range(norb_l): fock_l += self.fock[idao+m,:][:,idao+m] ovlp_l += self.mol.S[idao+m,:][:,idao+m] fock_l /= float(norb_l) ovlp_l /= float(norb_l) # diagonalize fl eps_l, C_l = slg.eigh(fock_l, ovlp_l) # construct mo_coeff and mo_energy for m in range(norb_l): mo_energy[idao+m] = eps_l for i,i1 in enumerate(idao): mo_coeff[idao+m,i1+m] = C_l[:,i] self.mo_energy = mo_energy self.mo_coeff = mo_coeff def update(self): if not self.mo_coeff is None: self.rdm1 = (self.mo_coeff * self.occ_vec) @ self.mo_coeff.T elif self.rdm1 is None: raise RuntimeError("Both mo_coeff and rdm1 are None.") self.fock = self.fock_builder_ez(self.rdm1) self.e_scf = self.e_scf_builder_ez(self.fock, self.rdm1) self.S2 = 0. if not self.unrestricted \ else get_uscf_S2(self.rdm1, self.noccs) def get_diis_errmat(self): if self.unrestricted: raise ValueError("Atomic SCF only supports spin-restricted calculations!") else: X = self.fock @ self.rdm1 @ self.mol.S X -= X.T return X if __name__ == "__main__": from frankenstein.data.atom_data import get_atomic_name from frankenstein.tools.io_utils import dumpMat # for Z in range(1,10): for Z in [7]: atom = get_atomic_name(Z) mfa = RHFATOM(atom, "cc-pVDZ") mfa.verbose = 1 print(mfa.mol.bas_l) print(mfa.mol.bas_pure) print(mfa.idao_by_l) print(mfa.occ_vec) # mfa.kernel() # print(np.trace(mfa.rdm1@mfa.mol.S)) # print(mfa.e_scf, "\n") # dumpMat(mfa.mo_energy) # dumpMat(mfa.mo_coeff) # dumpMat(mfa.occ_vec) # dumpMat((mfa.mo_coeff*mfa.occ_vec)@mfa.mo_coeff.T*2.)
bsd-3-clause
1,381,208,671,575,960,600
34.670588
208
0.54535
false
2.828358
false
false
false
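The RHFATOM docstring above explains the spherically averaged occupations using nitrogen as the example. A standalone arithmetic sketch of that averaging rule, hand-rolled here to mirror get_sphave_occ() rather than reuse the class itself:

def sphave_occ(nelec_l, norb_l):
    # Number of fully (doubly) occupied shells with this angular momentum...
    ndocc = nelec_l // (2 * norb_l)
    # ...and the leftover electrons spread evenly over the degenerate orbitals.
    nfocc = (nelec_l - 2 * ndocc * norb_l) / float(norb_l)
    return ndocc, nfocc

print(sphave_occ(4, 1))   # N, l=0: (2, 0.0) -> 1s and 2s doubly occupied
print(sphave_occ(3, 3))   # N, l=1: (0, 1.0) -> one electron in each of the three 2p orbitals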
9h37/pompadour-wiki
pompadour_wiki/pompadour_wiki/apps/wiki/models.py
1
1637
# -*- coding: utf-8 -*- from django.core.files.base import ContentFile from django.db.models.signals import post_delete from django.db import models from django.utils.translation import ugettext from django.core.cache import cache from gitstorage.StorageBackend import GitStorage class Wiki(models.Model): name = models.CharField(max_length=50) slug = models.SlugField(max_length=50) description = models.TextField() gitdir = models.CharField(max_length=512) def __unicode__(self): return self.name @property def repo(self): return GitStorage(self.gitdir) def create_repo(self, user): """ Create repository """ st = GitStorage.create_storage(self.gitdir) content = ContentFile('Home') st.save('Home.md', content) st.commit(user, 'Initialize repository') def invalidate_cache_on_delete(sender, **kwargs): """ When a Wiki is deleted, clear all cache """ cache.clear() # Create empty commit wiki = kwargs.get('instance', None) if not wiki: raise AttributeError, 'instance is NoneType' # current user ??? wiki.repo.commit(None, ugettext(u'Wiki deleted')) post_delete.connect(invalidate_cache_on_delete, sender=Wiki) class WikiNotifier(models.Model): wiki = models.ForeignKey(Wiki) email = models.EmailField(max_length=254) def __unicode__(self): return self.email class Document(models.Model): path = models.CharField(max_length=512) wikipath = models.CharField(max_length=512) is_image = models.BooleanField() def __unicode__(self): return self.path
mit
-8,345,888,732,468,905,000
23.80303
60
0.677459
false
3.815851
false
false
false
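An illustrative sketch (not from the project) of the Wiki model above: create a record, then initialise its git-backed storage with create_repo(). Field and method names come from the model; the import path and the `user` object are assumptions.

from pompadour_wiki.apps.wiki.models import Wiki

wiki = Wiki.objects.create(
    name="Docs", slug="docs",
    description="Team documentation",
    gitdir="/srv/wikis/docs.git",
)
wiki.create_repo(user)   # commits an initial Home.md to the newly created repository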
retooth/morse
morse/slots/search.py
1
1306
#!/usr/bin/python # This file is part of Morse. # # Morse is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Morse is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Morse. If not, see <http://www.gnu.org/licenses/>. from . import app from flask import request, jsonify from ..models.core import User from ..protocols import ajax_triggered @app.route('/search/users.json', methods=['GET']) @ajax_triggered def get_users (): """ Gets a list of users matching GET parameter pattern. :rtype: json """ pattern = request.args.get('pattern') if pattern: users = User.query.filter(User.username.ilike('%' + pattern + '%')).all() else: users = User.query.all() userlist = [] for u in users: userlist.append([u.id, u.username]) return jsonify(users = userlist)
gpl-3.0
7,996,134,308,721,858,000
30.853659
81
0.676876
false
3.818713
false
false
false
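A hedged client-side sketch for the /search/users.json endpoint above, using the `requests` library (not part of the project); host and port are assumed, and the @ajax_triggered decorator may additionally require an XMLHttpRequest header depending on how it is implemented.

import requests

resp = requests.get("http://localhost:5000/search/users.json",
                    params={"pattern": "ali"})
for user_id, username in resp.json()["users"]:
    print(user_id, username)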
YueLinHo/Subversion
tools/dist/release.py
1
52520
#!/usr/bin/env python # python: coding=utf-8 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # About this script: # This script is intended to simplify creating Subversion releases for # any of the supported release lines of Subversion. # It works well with our Apache infrastructure, and should make rolling, # posting, and announcing releases dirt simple. # # This script may be run on a number of platforms, but it is intended to # be run on people.apache.org. As such, it may have dependencies (such # as Python version) which may not be common, but are guaranteed to be # available on people.apache.org. # It'd be kind of nice to use the Subversion python bindings in this script, # but people.apache.org doesn't currently have them installed # Stuff we need import os import re import sys import glob import fnmatch import shutil import urllib2 import hashlib import tarfile import logging import datetime import tempfile import operator import itertools import subprocess import argparse # standard in Python 2.7 # Find ezt, using Subversion's copy, if there isn't one on the system. 
try: import ezt except ImportError: ezt_path = os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0]))) ezt_path = os.path.join(ezt_path, 'build', 'generator') sys.path.append(ezt_path) import ezt sys.path.remove(ezt_path) # Our required / recommended release tool versions by release branch tool_versions = { 'trunk' : { 'autoconf' : ['2.69', '954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'], 'libtool' : ['2.4.6', 'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'], 'swig' : ['3.0.10', '2939aae39dec06095462f1b95ce1c958ac80d07b926e48871046d17c0094f44c'], }, '1.10' : { 'autoconf' : ['2.69', '954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'], 'libtool' : ['2.4.6', 'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'], 'swig' : ['3.0.10', '2939aae39dec06095462f1b95ce1c958ac80d07b926e48871046d17c0094f44c'], }, '1.9' : { 'autoconf' : ['2.69', '954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'], 'libtool' : ['2.4.6', 'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'], 'swig' : ['2.0.12', '65e13f22a60cecd7279c59882ff8ebe1ffe34078e85c602821a541817a4317f7'], }, '1.8' : { 'autoconf' : ['2.69', '954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'], 'libtool' : ['2.4.3', '36b4881c1843d7585de9c66c4c3d9a067ed3a3f792bc670beba21f5a4960acdf'], 'swig' : ['2.0.9', '586954000d297fafd7e91d1ad31089cc7e249f658889d11a44605d3662569539'], }, } # The version that is our current recommended release # ### TODO: derive this from svn_version.h; see ../../build/getversion.py recommended_release = '1.9' # Some constants repos = 'https://svn.apache.org/repos/asf/subversion' secure_repos = 'https://svn.apache.org/repos/asf/subversion' dist_repos = 'https://dist.apache.org/repos/dist' dist_dev_url = dist_repos + '/dev/subversion' dist_release_url = dist_repos + '/release/subversion' KEYS = 'https://people.apache.org/keys/group/subversion.asc' extns = ['zip', 'tar.gz', 'tar.bz2'] #---------------------------------------------------------------------- # Utility functions class Version(object): regex = re.compile(r'(\d+).(\d+).(\d+)(?:-(?:(rc|alpha|beta)(\d+)))?') def __init__(self, ver_str): # Special case the 'trunk-nightly' version if ver_str == 'trunk-nightly': self.major = None self.minor = None self.patch = None self.pre = 'nightly' self.pre_num = None self.base = 'nightly' self.branch = 'trunk' return match = self.regex.search(ver_str) if not match: raise RuntimeError("Bad version string '%s'" % ver_str) self.major = int(match.group(1)) self.minor = int(match.group(2)) self.patch = int(match.group(3)) if match.group(4): self.pre = match.group(4) self.pre_num = int(match.group(5)) else: self.pre = None self.pre_num = None self.base = '%d.%d.%d' % (self.major, self.minor, self.patch) self.branch = '%d.%d' % (self.major, self.minor) def is_prerelease(self): return self.pre != None def is_recommended(self): return self.branch == recommended_release def get_download_anchor(self): if self.is_prerelease(): return 'pre-releases' else: if self.is_recommended(): return 'recommended-release' else: return 'supported-releases' def get_ver_tags(self, revnum): # These get substituted into svn_version.h ver_tag = '' ver_numtag = '' if self.pre == 'alpha': ver_tag = '" (Alpha %d)"' % self.pre_num ver_numtag = '"-alpha%d"' % self.pre_num elif self.pre == 'beta': ver_tag = '" (Beta %d)"' % args.version.pre_num ver_numtag = '"-beta%d"' % self.pre_num elif self.pre == 'rc': ver_tag = '" (Release Candidate %d)"' % self.pre_num 
ver_numtag = '"-rc%d"' % self.pre_num elif self.pre == 'nightly': ver_tag = '" (Nightly Build r%d)"' % revnum ver_numtag = '"-nightly-r%d"' % revnum else: ver_tag = '" (r%d)"' % revnum ver_numtag = '""' return (ver_tag, ver_numtag) def __serialize(self): return (self.major, self.minor, self.patch, self.pre, self.pre_num) def __eq__(self, that): return self.__serialize() == that.__serialize() def __ne__(self, that): return self.__serialize() != that.__serialize() def __hash__(self): return hash(self.__serialize()) def __lt__(self, that): if self.major < that.major: return True if self.major > that.major: return False if self.minor < that.minor: return True if self.minor > that.minor: return False if self.patch < that.patch: return True if self.patch > that.patch: return False if not self.pre and not that.pre: return False if not self.pre and that.pre: return False if self.pre and not that.pre: return True # We are both pre-releases if self.pre != that.pre: return self.pre < that.pre else: return self.pre_num < that.pre_num def __str__(self): "Return an SVN_VER_NUMBER-formatted string, or 'nightly'." if self.pre: if self.pre == 'nightly': return 'nightly' else: extra = '-%s%d' % (self.pre, self.pre_num) else: extra = '' return self.base + extra def __repr__(self): return "Version(%s)" % repr(str(self)) def get_prefix(base_dir): return os.path.join(base_dir, 'prefix') def get_tempdir(base_dir): return os.path.join(base_dir, 'tempdir') def get_workdir(base_dir): return os.path.join(get_tempdir(base_dir), 'working') # The name of this directory is also used to name the tarball and for # the root of paths within the tarball, e.g. subversion-1.9.5 or # subversion-nightly-r1800000 def get_exportdir(base_dir, version, revnum): if version.pre != 'nightly': return os.path.join(get_tempdir(base_dir), 'subversion-'+str(version)) return os.path.join(get_tempdir(base_dir), 'subversion-%s-r%d' % (version, revnum)) def get_deploydir(base_dir): return os.path.join(base_dir, 'deploy') def get_target(args): "Return the location of the artifacts" if args.target: return args.target else: return get_deploydir(args.base_dir) def get_tmpldir(): return os.path.join(os.path.abspath(sys.path[0]), 'templates') def get_tmplfile(filename): try: return open(os.path.join(get_tmpldir(), filename)) except IOError: # Hmm, we had a problem with the local version, let's try the repo return urllib2.urlopen(repos + '/trunk/tools/dist/templates/' + filename) def get_nullfile(): return open(os.path.devnull, 'w') def run_script(verbose, script, hide_stderr=False): stderr = None if verbose: stdout = None else: stdout = get_nullfile() if hide_stderr: stderr = get_nullfile() for l in script.split('\n'): subprocess.check_call(l.split(), stdout=stdout, stderr=stderr) def download_file(url, target, checksum): response = urllib2.urlopen(url) target_file = open(target, 'w+') target_file.write(response.read()) target_file.seek(0) m = hashlib.sha256() m.update(target_file.read()) target_file.close() checksum2 = m.hexdigest() if checksum != checksum2: raise RuntimeError("Checksum mismatch for '%s': "\ "downloaded: '%s'; expected: '%s'" % \ (target, checksum, checksum2)) #---------------------------------------------------------------------- # ezt helpers # In ezt, «[if-any foo]» is true when «data['foo'] == False», # hence, provide this constant for readability. ezt_False = "" # And this constant for symmetry. ezt_True = True # And this for convenience. 
def ezt_bool(boolean_value): return ezt_True if boolean_value else ezt_False #---------------------------------------------------------------------- # Cleaning up the environment def cleanup(args): 'Remove generated files and folders.' logging.info('Cleaning') shutil.rmtree(get_prefix(args.base_dir), True) shutil.rmtree(get_tempdir(args.base_dir), True) shutil.rmtree(get_deploydir(args.base_dir), True) #---------------------------------------------------------------------- # Creating an environment to roll the release class RollDep(object): 'The super class for each of the build dependencies.' def __init__(self, base_dir, use_existing, verbose): self._base_dir = base_dir self._use_existing = use_existing self._verbose = verbose def _test_version(self, cmd): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (stdout, stderr) = proc.communicate() rc = proc.wait() if rc: return '' return stdout.split('\n') def build(self): if not hasattr(self, '_extra_configure_flags'): self._extra_configure_flags = '' cwd = os.getcwd() tempdir = get_tempdir(self._base_dir) tarball = os.path.join(tempdir, self._filebase + '.tar.gz') if os.path.exists(tarball): if not self._use_existing: raise RuntimeError('autoconf tarball "%s" already exists' % tarball) logging.info('Using existing %s.tar.gz' % self._filebase) else: logging.info('Fetching %s' % self._filebase) download_file(self._url, tarball, self._checksum) # Extract tarball tarfile.open(tarball).extractall(tempdir) logging.info('Building ' + self.label) os.chdir(os.path.join(tempdir, self._filebase)) run_script(self._verbose, '''./configure --prefix=%s %s make make install''' % (get_prefix(self._base_dir), self._extra_configure_flags)) os.chdir(cwd) class AutoconfDep(RollDep): def __init__(self, base_dir, use_existing, verbose, autoconf_ver, checksum): RollDep.__init__(self, base_dir, use_existing, verbose) self.label = 'autoconf' self._filebase = 'autoconf-' + autoconf_ver self._autoconf_ver = autoconf_ver self._url = 'https://ftp.gnu.org/gnu/autoconf/%s.tar.gz' % self._filebase self._checksum = checksum def have_usable(self): output = self._test_version(['autoconf', '-V']) if not output: return False version = output[0].split()[-1:][0] return version == self._autoconf_ver def use_system(self): if not self._use_existing: return False return self.have_usable() class LibtoolDep(RollDep): def __init__(self, base_dir, use_existing, verbose, libtool_ver, checksum): RollDep.__init__(self, base_dir, use_existing, verbose) self.label = 'libtool' self._filebase = 'libtool-' + libtool_ver self._libtool_ver = libtool_ver self._url = 'https://ftp.gnu.org/gnu/libtool/%s.tar.gz' % self._filebase self._checksum = checksum def have_usable(self): output = self._test_version(['libtool', '--version']) if not output: return False return self._libtool_ver in output[0] def use_system(self): # We unconditionally return False here, to avoid using a borked # system libtool (I'm looking at you, Debian). 
return False def build(self): RollDep.build(self) # autogen.sh looks for glibtoolize before libtoolize bin_dir = os.path.join(get_prefix(self._base_dir), "bin") os.symlink("libtoolize", os.path.join(bin_dir, "glibtoolize")) os.symlink("libtool", os.path.join(bin_dir, "glibtool")) class SwigDep(RollDep): def __init__(self, base_dir, use_existing, verbose, swig_ver, checksum, sf_mirror): RollDep.__init__(self, base_dir, use_existing, verbose) self.label = 'swig' self._filebase = 'swig-' + swig_ver self._swig_ver = swig_ver self._url = 'https://sourceforge.net/projects/swig/files/swig/%(swig)s/%(swig)s.tar.gz/download?use_mirror=%(sf_mirror)s' % \ { 'swig' : self._filebase, 'sf_mirror' : sf_mirror } self._checksum = checksum self._extra_configure_flags = '--without-pcre' def have_usable(self): output = self._test_version(['swig', '-version']) if not output: return False version = output[1].split()[-1:][0] return version == self._swig_ver def use_system(self): if not self._use_existing: return False return self.have_usable() def build_env(args): 'Download prerequisites for a release and prepare the environment.' logging.info('Creating release environment') try: os.mkdir(get_prefix(args.base_dir)) os.mkdir(get_tempdir(args.base_dir)) except OSError: if not args.use_existing: raise autoconf = AutoconfDep(args.base_dir, args.use_existing, args.verbose, tool_versions[args.version.branch]['autoconf'][0], tool_versions[args.version.branch]['autoconf'][1]) libtool = LibtoolDep(args.base_dir, args.use_existing, args.verbose, tool_versions[args.version.branch]['libtool'][0], tool_versions[args.version.branch]['libtool'][1]) swig = SwigDep(args.base_dir, args.use_existing, args.verbose, tool_versions[args.version.branch]['swig'][0], tool_versions[args.version.branch]['swig'][1], args.sf_mirror) # iterate over our rolling deps, and build them if needed for dep in [autoconf, libtool, swig]: if dep.use_system(): logging.info('Using system %s' % dep.label) else: dep.build() #---------------------------------------------------------------------- # Create release artifacts def compare_changes(repos, branch, revision): mergeinfo_cmd = ['svn', 'mergeinfo', '--show-revs=eligible', repos + '/trunk/CHANGES', repos + '/' + branch + '/' + 'CHANGES'] stdout = subprocess.check_output(mergeinfo_cmd) if stdout: # Treat this as a warning since we are now putting entries for future # minor releases in CHANGES on trunk. logging.warning('CHANGES has unmerged revisions: %s' % stdout.replace("\n", " ")) _current_year = str(datetime.datetime.now().year) _copyright_re = re.compile(r'Copyright (?:\(C\) )?(?P<year>[0-9]+)' r' The Apache Software Foundation', re.MULTILINE) def check_copyright_year(repos, branch, revision): def check_file(branch_relpath): file_url = (repos + '/' + branch + '/' + branch_relpath + '@' + str(revision)) cat_cmd = ['svn', 'cat', file_url] stdout = subprocess.check_output(cat_cmd) m = _copyright_re.search(stdout) if m: year = m.group('year') else: year = None if year != _current_year: logging.warning('Copyright year in ' + branch_relpath + ' is not the current year') check_file('NOTICE') check_file('subversion/libsvn_subr/version.c') def replace_lines(path, actions): with open(path, 'r') as old_content: lines = old_content.readlines() with open(path, 'w') as new_content: for line in lines: for start, pattern, repl in actions: if line.startswith(start): line = re.sub(pattern, repl, line) new_content.write(line) def roll_tarballs(args): 'Create the release artifacts.' 
if not args.branch: args.branch = 'branches/%d.%d.x' % (args.version.major, args.version.minor) branch = args.branch # shorthand branch = branch.rstrip('/') # canonicalize for later comparisons logging.info('Rolling release %s from branch %s@%d' % (args.version, branch, args.revnum)) check_copyright_year(repos, args.branch, args.revnum) # Ensure we've got the appropriate rolling dependencies available autoconf = AutoconfDep(args.base_dir, False, args.verbose, tool_versions[args.version.branch]['autoconf'][0], tool_versions[args.version.branch]['autoconf'][1]) libtool = LibtoolDep(args.base_dir, False, args.verbose, tool_versions[args.version.branch]['libtool'][0], tool_versions[args.version.branch]['libtool'][1]) swig = SwigDep(args.base_dir, False, args.verbose, tool_versions[args.version.branch]['swig'][0], tool_versions[args.version.branch]['swig'][1], None) for dep in [autoconf, libtool, swig]: if not dep.have_usable(): raise RuntimeError('Cannot find usable %s' % dep.label) if branch != 'trunk': # Make sure CHANGES is sync'd. compare_changes(repos, branch, args.revnum) # Ensure the output directory doesn't already exist if os.path.exists(get_deploydir(args.base_dir)): raise RuntimeError('output directory \'%s\' already exists' % get_deploydir(args.base_dir)) os.mkdir(get_deploydir(args.base_dir)) logging.info('Preparing working copy source') shutil.rmtree(get_workdir(args.base_dir), True) run_script(args.verbose, 'svn checkout %s %s' % (repos + '/' + branch + '@' + str(args.revnum), get_workdir(args.base_dir))) # Exclude stuff we don't want in the tarball, it will not be present # in the exported tree. exclude = ['contrib', 'notes'] if branch != 'trunk': exclude += ['STATUS'] if args.version.minor < 7: exclude += ['packages', 'www'] cwd = os.getcwd() os.chdir(get_workdir(args.base_dir)) run_script(args.verbose, 'svn update --set-depth exclude %s' % " ".join(exclude)) os.chdir(cwd) if args.patches: # Assume patches are independent and can be applied in any # order, no need to sort. majmin = '%d.%d' % (args.version.major, args.version.minor) for name in os.listdir(args.patches): if name.find(majmin) != -1 and name.endswith('patch'): logging.info('Applying patch %s' % name) run_script(args.verbose, '''svn patch %s %s''' % (os.path.join(args.patches, name), get_workdir(args.base_dir))) # Massage the new version number into svn_version.h. ver_tag, ver_numtag = args.version.get_ver_tags(args.revnum) replacements = [('#define SVN_VER_TAG', '".*"', ver_tag), ('#define SVN_VER_NUMTAG', '".*"', ver_numtag), ('#define SVN_VER_REVISION', '[0-9][0-9]*', str(args.revnum))] if args.version.pre != 'nightly': # SVN_VER_PATCH might change for security releases, e.g., when # releasing 1.9.7 from the magic revision of 1.9.6. # # ### Would SVN_VER_MAJOR / SVN_VER_MINOR ever change? # ### Note that SVN_VER_MINOR is duplicated in some places, see # ### <https://subversion.apache.org/docs/community-guide/releasing.html#release-branches> replacements += [('#define SVN_VER_MAJOR', '[0-9][0-9]*', str(args.version.major)), ('#define SVN_VER_MINOR', '[0-9][0-9]*', str(args.version.minor)), ('#define SVN_VER_PATCH', '[0-9][0-9]*', str(args.version.patch))] replace_lines(os.path.join(get_workdir(args.base_dir), 'subversion', 'include', 'svn_version.h'), replacements) # Basename for export and tarballs, e.g. 
subversion-1.9.5 or # subversion-nightly-r1800000 exportdir = get_exportdir(args.base_dir, args.version, args.revnum) basename = os.path.basename(exportdir) def export(windows): shutil.rmtree(exportdir, True) if windows: eol_style = "--native-eol CRLF" else: eol_style = "--native-eol LF" run_script(args.verbose, "svn export %s %s %s" % (eol_style, get_workdir(args.base_dir), exportdir)) def transform_sql(): for root, dirs, files in os.walk(exportdir): for fname in files: if fname.endswith('.sql'): run_script(args.verbose, 'python build/transform_sql.py %s/%s %s/%s' % (root, fname, root, fname[:-4] + '.h')) def clean_autom4te(): for root, dirs, files in os.walk(get_workdir(args.base_dir)): for dname in dirs: if dname.startswith('autom4te') and dname.endswith('.cache'): shutil.rmtree(os.path.join(root, dname)) logging.info('Building Windows tarballs') export(windows=True) os.chdir(exportdir) transform_sql() # Can't use the po-update.sh in the Windows export since it has CRLF # line endings and won't run, so use the one in the working copy. run_script(args.verbose, '%s/tools/po/po-update.sh pot' % get_workdir(args.base_dir)) os.chdir(cwd) clean_autom4te() # dist.sh does it but pointless on Windows? os.chdir(get_tempdir(args.base_dir)) run_script(args.verbose, 'zip -q -r %s %s' % (basename + '.zip', basename)) os.chdir(cwd) logging.info('Building Unix tarballs') export(windows=False) os.chdir(exportdir) transform_sql() run_script(args.verbose, '''tools/po/po-update.sh pot ./autogen.sh --release''', hide_stderr=True) # SWIG is noisy os.chdir(cwd) clean_autom4te() # dist.sh does it but probably pointless # Do not use tar, it's probably GNU tar which produces tar files # that are not compliant with POSIX.1 when including filenames # longer than 100 chars. Platforms without a tar that understands # the GNU tar extension will not be able to extract the resulting # tar file. Use pax to produce POSIX.1 tar files. # # Use the gzip -n flag - this prevents it from storing the # original name of the .tar file, and far more importantly, the # mtime of the .tar file, in the produced .tar.gz file. This is # important, because it makes the gzip encoding reproducable by # anyone else who has an similar version of gzip, and also uses # "gzip -9n". This means that committers who want to GPG-sign both # the .tar.gz and the .tar.bz2 can download the .tar.bz2 (which is # smaller), and locally generate an exact duplicate of the # official .tar.gz file. This metadata is data on the temporary # uncompressed tarball itself, not any of its contents, so there # will be no effect on end-users. os.chdir(get_tempdir(args.base_dir)) run_script(args.verbose, '''pax -x ustar -w -f %s %s bzip2 -9fk %s gzip -9nf %s''' % (basename + '.tar', basename, basename + '.tar', basename + '.tar')) os.chdir(cwd) # Move the results to the deploy directory logging.info('Moving artifacts and calculating checksums') for e in extns: filename = basename + '.' 
+ e filepath = os.path.join(get_tempdir(args.base_dir), filename) shutil.move(filepath, get_deploydir(args.base_dir)) filepath = os.path.join(get_deploydir(args.base_dir), filename) m = hashlib.sha1() m.update(open(filepath, 'r').read()) open(filepath + '.sha1', 'w').write(m.hexdigest()) m = hashlib.sha512() m.update(open(filepath, 'r').read()) open(filepath + '.sha512', 'w').write(m.hexdigest()) # Nightlies do not get tagged so do not need the header if args.version.pre != 'nightly': shutil.copy(os.path.join(get_workdir(args.base_dir), 'subversion', 'include', 'svn_version.h'), os.path.join(get_deploydir(args.base_dir), 'svn_version.h.dist-%s' % str(args.version))) # And we're done! #---------------------------------------------------------------------- # Sign the candidate release artifacts def sign_candidates(args): 'Sign candidate artifacts in the dist development directory.' def sign_file(filename): asc_file = open(filename + '.asc', 'a') logging.info("Signing %s" % filename) proc = subprocess.check_call(['gpg', '-ba', '-o', '-', filename], stdout=asc_file) asc_file.close() target = get_target(args) for e in extns: filename = os.path.join(target, 'subversion-%s.%s' % (args.version, e)) sign_file(filename) if args.version.major >= 1 and args.version.minor <= 6: filename = os.path.join(target, 'subversion-deps-%s.%s' % (args.version, e)) sign_file(filename) #---------------------------------------------------------------------- # Post the candidate release artifacts def post_candidates(args): 'Post candidate artifacts to the dist development directory.' target = get_target(args) logging.info('Importing tarballs to %s' % dist_dev_url) ver = str(args.version) svn_cmd = ['svn', 'import', '-m', 'Add Subversion %s candidate release artifacts' % ver, '--auto-props', '--config-option', 'config:auto-props:*.asc=svn:eol-style=native;svn:mime-type=text/plain', target, dist_dev_url] if (args.username): svn_cmd += ['--username', args.username] subprocess.check_call(svn_cmd) #---------------------------------------------------------------------- # Create tag def create_tag(args): 'Create tag in the repository' target = get_target(args) logging.info('Creating tag for %s' % str(args.version)) if not args.branch: args.branch = 'branches/%d.%d.x' % (args.version.major, args.version.minor) branch = secure_repos + '/' + args.branch.rstrip('/') tag = secure_repos + '/tags/' + str(args.version) svnmucc_cmd = ['svnmucc', '-m', 'Tagging release ' + str(args.version)] if (args.username): svnmucc_cmd += ['--username', args.username] svnmucc_cmd += ['cp', str(args.revnum), branch, tag] svnmucc_cmd += ['put', os.path.join(target, 'svn_version.h.dist' + '-' + str(args.version)), tag + '/subversion/include/svn_version.h'] # don't redirect stdout/stderr since svnmucc might ask for a password try: subprocess.check_call(svnmucc_cmd) except subprocess.CalledProcessError: if args.version.is_prerelease(): logging.error("Do you need to pass --branch=trunk?") raise if not args.version.is_prerelease(): logging.info('Bumping revisions on the branch') def replace_in_place(fd, startofline, flat, spare): """In file object FD, replace FLAT with SPARE in the first line starting with STARTOFLINE.""" fd.seek(0, os.SEEK_SET) lines = fd.readlines() for i, line in enumerate(lines): if line.startswith(startofline): lines[i] = line.replace(flat, spare) break else: raise RuntimeError('Definition of %r not found' % startofline) fd.seek(0, os.SEEK_SET) fd.writelines(lines) fd.truncate() # for current callers, new value is never shorter. 
new_version = Version('%d.%d.%d' % (args.version.major, args.version.minor, args.version.patch + 1)) def file_object_for(relpath): fd = tempfile.NamedTemporaryFile() url = branch + '/' + relpath fd.url = url subprocess.check_call(['svn', 'cat', '%s@%d' % (url, args.revnum)], stdout=fd) return fd svn_version_h = file_object_for('subversion/include/svn_version.h') replace_in_place(svn_version_h, '#define SVN_VER_PATCH ', str(args.version.patch), str(new_version.patch)) STATUS = file_object_for('STATUS') replace_in_place(STATUS, 'Status of ', str(args.version), str(new_version)) svn_version_h.seek(0, os.SEEK_SET) STATUS.seek(0, os.SEEK_SET) subprocess.check_call(['svnmucc', '-r', str(args.revnum), '-m', 'Post-release housekeeping: ' 'bump the %s branch to %s.' % (branch.split('/')[-1], str(new_version)), 'put', svn_version_h.name, svn_version_h.url, 'put', STATUS.name, STATUS.url, ]) del svn_version_h del STATUS #---------------------------------------------------------------------- # Clean dist def clean_dist(args): 'Clean the distribution directory of all but the most recent artifacts.' stdout = subprocess.check_output(['svn', 'list', dist_release_url]) def minor(version): """Return the minor release line of the parameter, which must be a Version object.""" return (version.major, version.minor) filenames = stdout.split('\n') filenames = filter(lambda x: x.startswith('subversion-'), filenames) versions = set(map(Version, filenames)) minor_lines = set(map(minor, versions)) to_keep = set() # Keep 3 minor lines: 1.10.0-alpha3, 1.9.7, 1.8.19. # TODO: When we release 1.A.0 GA we'll have to manually remove 1.(A-2).* artifacts. for recent_line in sorted(minor_lines, reverse=True)[:3]: to_keep.add(max( x for x in versions if minor(x) == recent_line )) for i in sorted(to_keep): logging.info("Saving release '%s'", i) svnmucc_cmd = ['svnmucc', '-m', 'Remove old Subversion releases.\n' + 'They are still available at ' + 'https://archive.apache.org/dist/subversion/'] if (args.username): svnmucc_cmd += ['--username', args.username] for filename in filenames: if Version(filename) not in to_keep: logging.info("Removing %r", filename) svnmucc_cmd += ['rm', dist_release_url + '/' + filename] # don't redirect stdout/stderr since svnmucc might ask for a password if 'rm' in svnmucc_cmd: subprocess.check_call(svnmucc_cmd) else: logging.info("Nothing to remove") #---------------------------------------------------------------------- # Move to dist def move_to_dist(args): 'Move candidate artifacts to the distribution directory.' stdout = subprocess.check_output(['svn', 'list', dist_dev_url]) filenames = [] for entry in stdout.split('\n'): if fnmatch.fnmatch(entry, 'subversion-%s.*' % str(args.version)): filenames.append(entry) svnmucc_cmd = ['svnmucc', '-m', 'Publish Subversion-%s.' % str(args.version)] if (args.username): svnmucc_cmd += ['--username', args.username] svnmucc_cmd += ['rm', dist_dev_url + '/' + 'svn_version.h.dist' + '-' + str(args.version)] for filename in filenames: svnmucc_cmd += ['mv', dist_dev_url + '/' + filename, dist_release_url + '/' + filename] # don't redirect stdout/stderr since svnmucc might ask for a password logging.info('Moving release artifacts to %s' % dist_release_url) subprocess.check_call(svnmucc_cmd) #---------------------------------------------------------------------- # Write announcements def write_news(args): 'Write text for the Subversion website.' 
data = { 'date' : datetime.date.today().strftime('%Y%m%d'), 'date_pres' : datetime.date.today().strftime('%Y-%m-%d'), 'major-minor' : args.version.branch, 'version' : str(args.version), 'version_base' : args.version.base, 'anchor': args.version.get_download_anchor(), 'is_recommended': ezt_bool(args.version.is_recommended()), } if args.version.is_prerelease(): template_filename = 'rc-news.ezt' else: template_filename = 'stable-news.ezt' template = ezt.Template() template.parse(get_tmplfile(template_filename).read()) template.generate(sys.stdout, data) def get_sha1info(args): 'Return a list of sha1 info for the release' target = get_target(args) sha1s = glob.glob(os.path.join(target, 'subversion*-%s*.sha1' % args.version)) class info(object): pass sha1info = [] for s in sha1s: i = info() # strip ".sha1" i.filename = os.path.basename(s)[:-5] i.sha1 = open(s, 'r').read() sha1info.append(i) return sha1info def write_announcement(args): 'Write the release announcement.' sha1info = get_sha1info(args) siginfo = "\n".join(get_siginfo(args, True)) + "\n" data = { 'version' : str(args.version), 'sha1info' : sha1info, 'siginfo' : siginfo, 'major-minor' : args.version.branch, 'major-minor-patch' : args.version.base, 'anchor' : args.version.get_download_anchor(), } if args.version.is_prerelease(): template_filename = 'rc-release-ann.ezt' else: data['dot-zero'] = ezt_bool(args.version.patch == 0) # TODO: instead of requiring the RM to remember to pass --security, # read the private repository where CVE announcements are staged, # parse the json file that identifies which versions are affected, # and accordingly automagically set data['security']. data['security'] = ezt_bool(args.security) template_filename = 'stable-release-ann.ezt' # The template text assumes these two are mutually exclusive. # If you ever find a reason to make a x.y.0 release with a security # bug, just comment this out and update the template before sending. assert not (data['dot-zero'] and data['security']) template = ezt.Template(compress_whitespace = False) template.parse(get_tmplfile(template_filename).read()) template.generate(sys.stdout, data) def write_downloads(args): 'Output the download section of the website.' sha1info = get_sha1info(args) data = { 'version' : str(args.version), 'fileinfo' : sha1info, } template = ezt.Template(compress_whitespace = False) template.parse(get_tmplfile('download.ezt').read()) template.generate(sys.stdout, data) #---------------------------------------------------------------------- # Validate the signatures for a release key_start = '-----BEGIN PGP SIGNATURE-----' PUBLIC_KEY_ALGORITHMS = { # These values are taken from the RFC's registry at: # https://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 # # The values are callables that produce gpg1-like key length and type # indications, e.g., "4096R" for a 4096-bit RSA key. 1: (lambda keylen: str(keylen) + 'R'), # RSA } def _make_human_readable_fingerprint(fingerprint): return re.compile(r'(....)' * 10).sub(r'\1 \2 \3 \4 \5 \6 \7 \8 \9 \10', fingerprint) def get_siginfo(args, quiet=False): 'Returns a list of signatures for the release.' 
try: import gnupg except ImportError: import security._gnupg as gnupg gpg = gnupg.GPG() target = get_target(args) good_sigs = {} fingerprints = {} output = [] glob_pattern = os.path.join(target, 'subversion*-%s*.asc' % args.version) for filename in glob.glob(glob_pattern): text = open(filename).read() keys = text.split(key_start) if not quiet: logging.info("Checking %d sig(s) in %s" % (len(keys[1:]), filename)) for key in keys[1:]: fd, fn = tempfile.mkstemp() os.write(fd, key_start + key) os.close(fd) verified = gpg.verify_file(open(fn, 'rb'), filename[:-4]) os.unlink(fn) if verified.valid: good_sigs[verified.fingerprint] = True else: sys.stderr.write("BAD SIGNATURE for %s\n" % filename) if verified.key_id: sys.stderr.write(" key id: %s\n" % verified.key_id) sys.exit(1) for id in good_sigs.keys(): # Most potential signers have public short keyid (32-bit) collisions in # the https://evil32.com/ set, which has been uploaded to the # keyservers, so generate the long keyid (see use of LONG_KEY_ID below). # # TODO: in the future it'd be nice to use the 'gnupg' module here. gpg_output = subprocess.check_output( ['gpg', '--fixed-list-mode', '--with-colons', '--fingerprint', id], stderr=subprocess.STDOUT, ) gpg_output = gpg_output.splitlines() # This code was added in r934990, but there was no comment (nor log # message text) explaining its purpose. I've commented it out since # ignoring arbitrary warnings in a verification codepath is Bad. If # you run into warnings on your machine, feel free to uncomment it, # but when you do so please make it match specific warnings only. # #gpg_output = "\n".join([ l for l in gpg_output.splitlines() # if l[0:7] != 'Warning' ]) # Parse gpg's output. This happens to work for both gpg1 and gpg2, # even though their outputs are slightly different. # # See http://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS for line in gpg_output: parts = line.split(':') if parts[0] == 'pub': keylen = int(parts[2]) keytype = int(parts[3]) formatter = PUBLIC_KEY_ALGORITHMS[keytype] long_key_id = parts[4] length_and_type = formatter(keylen) + '/' + long_key_id del keylen, keytype, formatter, long_key_id break else: raise RuntimeError("Failed to determine LONG_KEY_ID") for line in gpg_output: parts = line.split(':') if parts[0] == 'fpr': fingerprint = parts[9] break else: raise RuntimeError("Failed to determine FINGERPRINT") for line in gpg_output: parts = line.split(':') if parts[0] == 'uid': name = parts[9].split(' <')[0] break else: raise RuntimeError("Failed to determine NAME") format_expandos = dict( name=name, length_and_type=length_and_type, fingerprint=_make_human_readable_fingerprint(fingerprint), ) del name, length_and_type, fingerprint line = " {name} [{length_and_type}] with fingerprint:" output.append( line.format(**format_expandos) ) line = " {fingerprint}" output.append( line.format(**format_expandos) ) return output def check_sigs(args): 'Check the signatures for the release.' output = get_siginfo(args) for line in output: print(line) def get_keys(args): 'Import the LDAP-based KEYS file to gpg' # We use a tempfile because urlopen() objects don't have a .fileno() with tempfile.SpooledTemporaryFile() as fd: fd.write(urllib2.urlopen(KEYS).read()) fd.flush() fd.seek(0) subprocess.check_call(['gpg', '--import'], stdin=fd) #---------------------------------------------------------------------- # Main entry point for argument parsing and handling def main(): 'Parse arguments, and drive the appropriate subcommand.' 
# Setup our main parser parser = argparse.ArgumentParser( description='Create an Apache Subversion release.') parser.add_argument('--clean', action='store_true', default=False, help='Remove any directories previously created by %(prog)s') parser.add_argument('--verbose', action='store_true', default=False, help='Increase output verbosity') parser.add_argument('--base-dir', default=os.getcwd(), help='''The directory in which to create needed files and folders. The default is the current working directory.''') subparsers = parser.add_subparsers(title='subcommands') # Setup the parser for the build-env subcommand subparser = subparsers.add_parser('build-env', help='''Download release prerequisistes, including autoconf, libtool, and swig.''') subparser.set_defaults(func=build_env) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('--sf-mirror', default='softlayer', help='''The mirror to use for downloading files from SourceForge. If in the EU, you may want to use 'kent' for this value.''') subparser.add_argument('--use-existing', action='store_true', default=False, help='''Attempt to use existing build dependencies before downloading and building a private set.''') # Setup the parser for the roll subcommand subparser = subparsers.add_parser('roll', help='''Create the release artifacts.''') subparser.set_defaults(func=roll_tarballs) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('revnum', type=lambda arg: int(arg.lstrip('r')), help='''The revision number to base the release on.''') subparser.add_argument('--branch', help='''The branch to base the release on, relative to ^/subversion/.''') subparser.add_argument('--patches', help='''The path to the directory containing patches.''') # Setup the parser for the sign-candidates subcommand subparser = subparsers.add_parser('sign-candidates', help='''Sign the release artifacts.''') subparser.set_defaults(func=sign_candidates) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') # Setup the parser for the post-candidates subcommand subparser = subparsers.add_parser('post-candidates', help='''Commit candidates to the release development area of the dist.apache.org repository.''') subparser.set_defaults(func=post_candidates) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('--username', help='''Username for ''' + dist_repos + '''.''') subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') # Setup the parser for the create-tag subcommand subparser = subparsers.add_parser('create-tag', help='''Create the release tag.''') subparser.set_defaults(func=create_tag) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('revnum', type=lambda arg: int(arg.lstrip('r')), help='''The revision number to base the release on.''') subparser.add_argument('--branch', help='''The branch to base the release on, relative to ^/subversion/.''') subparser.add_argument('--username', help='''Username for ''' + secure_repos + '''.''') subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') # The clean-dist subcommand 
subparser = subparsers.add_parser('clean-dist', help='''Clean the distribution directory (and mirrors) of all but the most recent MAJOR.MINOR release.''') subparser.set_defaults(func=clean_dist) subparser.add_argument('--dist-dir', help='''The directory to clean.''') subparser.add_argument('--username', help='''Username for ''' + dist_repos + '''.''') # The move-to-dist subcommand subparser = subparsers.add_parser('move-to-dist', help='''Move candiates and signatures from the temporary release dev location to the permanent distribution directory.''') subparser.set_defaults(func=move_to_dist) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('--username', help='''Username for ''' + dist_repos + '''.''') # The write-news subcommand subparser = subparsers.add_parser('write-news', help='''Output to stdout template text for use in the news section of the Subversion website.''') subparser.set_defaults(func=write_news) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') # write-announcement subparser = subparsers.add_parser('write-announcement', help='''Output to stdout template text for the emailed release announcement.''') subparser.set_defaults(func=write_announcement) subparser.add_argument('--security', action='store_true', default=False, help='''The release being announced includes security fixes.''') subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') # write-downloads subparser = subparsers.add_parser('write-downloads', help='''Output to stdout template text for the download table for subversion.apache.org''') subparser.set_defaults(func=write_downloads) subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') # check-sigs subparser = subparsers.add_parser('check-sigs', help='''Output to stdout the signatures collected for this release''') subparser.set_defaults(func=check_sigs) subparser.add_argument('version', type=Version, help='''The release label, such as '1.7.0-alpha1'.''') subparser.add_argument('--target', help='''The full path to the directory containing release artifacts.''') # get-keys subparser = subparsers.add_parser('get-keys', help='''Import committers' public keys to ~/.gpg/''') subparser.set_defaults(func=get_keys) # A meta-target subparser = subparsers.add_parser('clean', help='''The same as the '--clean' switch, but as a separate subcommand.''') subparser.set_defaults(func=cleanup) # Parse the arguments args = parser.parse_args() # first, process any global operations if args.clean: cleanup(args) # Set up logging logger = logging.getLogger() if args.verbose: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) # Fix up our path so we can use our installed versions os.environ['PATH'] = os.path.join(get_prefix(args.base_dir), 'bin') + ':' \ + os.environ['PATH'] # Make timestamps in tarballs independent of local timezone os.environ['TZ'] = 'UTC' # finally, run the subcommand, and give it the parsed arguments args.func(args) if __name__ == '__main__': main()
apache-2.0
5,809,119,848,239,719,000
37.987379
133
0.571064
false
3.860051
false
false
false
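# Illustrative command lines for the argument parser assembled in the release
# script above; the script name "release.py", the version label, the revision
# number and the --target path are placeholders only, not real release values.
#
#   python release.py --verbose build-env 1.7.0-alpha1
#   python release.py roll 1.7.0-alpha1 1234567 --branch branches/1.7.x
#   python release.py sign-candidates 1.7.0-alpha1 --target /path/to/deploy
#   python release.py clean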
sid88in/incubator-airflow
airflow/contrib/operators/sftp_operator.py
1
5426
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from airflow.contrib.hooks.ssh_hook import SSHHook from airflow.exceptions import AirflowException from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults class SFTPOperation(object): PUT = 'put' GET = 'get' class SFTPOperator(BaseOperator): """ SFTPOperator for transferring files from remote host to local or vice versa. This operator uses ssh_hook to open an sftp transport channel that serves as the basis for file transfer. :param ssh_hook: predefined ssh_hook to use for remote execution. Either `ssh_hook` or `ssh_conn_id` needs to be provided. :type ssh_hook: :class:`SSHHook` :param ssh_conn_id: connection id from airflow Connections. `ssh_conn_id` will be ignored if `ssh_hook` is provided. :type ssh_conn_id: str :param remote_host: remote host to connect (templated) Nullable. If provided, it will replace the `remote_host` which was defined in `ssh_hook` or predefined in the connection of `ssh_conn_id`. :type remote_host: str :param local_filepath: local file path to get or put. (templated) :type local_filepath: str :param remote_filepath: remote file path to get or put. (templated) :type remote_filepath: str :param operation: specify operation 'get' or 'put', defaults to put :type operation: str :param confirm: specify if the SFTP operation should be confirmed, defaults to True :type confirm: bool """ template_fields = ('local_filepath', 'remote_filepath', 'remote_host') @apply_defaults def __init__(self, ssh_hook=None, ssh_conn_id=None, remote_host=None, local_filepath=None, remote_filepath=None, operation=SFTPOperation.PUT, confirm=True, *args, **kwargs): super(SFTPOperator, self).__init__(*args, **kwargs) self.ssh_hook = ssh_hook self.ssh_conn_id = ssh_conn_id self.remote_host = remote_host self.local_filepath = local_filepath self.remote_filepath = remote_filepath self.operation = operation self.confirm = confirm if not (self.operation.lower() == SFTPOperation.GET or self.operation.lower() == SFTPOperation.PUT): raise TypeError("unsupported operation value {0}, expected {1} or {2}" .format(self.operation, SFTPOperation.GET, SFTPOperation.PUT)) def execute(self, context): file_msg = None try: if self.ssh_conn_id: if self.ssh_hook and isinstance(self.ssh_hook, SSHHook): self.log.info("ssh_conn_id is ignored when ssh_hook is provided.") else: self.log.info("ssh_hook is not provided or invalid. " + "Trying ssh_conn_id to create SSHHook.") self.ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id) if not self.ssh_hook: raise AirflowException("Cannot operate without ssh_hook or ssh_conn_id.") if self.remote_host is not None: self.log.info("remote_host is provided explicitly. 
" + "It will replace the remote_host which was defined " + "in ssh_hook or predefined in connection of ssh_conn_id.") self.ssh_hook.remote_host = self.remote_host with self.ssh_hook.get_conn() as ssh_client: sftp_client = ssh_client.open_sftp() if self.operation.lower() == SFTPOperation.GET: file_msg = "from {0} to {1}".format(self.remote_filepath, self.local_filepath) self.log.debug("Starting to transfer %s", file_msg) sftp_client.get(self.remote_filepath, self.local_filepath) else: file_msg = "from {0} to {1}".format(self.local_filepath, self.remote_filepath) self.log.debug("Starting to transfer file %s", file_msg) sftp_client.put(self.local_filepath, self.remote_filepath, confirm=self.confirm) except Exception as e: raise AirflowException("Error while transferring {0}, error: {1}" .format(file_msg, str(e))) return None
apache-2.0
4,150,012,125,310,411,000
44.216667
90
0.600995
false
4.216006
false
false
false
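# A minimal usage sketch for the SFTPOperator defined above; the DAG id,
# the "sftp_default" connection id and both file paths are placeholders.
from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.sftp_operator import SFTPOperator, SFTPOperation

example_dag = DAG('sftp_example', start_date=datetime(2018, 1, 1),
                  schedule_interval=None)

upload_report = SFTPOperator(
    task_id='upload_report',
    ssh_conn_id='sftp_default',
    local_filepath='/tmp/report.csv',
    remote_filepath='/incoming/report.csv',
    operation=SFTPOperation.PUT,
    confirm=True,
    dag=example_dag,
)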
justinmk/python-client
neovim/__init__.py
1
1799
from client import Client from script_host import ScriptHost from plugin_host import PluginHost from uv_stream import UvStream from msgpack_stream import MsgpackStream from rpc_stream import RPCStream from time import sleep import logging, os __all__ = ['connect', 'start_host', 'ScriptHost', 'PluginHost'] # Required for python 2.6 class NullHandler(logging.Handler): def emit(self, record): pass def connect(address=None, port=None, vim_compatible=False): client = Client(RPCStream(MsgpackStream(UvStream(address, port))), vim_compatible) client.discover_api() return client.vim def spawn(argv): client = Client(RPCStream(MsgpackStream(UvStream(spawn_argv=argv)))) client.discover_api() return client.vim def start_host(address=None, port=None): logging.root.addHandler(NullHandler()) logger = logging.getLogger(__name__) info = logger.info if 'NVIM_PYTHON_LOG_FILE' in os.environ: logfile = os.environ['NVIM_PYTHON_LOG_FILE'].strip() handler = logging.FileHandler(logfile, 'w') handler.formatter = logging.Formatter( '%(asctime)s [%(levelname)s @ ' '%(filename)s:%(funcName)s:%(lineno)s] %(process)s - %(message)s') logging.root.addHandler(handler) level = logging.INFO if 'NVIM_PYTHON_LOG_LEVEL' in os.environ: l = getattr(logging, os.environ['NVIM_PYTHON_LOG_LEVEL'].strip(), level) if isinstance(l, int): level = l logger.setLevel(level) info('connecting to neovim') vim = connect(address, port, vim_compatible=True) info('connected to neovim') with PluginHost(vim, discovered_plugins=[ScriptHost]) as host: host.run()
apache-2.0
1,397,941,041,186,873,000
31.125
78
0.644803
false
3.819533
false
false
false
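# A small usage sketch of the connect() helper defined above; the socket
# path is a placeholder and a running Neovim instance is assumed to be
# listening on it.
from neovim import connect

nvim = connect('/tmp/neovim.sock')
nvim.command('echo "connected from python"')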
regardscitoyens/nosfinanceslocales_scraper
localfinance/parsing/zone.py
1
5072
# -*- coding: utf-8 -*- from .document_mapper import DocumentMapper from .finance import ( CityFinanceParser, EPCIFinanceParser, DepartmentFinanceParser, DepartmentFinance2013Parser, RegionFinanceParser, RegionFinance2013Parser ) from .tax import ( CityTaxParser, CityBefore2008TaxParser, EPCITaxParser, EPCI2008TaxParser, EPCI2010TaxParser, DepTaxParser, DepTax2008Parser, DepTax20092010Parser, RegTaxParser2008, RegTaxParser20092010, RegTaxParserAfter2011, ) class BaseZoneParser(object): zone_type = '' def __init__(self, insee_code, year, url): self.data = { 'insee_code': insee_code, 'year': year, 'zone_type': self.zone_type, 'url': url } self.tax_parser = None self.finance_parser = None self.account = None self.finance_table_id = 3 def parse(self, hxs): data = self.data.copy() data.update(self.finance_parser.parse(hxs)) data.update(self.tax_parser.parse(hxs)) return data class RegionZoneParser(BaseZoneParser): zone_type = 'region' def __init__(self, insee_code, year, url): super(RegionZoneParser, self).__init__(insee_code, year, url) self.account = DocumentMapper("data/mapping/region_2008.yaml") year = int(self.data['year']) if year == 2008: self.tax_parser = RegTaxParser2008(self.account) self.finance_parser = RegionFinanceParser(self.account) elif 2008 < year < 2011: self.tax_parser = RegTaxParser20092010(self.account) self.finance_parser = RegionFinanceParser(self.account) elif 2010 < year < 2013: self.tax_parser = RegTaxParserAfter2011(self.account) self.finance_parser = RegionFinanceParser(self.account) else: self.account = DocumentMapper("data/mapping/region_2013.yaml") self.tax_parser = RegTaxParserAfter2011(self.account) self.finance_parser = RegionFinance2013Parser(self.account) class DepartmentZoneParser(BaseZoneParser): zone_type = 'department' def __init__(self, insee_code, year, url): super(DepartmentZoneParser, self).__init__(insee_code, year, url) year = int(self.data['year']) if year >= 2013: self.account = DocumentMapper("data/mapping/department_2013.yaml") self.tax_parser = DepTaxParser(self.account) self.finance_parser = DepartmentFinance2013Parser(self.account) elif 2013 > year > 2010: self.account = DocumentMapper("data/mapping/department_2011.yaml") self.tax_parser = DepTaxParser(self.account) self.finance_parser = DepartmentFinanceParser(self.account) elif year == 2010: self.account = DocumentMapper("data/mapping/department_2010.yaml") self.tax_parser = DepTax20092010Parser(self.account) self.finance_parser = DepartmentFinanceParser(self.account) elif 2010 > year > 2008: self.account = DocumentMapper("data/mapping/department_2009.yaml") self.tax_parser = DepTax20092010Parser(self.account) self.finance_parser = DepartmentFinanceParser(self.account) elif year == 2008: self.account = DocumentMapper("data/mapping/department_2008.yaml") self.tax_parser = DepTax2008Parser(self.account) self.finance_parser = DepartmentFinanceParser(self.account) class EPCIZoneParser(BaseZoneParser): zone_type = 'epci' def __init__(self, insee_code, year, url, siren): super(EPCIZoneParser, self).__init__(insee_code, year, url) self.data['siren'] = siren self.account = DocumentMapper("data/mapping/epci_2010.yaml") self.finance_parser = EPCIFinanceParser(self.account) year = int(self.data['year']) if year < 2009: self.account = DocumentMapper("data/mapping/epci_2008.yaml") self.tax_parser = EPCI2008TaxParser(self.account) elif year < 2011: self.tax_parser = EPCI2010TaxParser(self.account) else: self.tax_parser = EPCITaxParser(self.account) class CityZoneParser(BaseZoneParser): """Parser of city 
html page""" zone_type = 'city' def __init__(self, insee_code, year, url): super(CityZoneParser, self).__init__(insee_code, year, url) year = int(self.data['year']) if year > 2010: self.account = DocumentMapper("data/mapping/city_2011.yaml") self.tax_parser = CityTaxParser(self.account) elif 2008 < year < 2011: self.account = DocumentMapper("data/mapping/city_2009.yaml") self.tax_parser = CityTaxParser(self.account) elif year < 2009: self.account = DocumentMapper("data/mapping/city_2000.yaml") self.tax_parser = CityBefore2008TaxParser(self.account) self.finance_parser = CityFinanceParser(self.account)
mit
9,042,204,551,475,589,000
33.27027
78
0.637815
false
3.745938
false
false
false
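# Illustrative construction of the zone parsers defined above; the INSEE
# code, year and URL are placeholders, and `hxs` stands for the scrapy
# selector of the downloaded page (not built here).
from localfinance.parsing.zone import CityZoneParser

city_parser = CityZoneParser('75056', 2011, 'http://example.org/ville/75056')
# data = city_parser.parse(hxs)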
czhengsci/pymatgen
pymatgen/__init__.py
1
2748
from __future__ import unicode_literals import sys import os import warnings import ruamel.yaml as yaml __author__ = "Pymatgen Development Team" __email__ ="pymatgen@googlegroups.com" __maintainer__ = "Shyue Ping Ong" __maintainer_email__ ="shyuep@gmail.com" __version__ = "2018.3.14" SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml") def _load_pmg_settings(): try: with open(SETTINGS_FILE, "rt") as f: d = yaml.safe_load(f) except IOError: # If there are any errors, default to using environment variables # if present. d = {} for k, v in os.environ.items(): if k.startswith("PMG_"): d[k] = v elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]: d["PMG_" + k] = v clean_d = {} for k, v in d.items(): if not k.startswith("PMG_"): warnings.warn('With effect from pmg 5.0, all pymatgen settings are' ' prefixed with a "PMG_". E.g., "PMG_VASP_PSP_DIR" ' 'instead of "VASP_PSP_DIR".') clean_d["PMG_" + k] = v else: clean_d[k] = v return clean_d SETTINGS = _load_pmg_settings() # Order of imports is important on some systems to avoid # failures when loading shared libraries. # import spglib # from . import optimization, util # del(spglib, optimization, util) # Useful aliases for commonly used objects and modules. # Allows from pymatgen import <class> for quick usage. from pymatgen.core import * from .electronic_structure.core import Spin, Orbital from .ext.matproj import MPRester from monty.json import MontyEncoder, MontyDecoder, MSONable def get_structure_from_mp(formula): """ Convenience method to get a crystal from the Materials Project database via the API. Requires PMG_MAPI_KEY to be set. Args: formula (str): A formula Returns: (Structure) The lowest energy structure in Materials Project with that formula. """ m = MPRester() entries = m.get_entries(formula, inc_structure="final") if len(entries) == 0: raise ValueError("No structure with formula %s in Materials Project!" % formula) elif len(entries) > 1: warnings.warn("%d structures with formula %s found in Materials " "Project. The lowest energy structure will be returned." % (len(entries), formula)) return min(entries, key=lambda e: e.energy_per_atom).structure if sys.version_info < (3, 5): warnings.warn(""" Pymatgen will drop Py2k support from v2019.1.1. Pls consult the documentation at https://www.pymatgen.org for more details.""")
mit
4,265,692,872,666,505,000
30.597701
80
0.618632
false
3.606299
false
false
false
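# Example call of the get_structure_from_mp() helper defined above; it only
# runs when a Materials Project API key is configured, and the formula is a
# placeholder.
from pymatgen import SETTINGS, get_structure_from_mp

if SETTINGS.get("PMG_MAPI_KEY"):
    structure = get_structure_from_mp("Fe2O3")
    print(structure.formula)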
marksweiss/organize-m
lib/item.py
1
3548
from element import Elem class OrganizemIllegalDataFormatException(Exception): pass class OrganizemIllegalDataTypeException(Exception): pass # A single Item in the data file, with a root 'item:' element and child # elements for each of the fields (Elements) in an Item # Only title is required and all other args are optional and any or none can # be passed as named args (kwargs) # Values for all elements are available directly as properties # str() returns the YAML string serialization of the Item # repr() returns the Item as a dict/list that is what YAML deserializes to class Item(object): def __init__(self, title, dict_of_elems=None): # Store list of all elements in Item self._elems = Elem.get_elems() # Required elements are 'ROOT' and 'TITLE' # Set 'root' Item Element self.__setattr__('_' + Elem.ROOT, Elem.elem_init(Elem.ROOT, None)) # 'title' Element is required, set it first if not title: raise OrganizemIllegalDataFormatException("Cannot construct Item with null or empty title") title_obj = Elem.elem_init(Elem.TITLE, title) self.__setattr__('_' + Elem.TITLE, title_obj) self.__setattr__(Elem.TITLE, title_obj.val) # A little dirty, but not bad. Elem exposes method to get list of optional # elements, with the assumption being client can call get_optional_data_elems() to # get all elements and this to get only optional, so it can take care of # required ones (statically, as here) and process optional ones dynamically opt_elems = Elem.get_optional_data_elems() for elem in opt_elems: kwval = None elem_obj = None if dict_of_elems: if elem in dict_of_elems: kwval = dict_of_elems[elem] elem_obj = Elem.elem_init(elem, kwval) # Private object str(), repr() used by Item str() and repr() self.__setattr__('_' + elem, elem_obj) # Public getter just returns obj.val, value for the element self.__setattr__(elem, elem_obj.val) else: self.__setattr__('_' + elem, Elem.elem_init(elem, None)) self.__setattr__(elem, None) def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return None # Used for access to arbitrary element value: x = item[elem] def __getitem__(self, elem): return self.__getattr__(elem) # For now str() representation is YAML. Make separate method to make client # code more explicit and allow future change to str() without client code change def __str__(self): return self._to_yaml() def _to_yaml(self): return '\n'.join([str(self.__getattr__('_' + elem)) for elem in self._elems]) # NOTE: Used by organizem_test.py unit tests def __repr__(self): """ Returns form of object matching form produced by PyYaml.#load() when it loads the YAML item from the data file. So then PyYaml.#dump(Item.#repr()) produces valid YAML string """ # Use list of elements skipping ROOT # Iterate list of elems to create list of dicts, one for each attr elems = [{elem : self.__getattr__(elem)} for elem in self._elems[1:]] item_repr = {Elem.ROOT : elems} return repr(item_repr)
mit
1,379,241,476,481,216,000
43.35
103
0.602875
false
4.159437
false
false
false
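# A small usage sketch of the Item class defined above, assuming the module
# is importable as `item`; only the title is required, and optional element
# names would come from Elem.get_optional_data_elems().
from item import Item

report_item = Item('Write project report')
print(str(report_item))   # YAML serialization of the item
print(repr(report_item))  # dict/list form matching what PyYaml loads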
macarthur-lab/xbrowse
seqr/migrations/0059_auto_20190705_1450.py
1
1096
# -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-07-05 14:50 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('seqr', '0058_matchmakercontactnotes'), ] operations = [ migrations.AlterField( model_name='family', name='analysis_status', field=models.CharField(choices=[(b'S', b'S'), (b'S_kgfp', b'S'), (b'S_kgdp', b'S'), (b'S_ng', b'S'), (b'Sc_kgfp', b'S'), (b'Sc_kgdp', b'S'), (b'Sc_ng', b'S'), (b'Rcpc', b'R'), (b'Rncc', b'R'), (b'C', b'C'), (b'I', b'A'), (b'Q', b'W')], default=b'Q', max_length=10), ), migrations.AlterField( model_name='family', name='internal_analysis_status', field=models.CharField(blank=True, choices=[(b'S', b'S'), (b'S_kgfp', b'S'), (b'S_kgdp', b'S'), (b'S_ng', b'S'), (b'Sc_kgfp', b'S'), (b'Sc_kgdp', b'S'), (b'Sc_ng', b'S'), (b'Rcpc', b'R'), (b'Rncc', b'R'), (b'C', b'C'), (b'I', b'A'), (b'Q', b'W')], max_length=10, null=True), ), ]
agpl-3.0
-1,311,480,607,405,681,700
42.84
286
0.516423
false
2.584906
false
false
false
ahmetcemturan/SFACT
skeinforge_application/skeinforge_plugins/craft_plugins/cool.py
1
19876
""" This page is in the table of contents. Cool is a craft tool to cool the shape. Cool works well with a stepper extruder; it does not work well with a DC motor extruder. If enabled, before each layer that takes less than "Minimum Layer Time" to print, the tool head will orbit around the printed area for 'Minimum Layer Time' minus 'the time it takes to print the layer' before it starts printing the layer. This is a great way to let layers with smaller area cool before you start printing on top of them (so you do not overheat the area). The cool manual page is at: http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Cool Allan Ecker, aka The Masked Retriever, has written the "Skeinforge Quicktip: Cool" at: http://blog.thingiverse.com/2009/07/28/skeinforge-quicktip-cool/ ==Operation== The default 'Activate Cool' checkbox is on. When it is on, the functions described below will work; when it is off, the functions will not be called. ==Settings== ===Bridge Cool=== Default is one degree Celsius. If the layer is a bridge layer, then cool will lower the temperature by 'Bridge Cool' degrees Celsius. ===Cool Type=== Default is 'Slow Down'. ====Orbit==== When selected, cool will add orbits with the extruder off to give the layer time to cool, so that the next layer is not extruded on a molten base. The orbits will be around the largest island on that layer. Orbit should only be chosen if you cannot upgrade to a stepper extruder. ====Slow Down==== When selected, cool will slow down the extruder so that it will take the minimum layer time to extrude the layer. DC motors do not operate properly at slow flow rates, so if you have a DC motor extruder, you should upgrade to a stepper extruder, but if you can't do that, you can try using the 'Orbit' option. ===Maximum Cool=== Default is 2 degrees Celsius. If it takes less time to extrude the layer than the minimum layer time, then cool will lower the temperature by the 'Maximum Cool' setting times the layer time over the minimum layer time. ===Minimum Layer Time=== Default is 60 seconds. Defines the minimum amount of time the extruder will spend on a layer; this is an important setting. ===Minimum Orbital Radius=== Default is 10 millimeters. When the orbit cool type is selected, if the area of the largest island is as large as the square of the "Minimum Orbital Radius" then the orbits will be just within the island. If the island is smaller, then the orbits will be in a square of the "Minimum Orbital Radius" around the center of the island. This is so that the hot extruder does not stay too close to small islands. ===Name of Alteration Files=== Cool looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Cool does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder. The cool start and end text idea is from: http://makerhahn.blogspot.com/2008/10/yay-minimug.html ====Name of Cool End File==== Default is cool_end.gcode. If there is a file with the name of the "Name of Cool End File" setting, it will be added to the end of the orbits. ====Name of Cool Start File==== Default is cool_start.gcode. If there is a file with the name of the "Name of Cool Start File" setting, it will be added to the start of the orbits. ===Orbital Outset=== Default is 2 millimeters. 
When the orbit cool type is selected, the orbits will be outset around the largest island by 'Orbital Outset' millimeters. If 'Orbital Outset' is negative, the orbits will be inset instead. ===Turn Fan On at Beginning=== Default is on. When selected, cool will turn the fan on at the beginning of the fabrication by adding the M106 command. ===Turn Fan Off at Ending=== Default is on. When selected, cool will turn the fan off at the ending of the fabrication by adding the M107 command. ==Examples== The following examples cool the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and cool.py. > python cool.py This brings up the cool dialog. > python cool.py Screw Holder Bottom.stl The cool tool is parsing the file: Screw Holder Bottom.stl .. The cool tool has created the file: .. Screw Holder Bottom_cool.gcode """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import intercircle from fabmetheus_utilities import settings from skeinforge_application.skeinforge_utilities import skeinforge_craft from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import os import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getCraftedText(fileName, text, repository=None): 'Cool a gcode linear move text.' return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository) def getCraftedTextFromText(gcodeText, repository=None): 'Cool a gcode linear move text.' if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'cool'): return gcodeText if repository is None: repository = settings.getReadRepository(CoolRepository()) if not repository.activateCool.value: return gcodeText return CoolSkein().getCraftedGcode(gcodeText, repository) def getNewRepository(): 'Get new repository.' return CoolRepository() def writeOutput(fileName, shouldAnalyze=True): 'Cool a gcode linear move file. Chain cool the gcode if it is not already cooled.' skeinforge_craft.writeChainTextWithNounMessage(fileName, 'cool', shouldAnalyze) class CoolRepository: 'A class to handle the cool settings.' def __init__(self): 'Set the default settings, execute title & settings fileName.' skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.cool.html', self ) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Cool', self, '') self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute( 'http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Cool') self.activateCool = settings.BooleanSetting().getFromValue('Activate Cool.. 
but use with a fan!', self, False) settings.LabelDisplay().getFromName('- When To use Cool?-', self ) self.minimumLayerTime = settings.FloatSpin().getFromValue(0.0, 'Use Cool if layer takes shorter than(seconds):', self, 120.0, 10.0) self.minimumLayerFeedrate = settings.FloatSpin().getFromValue(5.0, 'Do not go slower than (mm/s):', self, 50.0, 15.0) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- What to do if Cool is necessary? -', self ) self.turnFanOnAtBeginning = settings.BooleanSetting().getFromValue('Turn Fan On at Beginning', self, True) self.turnFanOffAtEnding = settings.BooleanSetting().getFromValue('Turn Fan Off at Ending', self, True) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Name of Macro (gmc) Files to execute -', self ) self.nameOfCoolEndFile = settings.StringSetting().getFromValue('Execute when Cool ends:', self, 'cool_end.gmc') self.nameOfCoolStartFile = settings.StringSetting().getFromValue('Execute when Cool starts:', self, 'cool_start.gmc') settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- How to Cool? -', self ) self.coolType = settings.MenuButtonDisplay().getFromName('Cool by:', self) self.orbit = settings.MenuRadio().getFromMenuButtonDisplay(self.coolType, 'Orbiting around Object', self, False) self.slowDown = settings.MenuRadio().getFromMenuButtonDisplay(self.coolType, 'Slow Down during print', self, True) settings.LabelSeparator().getFromRepository(self) self.maximumCool = settings.FloatSpin().getFromValue(0.0, 'Maximum Cool (Celcius):', self, 10.0, 2.0) self.bridgeCool = settings.FloatSpin().getFromValue(0.0, 'Bridge Cool (Celcius):', self, 10.0, 1.0) self.minimumOrbitalRadius = settings.FloatSpin().getFromValue( 0.0, 'Minimum Orbital Radius (millimeters):', self, 20.0, 10.0) settings.LabelSeparator().getFromRepository(self) self.orbitalOutset = settings.FloatSpin().getFromValue(1.0, 'Orbital Outset (millimeters):', self, 5.0, 2.0) self.executeTitle = 'Cool' def execute(self): 'Cool button has been clicked.' fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled) for fileName in fileNames: writeOutput(fileName) class CoolSkein: 'A class to cool a skein of extrusions.' def __init__(self): self.boundaryLayer = None self.coolTemperature = None self.distanceFeedRate = gcodec.DistanceFeedRate() self.feedRateMinute = 960.0 self.highestZ = 1.0 self.isBridgeLayer = False self.isExtruderActive = False self.layerCount = settings.LayerCount() self.lineIndex = 0 self.lines = None self.multiplier = 1.0 self.oldFlowRate = None self.oldFlowRateString = None self.oldLocation = None self.oldTemperature = None def addCoolOrbits(self, remainingOrbitTime): 'Add the minimum radius cool orbits.' 
if len(self.boundaryLayer.loops) < 1: return insetBoundaryLoops = self.boundaryLayer.loops if abs(self.repository.orbitalOutset.value) > 0.1 * abs(self.edgeWidth): insetBoundaryLoops = intercircle.getInsetLoopsFromLoops(self.boundaryLayer.loops, -self.repository.orbitalOutset.value) if len(insetBoundaryLoops) < 1: insetBoundaryLoops = self.boundaryLayer.loops largestLoop = euclidean.getLargestLoop(insetBoundaryLoops) loopArea = euclidean.getAreaLoopAbsolute(largestLoop) if loopArea < self.minimumArea: center = 0.5 * (euclidean.getMaximumByComplexPath(largestLoop) + euclidean.getMinimumByComplexPath(largestLoop)) centerXBounded = max(center.real, self.boundingRectangle.cornerMinimum.real) centerXBounded = min(centerXBounded, self.boundingRectangle.cornerMaximum.real) centerYBounded = max(center.imag, self.boundingRectangle.cornerMinimum.imag) centerYBounded = min(centerYBounded, self.boundingRectangle.cornerMaximum.imag) center = complex(centerXBounded, centerYBounded) maximumCorner = center + self.halfCorner minimumCorner = center - self.halfCorner largestLoop = euclidean.getSquareLoopWiddershins(minimumCorner, maximumCorner) pointComplex = euclidean.getXYComplexFromVector3(self.oldLocation) if pointComplex is not None: largestLoop = euclidean.getLoopStartingClosest(self.edgeWidth, pointComplex, largestLoop) intercircle.addOrbitsIfLarge( self.distanceFeedRate, largestLoop, self.orbitalFeedRatePerSecond, remainingOrbitTime, self.highestZ) def addCoolTemperature(self, remainingOrbitTime): 'Parse a gcode line and add it to the cool skein.' layerCool = self.repository.maximumCool.value * remainingOrbitTime / self.repository.minimumLayerTime.value if self.isBridgeLayer: layerCool = max(self.repository.bridgeCool.value, layerCool) if self.oldTemperature is not None and layerCool != 0.0: self.coolTemperature = self.oldTemperature - layerCool self.addTemperature(self.coolTemperature) # def addFlowRate(self, flowRate): # 'Add a multipled line of flow rate if different.' # self.distanceFeedRate.addLine('M108 S' + euclidean.getFourSignificantFigures(flowRate)) def addGcodeFromFeedRateMovementZ(self, feedRateMinute, point, z): 'Add a movement to the output.' self.distanceFeedRate.addLine(self.distanceFeedRate.getLinearGcodeMovementWithFeedRate(feedRateMinute, point, z)) def addOrbitsIfNecessary(self, remainingOrbitTime): 'Parse a gcode line and add it to the cool skein.' if remainingOrbitTime > 0.0 and self.boundaryLayer is not None: self.addCoolOrbits(remainingOrbitTime) def addTemperature(self, temperature): 'Add a line of temperature.' self.distanceFeedRate.addLine('M104 S' + euclidean.getRoundedToThreePlaces(temperature)) def getCoolMove(self, line, location, splitLine): 'Get cool line according to time spent on layer.' self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine) calcCoolFeedrate = self.multiplier * self.feedRateMinute if calcCoolFeedrate >= self.repository.minimumLayerFeedrate.value*60: coolFeedrate = calcCoolFeedrate else: coolFeedrate = self.repository.minimumLayerFeedrate.value*60 return self.distanceFeedRate.getLineWithFeedRate(coolFeedrate, line, splitLine) def getCraftedGcode(self, gcodeText, repository): 'Parse gcode text and store the cool gcode.' 
self.repository = repository self.coolEndLines = settings.getAlterationFileLines(repository.nameOfCoolEndFile.value) self.coolStartLines = settings.getAlterationFileLines(repository.nameOfCoolStartFile.value) self.halfCorner = complex(repository.minimumOrbitalRadius.value, repository.minimumOrbitalRadius.value) self.lines = archive.getTextLines(gcodeText) self.minimumArea = 4.0 * repository.minimumOrbitalRadius.value * repository.minimumOrbitalRadius.value self.parseInitialization() self.boundingRectangle = gcodec.BoundingRectangle().getFromGcodeLines( self.lines[self.lineIndex :], 0.5 * self.edgeWidth) margin = 0.2 * self.edgeWidth halfCornerMargin = self.halfCorner + complex(margin, margin) self.boundingRectangle.cornerMaximum -= halfCornerMargin self.boundingRectangle.cornerMinimum += halfCornerMargin for self.lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[self.lineIndex] self.parseLine(line) if repository.turnFanOffAtEnding.value: self.distanceFeedRate.addLine('M107') return gcodec.getGcodeWithoutDuplication('M108', self.distanceFeedRate.output.getvalue()) def getLayerTime(self): 'Get the time the extruder spends on the layer.' feedRateMinute = self.feedRateMinute layerTime = 0.0 lastThreadLocation = self.oldLocation for lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) if firstWord == 'G1': location = gcodec.getLocationFromSplitLine(lastThreadLocation, splitLine) feedRateMinute = gcodec.getFeedRateMinute(feedRateMinute, splitLine) if lastThreadLocation is not None: feedRateSecond = feedRateMinute / 60.0 layerTime += location.distance(lastThreadLocation) / feedRateSecond lastThreadLocation = location elif firstWord == '(<bridgeRotation>': self.isBridgeLayer = True elif firstWord == '(</layer>)': return layerTime return layerTime def getLayerTimeActive(self): 'Get the time the extruder spends on the layer while active.' feedRateMinute = self.feedRateMinute isExtruderActive = self.isExtruderActive layerTime = 0.0 lastThreadLocation = self.oldLocation for lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) if firstWord == 'G1': location = gcodec.getLocationFromSplitLine(lastThreadLocation, splitLine) feedRateMinute = gcodec.getFeedRateMinute(feedRateMinute, splitLine) if lastThreadLocation is not None and isExtruderActive: feedRateSecond = feedRateMinute / 60.0 layerTime += location.distance(lastThreadLocation) / feedRateSecond lastThreadLocation = location elif firstWord == 'M101': isExtruderActive = True elif firstWord == 'M103': isExtruderActive = False elif firstWord == '(<bridgeRotation>': self.isBridgeLayer = True elif firstWord == '(</layer>)': return layerTime return layerTime def parseInitialization(self): 'Parse gcode initialization and store the parameters.' 
for self.lineIndex in xrange(len(self.lines)): line = self.lines[self.lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) self.distanceFeedRate.parseSplitLine(firstWord, splitLine) # if firstWord == 'M108': # self.oldFlowRate = float(splitLine[1][1 :]) if firstWord == '(<edgeWidth>': self.edgeWidth = float(splitLine[1]) if self.repository.turnFanOnAtBeginning.value: self.distanceFeedRate.addLine('M106') elif firstWord == '(</extruderInitialization>)': self.distanceFeedRate.addTagBracketedProcedure('cool') return # elif firstWord == '(<operatingFlowRate>': # self.oldFlowRate = float(splitLine[1]) elif firstWord == '(<orbitalFeedRatePerSecond>': self.orbitalFeedRatePerSecond = float(splitLine[1]) self.distanceFeedRate.addLine(line) def parseLine(self, line): 'Parse a gcode line and add it to the cool skein.' splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if firstWord == 'G1': location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) self.highestZ = max(location.z, self.highestZ) if self.isExtruderActive: line = self.getCoolMove(line, location, splitLine) self.oldLocation = location elif firstWord == 'M101': self.isExtruderActive = True elif firstWord == 'M103': self.isExtruderActive = False elif firstWord == 'M104': self.oldTemperature = gcodec.getDoubleAfterFirstLetter(splitLine[1]) # elif firstWord == 'M108': # self.oldFlowRate = float(splitLine[1][1 :]) # self.addFlowRate(self.multiplier * self.oldFlowRate) # return elif firstWord == '(<boundaryPoint>': self.boundaryLoop.append(gcodec.getLocationFromSplitLine(None, splitLine).dropAxis()) elif firstWord == '(<layer>': self.layerCount.printProgressIncrement('cool') self.distanceFeedRate.addLine(line) self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.coolStartLines) layerTime = self.getLayerTime() remainingOrbitTime = max(self.repository.minimumLayerTime.value - layerTime, 0.0) self.addCoolTemperature(remainingOrbitTime) if self.repository.orbit.value: self.addOrbitsIfNecessary(remainingOrbitTime) else: self.setMultiplier(remainingOrbitTime) # self.addFlowRate(self.multiplier * self.oldFlowRate) z = float(splitLine[1]) self.boundaryLayer = euclidean.LoopLayer(z) self.highestZ = max(z, self.highestZ) self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.coolEndLines) return elif firstWord == '(</layer>)': self.isBridgeLayer = False self.multiplier = 1.0 if self.coolTemperature is not None: self.addTemperature(self.oldTemperature) self.coolTemperature = None # self.addFlowRate(self.oldFlowRate) elif firstWord == '(<nestedRing>)': self.boundaryLoop = [] self.boundaryLayer.loops.append(self.boundaryLoop) self.distanceFeedRate.addLine(line) def setMultiplier(self, remainingOrbitTime): 'Set the feed and flow rate multiplier.' layerTimeActive = self.getLayerTimeActive() self.multiplier = min(1.0, layerTimeActive / (remainingOrbitTime + layerTimeActive)) def main(): 'Display the cool dialog.' if len(sys.argv) > 1: writeOutput(' '.join(sys.argv[1 :])) else: settings.startMainLoopFromConstructor(getNewRepository()) if __name__ == '__main__': main()
agpl-3.0
-5,475,630,925,524,603,000
45.98818
439
0.768213
false
3.316536
false
false
false
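# A library-style sketch of the cool entry points defined above,
# complementing the command-line examples in the module docstring; the file
# names are placeholders for gcode produced by earlier craft steps.
#
#   cooled_text = getCraftedText('example_carve.gcode', '')
#   writeOutput('example.stl')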
bmya/odoo-support
web_support_client_issue/wizard/support_new_issue.py
1
3221
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in module root # directory ############################################################################## from openerp import fields, api, models, _ from openerp.addons.base.res.res_request import referencable_models class support_new_issue_wizzard(models.TransientModel): _name = "support.new_issue.wizard" _description = "Support - New Issue Wizard" @api.model def get_default_description(self): default_description = """ <h4>¿Cuáles son los <b>pasos</b> para reproducir su problema?</h4> <p> <br/> <br/> </p> <h4>¿Cuál es el problema?</h4> <p> <br/> <br/> </p> <h4>¿Puede copiarnos uno o más links a <b>casos concretos</b> o adjuntar una <b>captura de pantalla</b>?</h4> <p> <br/> <br/> </p> """ return default_description user_id = fields.Many2one( 'res.users', required=True, string='Usuario afectado', default=lambda self: self.env.user, ) company_id = fields.Many2one( 'res.company', required=True, string='Compañía utilizada', ) date = fields.Datetime( string='Date', required=True, default=fields.Datetime.now ) name = fields.Char( string='Title', required=True, ) description = fields.Html( string='Description', required=True, default=get_default_description, ) attachment_ids = fields.Many2many( 'ir.attachment', 'new_issue_ir_attachments_rel' 'wizard_id', 'attachment_id', string='Attachments', required=False, ) resource = fields.Reference( selection=lambda self: referencable_models( self, self.env.cr, self.env.uid, self.env.context), string='Recurso afectado', help='You can reference the model and record related to the issue, ' 'this will help our technicians to resolve the issue faster', required=False, ) priority = fields.Selection( [('0', 'Low'), ('1', 'Normal'), ('2', 'High')], 'Priority', default='0', ) @api.onchange('user_id') def change_user(self): self.company_id = self.user_id.company_id.id @api.multi def action_confirm(self): self.ensure_one() active_contract = self.env['support.contract'].get_active_contract() description = self.description if self.resource: description += '\nResource: %s' % str(self.resource) vals = { 'db_user': self.user_id.login, 'db_company': self.company_id.name, 'date': self.date, 'issue_description': description, 'name': self.name, 'priority': self.priority, } issue_id = active_contract.create_issue(vals, self.attachment_ids) return self.env['warning_box'].info( title=_('Issue succesfully loaded'), message=_('For your reference and if you contact support by another\ channel, issue ID: %s') % (issue_id))
lgpl-3.0
4,524,937,655,549,498,400
29.6
80
0.557734
false
3.680412
false
false
false
dirn/Secret-Santa
tests/factories.py
1
1124
"""Factories for populating models for tests.""" import factory from factory.alchemy import SQLAlchemyModelFactory from xmas import models from xmas.core import db from xmas.utils import slugify class Event(SQLAlchemyModelFactory): """A factory instance of :class:`~xmas.models.Event`.""" FACTORY_FOR = models.Event FACTORY_SESSION = db.session id = factory.Sequence(lambda x: x) name = factory.Sequence(lambda x: 'Event {}'.format(x)) slug = factory.LazyAttribute(lambda obj: slugify(obj.name)) class Item(SQLAlchemyModelFactory): """A factory instance of :class:`~xmas.models.Item`.""" FACTORY_FOR = models.Item FACTORY_SESSION = db.session id = factory.Sequence(lambda x: x) name = factory.Sequence(lambda x: 'Item {}'.format(x)) class User(SQLAlchemyModelFactory): """A factory instance of :class:`~xmas.models.User`.""" FACTORY_FOR = models.User FACTORY_SESSION = db.session id = factory.Sequence(lambda x: x) name = factory.Sequence(lambda x: 'User {}'.format(x)) email = factory.Sequence(lambda x: 'email-{}@example.org'.format(x))
bsd-3-clause
265,303,943,291,645,300
25.139535
72
0.69395
false
3.602564
false
false
false
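# Example use of the factories defined above inside a test, assuming an
# application context with a configured db.session for them to bind to.
from tests.factories import Event, User

event = Event()             # persisted Event with a generated name and slug
alice = User(name='Alice')  # keyword arguments override the generated values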
DavidCain/film_server
cgi-bin/playlist.py
1
7485
#!/usr/bin/env python # David Cain # RE357 # 2012-12-16 """ A script to make a m3u bookmark playlist (playable in VLC), or an archive of .m4v video clip files. """ from collections import OrderedDict from datetime import datetime import cgi import csv import os import re import shutil import subprocess import sys import tempfile import traceback import zipfile hms = "%H:%M:%S" ms = "%M:%S" film_dir = "/srv/ftp/" movie_start = datetime.strptime("00:00:00", hms) def print_m3u(clips, title, filmpath): """ Print the contents of a .m3u playlist of clips in the film. Note that each bookmark should probably have a value for a "bytes" attribute, but it seems to work without it. """ attach_header("bookmarks.m3u") print "#EXTM3U" print "#EXTINF:7061,%s" % title # Bookmarks print "#EXTVLCOPT:bookmarks=", # trailing comma is key bookmarks = ["{name=%s,time=%i}" % (name, seconds(start)) for start, (end, name) in clips] print ",".join(bookmarks) # Path to file print filmpath def print_zip(clips, film_title): """ Print the contents of a .zip file of film clips. """ try: zip_file = make_clips(clips, film_title) except Exception, msg: text_err(msg) else: attach_header(film_title + "_clips.zip") for line in zip_file: print line, finally: try: os.remove(zip_file.name) except OSError: pass # If make_clips failed, file won't exist def make_clips(clips, film_title): """ Return a .zip file of film clips. """ temp_clip_dir = tempfile.mkdtemp(prefix=film_title) film_path = os.path.join(film_dir, "%s.m4v" % film_title) base, extension = os.path.splitext(film_path) clip_files = [] for start, (end, clip_name) in clips: if seconds(end - start) > 600: raise Exception("Clip '%s' exceeds ten minutes." % clip_name) running_time = str(end - start) # Will be in HMS start = str(start) clip_fn = clean_path(clip_name) outfile = os.path.join(temp_clip_dir, clip_fn + extension) cmd = ['ffmpeg', '-ss', start, '-t', running_time, '-i', film_path, '-acodec', 'copy', '-vcodec', 'copy', '-y', outfile] try: subprocess.check_output(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError, e: text_err("Error code %i:\n\n %s" % (e.returncode, e.output)) clip_files.append(outfile) # Zip the clips into an archive, return file handle zip_handle = make_zip(clip_files, film_title + "_clips") shutil.rmtree(temp_clip_dir) return zip_handle def make_zip(paths, top_dir="film_clips"): """ Return the handle to a .zip archive of the given files. :param top_dir: Directory name to place files in """ fd, zip_path = tempfile.mkstemp() archive = zipfile.ZipFile(zip_path, 'w') for path in paths: arcname = os.path.join(top_dir, os.path.split(path)[1]) archive.write(path, arcname) archive.close() os.close(fd) return open(zip_path) class CSVError(Exception): pass def get_clip_dict(csv_file, give_times=False): """ Return a dictionary of clip names with start and end times. """ clip_dict = OrderedDict() clips_csv = csv.reader(csv_file) for num, line in enumerate(clips_csv, start=1): if len(line) > 3: raise CSVError("Too many columns on line %i (check commas!)" % num) elif len(line) < 3: raise CSVError("Fewer than three columns on line %i" % num) start, end, name = [val.strip() for val in line] timename = "%s-%s" % (start, end) clip_name = "%s - %s" % (timename, name) if give_times else name start_time = get_time(start) end_time = get_time(end) if end_time < start_time: raise CSVError("End time of '%s' (line %i) precedes start." 
% (name, num)) clip_dict[start_time] = (end_time, clip_name) return clip_dict def seconds(delta): return int(delta.total_seconds()) def get_time(clip_start): try: bookmark_time = datetime.strptime(clip_start, hms) except ValueError: try: bookmark_time = datetime.strptime(clip_start, ms) except ValueError: raise ValueError("Invalid time format '%s'." "Enter time in H:M:S, or M:S" % clip_start) return bookmark_time - movie_start def clean_path(path): """ Sanitize the path for sensible names. It's not to prevent traversals, just to avoid common filename 'gotchas' """ path = re.sub("[:/\\\]", "-", path) path = re.sub(" ", "_", path) path = re.sub("[?]", "", path) return path def universal_file(in_file): """ Return the handle to a file with universal EOL support. (A hack to get around the fact that CGI handles are already open). """ fileno, filename = tempfile.mkstemp() with open(filename, "w") as newline_file: for line in in_file: newline_file.write(line) os.close(fileno) return open(filename, "rU") def attach_header(outname): print 'Content-Type:text/enriched; filename="%s"' % outname print 'Content-Disposition: attachment; filename="%s"\n' % outname def text_err(msg): print 'Content-Type:text/plain\n' print "Error:\n" print msg sys.exit(1) def html_err(msg): print 'Content-Type:text/html\n' print "<html>\n<body>" print "<h1>Error:</h1>\n" print "<p>\n%s\n</p>" % msg print "</body>\n</html>" sys.exit(1) def main(): """ Read the CGI form, display any errors. Otherwise, give content. """ form = cgi.FieldStorage() film_title = form["title"].value movie_path = form["movie_path"].value clip_order = form["clip_order"].value user_csv = form["csv_file"].file # Quit if CSV file is empty if not (user_csv and user_csv.read()): html_err("No CSV file given.") user_csv.seek(0) # Get output type try: output_type = form["output_type"].value except: html_err("No output format selected.") # Raise error if using playlist and path is left as example path if (output_type == "playlist" and (not movie_path or movie_path == "/Users/suzieq/East_of_Eden.m4v")): html_err("Playlists require the path to your film.\n" '<a href="/gen_clips.html#full_path">' 'Getting the full path of a file' '</a>') csv_file = universal_file(user_csv) # Force universal line support # Parse CSV, crash if errors try: clip_dict = get_clip_dict(csv_file) except CSVError, msg: html_err(msg) except Exception, msg: html_err("Error parsing CSV: %s" % msg) finally: os.remove(csv_file.name) # Sort clips chronologically, if specified if clip_order == "chronological": clips = sorted(clip_dict.items()) else: clips = clip_dict.items() if len(clips) == 0: html_err("No clips were found in the CSV file!") # Give the result as downloadable if output_type == "playlist": print_m3u(clips, film_title, movie_path) elif output_type == "clips": print_zip(clips, film_title) if __name__ == "__main__": try: main() except SystemExit: pass except: traceback.print_exc(file=sys.stdout)
gpl-3.0
-7,705,770,874,046,983,000
26.929104
94
0.604275
false
3.406919
false
false
false
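# A small sketch of get_clip_dict() from the CGI script above, run on an
# in-memory CSV; rows follow the "start,end,name" layout it parses and the
# clip names are invented (assumes the function is in scope).
from StringIO import StringIO

example_rows = StringIO("0:30,1:45,Opening scene\n1:02:10,1:04:00,Finale\n")
example_clips = get_clip_dict(example_rows)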
CMPUT410W15/cmput410-project
posts/remote.py
1
2909
"""Functions for dealing with remote posts.""" from author.models import Author from posts.models import Post, Comment from common.util import get_request_to_json, get_nodes from common.util import HINDLEBOOK, HINDLE_AUTH, BUBBLE, BUBBLE_AUTH from dateutil import parser import threading VISIBILITY = { 'PRIVATE': 0, 'FRIEND': 1, 'FRIENDS': 2, 'FOAF': 3, 'PUBLIC': 4, 'SERVERONLY': 5, 'private': 0, 'friend': 1, 'friends': 2, 'foaf': 3, 'public': 4, 'serveronly': 5 } CONTENT_TYPE = { u'text/html': 0, u'text/x-markdown': 1, } def get_pubdate(dictionary): pd1 = dictionary.get('pubDate', None) pd2 = dictionary.get('pubdate', None) return pd1 or pd2 def add_remote_comment(comment, post, author): comment_data = { 'uid': comment['guid'], 'content': comment['comment'], 'author': author, 'post': post } if not len(Comment.objects.filter(uid=comment['guid'])): c, _ = Comment.objects.get_or_create(**comment_data) c.published = parser.parse(get_pubdate(comment)) def add_remote_post(post, author): post_data = { 'uid': post['guid'], 'title': post['title'], 'description': post['description'], 'content': post['content'], 'content_type': CONTENT_TYPE.get(post['content-type'], 0), 'visibility': VISIBILITY[post['visibility']], 'send_author': author } if not len(Post.objects.filter(uid=post_data['uid'])): p = Post.objects.get_or_create(**post_data)[0] p.published = parser.parse(get_pubdate(post)) else: p = Post.objects.get(uid=post_data['uid']) for comment in post['comments']: try: author = Author.objects.get(uid=comment['author']['id']) add_remote_comment(comment, p, author) except: pass def update_posts_mutex(node, author, lock): if HINDLEBOOK in author.host: headers = {'Uuid': author.uid} data = get_request_to_json(node.url + 'author/posts', headers, HINDLE_AUTH) elif BUBBLE in author.host: data = get_request_to_json(node.url + 'author/posts2/', auth=BUBBLE_AUTH) else: data = 0 with lock: if not isinstance(data, int): for post in data['posts']: uid = post['author']['id'] try: author = Author.objects.get(uid=uid) add_remote_post(post, author) except: pass def reset_remote_posts(): lock = threading.Lock() for node in get_nodes(): for author in Author.objects.filter(user=None): thread = threading.Thread(target=update_posts_mutex, args=(node, author, lock)) thread.start()
apache-2.0
7,596,660,701,311,065,000
27.242718
68
0.559299
false
3.682278
false
false
false
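# Typical entry point for a periodic refresh of remote posts, e.g. from a
# Django management command or a scheduled job (illustrative only).
#
#   from posts.remote import reset_remote_posts
#   reset_remote_posts()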
MikeDMorgan/gwas_pipeline
PipelineGWAS.py
1
213238
######################################################################### ######################################################################### # Classes for handling genome-wide association input and output files, ## # analysis and qc programs, and post-hoc analyses ## ######################################################################### ######################################################################### import CGAT.Experiment as E import CGATPipelines.Pipeline as P import CGAT.IOTools as IOTools import numpy as np import pandas as pd import pandas.io.sql as pdsql import re import random import os import subprocess import rpy2.robjects as ro from rpy2.robjects import r as R from rpy2.robjects import pandas2ri as py2ri from rpy2.robjects.packages import importr # set matplotlib non-interactive backend to Agg to # allow running on cluster import collections import sqlite3 as sql from math import * import scipy.stats as stats import sklearn.metrics as metrics class FileGroup(object): ''' An object for holding, formatting and processing files for genome-wide association analysis including compressed and binary files File types supported: * plink - .ped and .map files * plink binary - .bim, .fam and .bed files * variant call format - .vcf and .bcf (including gzipped vcf) * Oxford format - .gen or .bgen with matched sample text file (must be .sample) * GRM_binary - genetic relationship matrix calculated in an appropriate program in binary format. File suffixes are *.grm.bin, *.grm.N.bin and *.grm.id * GRM_gz - previously calculated gzip compressed GRM, file suffixes are *.grm.gz and *.grm.id Phenotypes are assumed to be contained in the relevant files, if not, then an additional phenotypes file can be included using the `phenotypes` argument. Covariate files (if different from the phenotypes file) can also be included in the instantiation of a :FileGroup: object using the `covariate_files` argument. Only the `files` and `file_format` arguments are required. Genotype data are assumed to be raw genotype calls. This can be modified using the `genotype_format` argument upon instantiation. Values allowed are: * calls - standard bi-allelic genotype calls, i.e. AA, AB, BB * imputed_call - discrete genotype calls from imputed data, essentially treated the same as ``calls`` * genotype_prob - posterior probabilities for each genotype class, i.e. 0.88 0.07 0.05 corresponding to homozygote reference, heterozygote then homozygote rare allele. ''' # Defaults for file formats ped_file = None map_file = None bim_file = None fam_file = None bed_file = None sample_file = None gen_file = None bgen_file = None vcf_file = None bcf_file = None def __init__(self, files, file_format, phenotypes=None, genotype_format="calls", covariate_files=None): self.files = files self.file_format = file_format self.pheno_file = phenotypes self.genotype_format = genotype_format self.covariate_files = covariate_files self.set_file_prefix(files) def set_file_prefix(self, infiles): '''Get file prefixes from input files. These are used across all file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile. Only use periods, '.', to denote file suffixes. Use hyphens and underscores for separating file names. Set these to the appropriate attributes. 
''' file_prefixes = set() for f in infiles: # get all input file prefixes if len(f.split("/")) > 1: g = f.split("/")[-1] fdir = f.split("/")[:-1] fdir = "/".join(fdir) ffile = fdir + "/" + g.split(".")[0] file_prefixes.add(ffile) else: file_prefixes.add(f.split(".")[0]) # if only prefix then use this for all data files if len(file_prefixes) == 1: self.name = [xf for xf in file_prefixes][0] else: # if there are multiple prefixes then use separate # flags for file inputs self.name = None # define file types by their suffix instead if self.file_format == "plink": self.ped_file = [pf for pf in infiles if re.search(".ped", pf)][0] self.map_file = [mf for mf in infiles if re.search(".map", mf)][0] # check files exist (i.e. are not the default None values) try: assert self.ped_file except AssertionError: raise ValueError(".ped file is missing, please " "specify") try: assert self.map_file except AssertionError: raise ValueError(".map file is missing, please " "specify") elif self.file_format == "plink_binary": self.fam_file = [ff for ff in infiles if re.search(".fam", ff)][0] self.bim_file = [fb for fb in infiles if re.search(".bim", fb)][0] self.bed_file = [bf for bf in infiles if re.search(".bed", bf)][0] # check files exist (i.e. are not the default None values) try: assert self.fam_file except AssertionError: raise ValueError(".fam file is missing, please " "specify") try: assert self.bim_file except AssertionError: raise ValueError(".bim file is missing, please " "specify") try: assert self.bed_file except AssertionError: raise ValueError(".bed file is missing, please " "specify") elif self.file_format == "oxford": self.gen_file = [gf for gf in infiles if re.search(".gen", gf)][0] self.sample_file = [sf for sf in infiles if re.search(".sample", sf)][0] # check files exist (i.e. are not the default None values) try: assert self.gen_file except AssertionError: raise ValueError(".gen file missing, please " "specify") try: assert self.sample_file except AssertionError: raise ValueError(".sample file missing, please " "specify") elif self.file_format == "oxford_binary": self.bgen_file = [bg for bg in infiles if re.search(".bgen", bg)][0] self.sample_file = [sf for sf in infiles if re.search(".sample", sf)][0] # check files exist (i.e. are not the default None values) try: assert self.bgen_file except AssertionError: raise ValueError(".bgen file is missing, please " "specify") try: assert self.sample_file except AssertionError: raise ValueError(".sample file is missing, please " "specify") elif self.file_format == "vcf": self.vcf_file = [vf for vf in infiles if re.search(".vcf", vf)][0] # check files exist (i.e. are not the default None values) try: assert self.vcf_file except AssertionError: raise ValueError(".vcf file is missing, please " "specify") elif self.file_format == "bcf": self.bcf_file = [bv for bv in infiles if re.search(".bcf", bv)][0] # check files exist (i.e. 
are not the default None values) try: assert self.bcf_file except AssertionError: raise ValueError(".bcf file is missing, please " "specify") elif self.file_format == "GRM_binary": self.id_file = [ig for ig in infiles if re.search(".grm.id", ig)][0] self.n_file = [gn for gn in infiles if re.search(".grm.N.bin", gn)][0] self.bin_file = [gb for gb in infiles if re.search(".grm.bin", gb)][0] # check files exist try: assert self.id_file except AssertionError: raise ValueError("GRM ids file is missing, please " "specify") try: assert self.n_file except AssertionError: raise ValueError("grm.N file is missing, please " "specify") try: assert self.bin_file except AssertionError: raise ValueError("GRM genotype is missing, please " "specify") elif self.file_format == "GRM_plink": self.id_file = [ig for ig in infiles if re.search(".rel.id", ig)][0] self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin", gn)][0] # check files exist try: assert self.id_file except AssertionError: raise ValueError("GRM ids file is missing, please " "specify") try: assert self.rel_file except AssertionError: raise ValueError("rel.N file is missing, please " "specify") def set_phenotype(self, pheno_file=None, pheno=1): ''' Set the phenotype for a set of individuals using an external phenotypes file. Default is to use the (n+2)th column, designated as pheno 1. ''' if type(pheno) == int: pheno = str(pheno) elif type(pheno) == str: pass else: raise AttributeError("Type of pheno unknown. " "Must be str or int.") self.pheno_file = pheno_file self.pheno = pheno class GWASProgram(object): ''' A base level object for programs designed to perform genome-wide association analysis and operate on genome-wide genotyping data. [INSERT PROPER DOCSTRING - see style guide] ''' def __init__(self, executable=None, required_format=None): self.executable = executable self.require_format = required_format def program_call(self, infiles, outfile): '''build a statement to perform genome-wide analysis using infiles ''' return "" def postprocess(self, infiles, outfile): '''collect and process output files from program - format for Result class objects''' return "" def build(self, infiles, outfile): '''run analysis program''' cmd_program = self.program_call(infiles, outfile) cmd_postprocess = self.postprocess(infiles, outfile) if cmd_postprocess: assert cmd_postprocess.strip().endswith(";") else: pass statement = " checkpoint; ".join((cmd_program, cmd_postprocess)) return statement class GCTA(GWASProgram): ''' GCTA is designed for computing genetic relationship matrices, linear mixed model analyses and phenotype estimation/prediction. It can also perform SNP-wise GWAS. Files MUST be in Plink binary format ''' def __init__(self, files, options=None, settings=None, design=None): self.infiles = files self.options = options self.settings = settings self.design = design self.executable = "gcta64" self.statement = {} self.filters = [] def program_call(self, infiles, outfile): '''build GCTA call statement on infiles''' statement = [] statement.append(self.executable) if infiles.name: inputs = self._build_single_file_input(infiles, infiles.file_format) statement.append(inputs) else: raise AttributeError("Files must be in binary plink format " "or as a GRM to use GCTA. 
Please " "convert and try again.") if infiles.pheno_file: statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file, infiles.pheno)) else: pass self.statement["program"] = " ".join(statement) def _build_single_file_input(self, infiles, file_format): '''internal function only. Use it to construct the file input flags with --file, --bfile or --data ''' statement = None if file_format == "plink": statement = " --file %s " % infiles.name elif file_format == "plink_binary": statement = " --bfile %s " % infiles.name elif file_format == "oxford" or file_format == "oxford_binary": statement = " --data %s" % infiles.name elif file_format == "GRM_binary" or file_format == "GRM_plink": statement = " --grm %s " % infiles.name else: raise AttributeError("file format is not defined or recognised." "Please define the input corectly when " "instantiating a FileGroup object") return statement def PCA(self, n_pcs="20"): ''' Perform PCA analysis on previosly generated GRM, output the number n principal componets, default = 20 ''' self._run_tasks(pca=n_pcs) def apply_filters(self, filter_type, filter_value): ''' * chromosome - exclude all variants not on the specified chromosome(s). [str/list] * autosome_number - for non-human species, the number of chromosomes to be considered autosomes * exclude_snps - text file list of variant IDs to exclude from analysis. [file] * extract - text file list of variant IDs to include in analysis, ignores all others. [file] * min_allele_frequency - only include SNPs with cohort/case allele frequency above this threshold. [float] * max_allele_frequency - include all SNPs with a MAF equal to or below this value. [float] ''' if filter_type == "chromosome": self._construct_filters(chromosome=filter_value) elif filter_type == "autosome_number": self._construct_filters(autosome_number=filter_value) elif filter_type == "exclude_snps": self._construct_filters(exclude_snps=filter_value) elif filter_type == "extract": self._construct_filters(extract=filter_value) elif filter_type == "min_allele_frequency": self._construct_filters(min_allele_frequency=filter_value) elif filter_type == "max_allele_frequency": self._construct_filters(max_allele_frequency=filter_value) elif filter_type == "keep": self._construct_filters(keep=filter_value) elif filter_type == "remove": self._construct_filters(remove=filter_value) def _construct_filters(self, **kwargs): ''' Add filter to each GCTA run. The filters accepted are defined below. These are input as keyword arguments supported by this function. * min_allele_frequency - only include SNPs with cohort/case allele frequency above this threshold. [float] * max_allele_frequency - include all SNPs with a MAF equal to or below this value. [float] * keep - keep individuals with matching individual and family IDs. [file] * remove - remove all individuals with matching individual and family IDs. [file] * extract - text file list of variant IDs to include in analysis, ignores all others. [file] * exclude - text file list of variant IDs to exclude from analysis. [file] * chromosome - exclude all variants not on the specified chromosome(s). [str/list] * autosome - exclude all non-place and non-autosomal variants. [boolean] * covariates_file - specify the covariates file with family and individual IDs in the first two columns. Covariates are in the (n+2)th column. Only used in conjunction with `covariate_filter`. [file] * covariate_filter - covariate columns value to filter on. 
Can be used with non-numeric values to filter out individuals with covariate =/= `covariate_filter` value. [str/int/float] * covariate_column - column number to apply filtering to if more than one covariate in the file. [int] * update_gender - provide gender information in a separate text file. [file] * grm_threshold - remove one of a pair of individuals with estimated relatedness greater than this value. * ld_significance - p-value threshold for regression test of LD significance * genotype_call - GenCall score cut-off for calling raw genotypes into Plink PED format * meta_pval - p-value threshold cut-off for conditional and joint genome-wide analysis * cojo_window - distance in kb beyond wich SNPs this distance apart are assumed to be in linkage equilibrium * cojo_collinear - multiple regression R^2 on selected SNPs value above which the testing SNP will not be selected. * cojo_inflation - adjust COJO analysis test statistics for genomic control. [boolean] * reml_iterations - maximum number of iterations to use during reml analysis. Default is 100. [int] ''' statement = [] # map of keyword arguments recognised to Plink2 filtering flags filter_map = {"min_allele_frequency": " --maf %s ", "max_allele_frequency": " --max-maf %s ", "keep": " --keep %s ", "remove": " --remove %s ", "extract": " --extract %s ", "exclude": " --exclude %s ", "chromosome": " --chr %s ", "autosome": " --autosome ", "autosome_number": " --autosome-num %s ", "grm_threshold": " --grm-cutoff %s ", "ld_significance": " --ls-sig %s ", "genotype_call": " --gencall %s ", "meta_pval": " --cojo-p %s ", "cojo_window": " --cojo-wind %s ", "cojo_collinear": " --cojo-collinear %s ", "cojo_inflation": " --cojo-gc ", "reml_iterations": " --reml-maxit %s "} # compile all filters together, checking for dependencies. # use a mapping dictionary to extract the relevant flags and # combinations to use. filters = [] filter_dict = {} for key, value in kwargs.iteritems(): filter_dict[key] = value for each in filter_dict.keys(): try: assert filter_map[each] # check for data type <- behaviour is type dependent if type(filter_dict[each]) == 'bool': filters.append(filter_map[each]) else: filter_val = filter_dict[each] filters.append(filter_map[each] % filter_val) except KeyError: E.warn("%s filter not recognised, please see " "documentation for allowed filters" % each) pass self.filters.append(" ".join(filters)) self.statement["filters"] = " ".join(self.filters) def mixed_model(self, lmm_method, grm=None, qcovar=None, dcovar=None): ''' Run a linear mixed model with the GRM used to model random effects of an estimated genetic relationshi between individuals ''' # add the mlm flag to the statement self._run_tasks(lmm=lmm_method) # construct the rest of mlm statement statement = [] if qcovar: statement.append(" --qcovar %s " % qcovar) else: pass if dcovar: statement.append(" --covar %s " % dcovar) else: pass try: statement.append(" --grm %s " % grm) except ValueError: E.warn("No GRM has been provided, the GRM ") self.statement["mlm"] = " ".join(statement) def reml_analysis(self, method, parameters, prevalence=None, qcovariates=None, discrete_covar=None): ''' Use REML to estimate the proportion of phenotypic variance explained by the estimated genetic relationship between individuals. Arguments --------- method: string GCTA method to use for REML estimation of h2. 
Includes: * snpBLUP - calculate the SNP BLUPs from the genotype data and the estimated total genetic value/ breeding value * fixed_cor - * priors - provide initial priors for the variance components estimation * unconstrained - allow variance estimates to fall outside of the normal parameter space, bounded [0, ). * GxE - estimate the contribution of GxE with covariates to the phenotype variance * BLUP_EBV - output individual total genetic effect/breeding values ''' statement = [] try: params = parameters.split(",") if len(params) == 1: params = params[0] else: pass except AttributeError: params = parameters self._run_tasks(parameter=params, greml=method) if prevalence: statement.append(" --prevalence %0.3f " % prevalence) else: pass if qcovariates: statement.append(" --qcovar %s " % qcovariates) else: pass if discrete_covar: statement.append(" --covar %s " % discrete_covar) else: pass self.statement["reml"] = " ".join(statement) def _run_tasks(self, parameter=None, **kwargs): ''' The principal functions of GCTA revolve around GRM estimation and variance components analysis, such as REML estimation of heritability and variance components, BLUP and phenotype prediciton. It can also be used to do PCA and conditional and joint GWAS. Tasks ----- * pca - perform principal components analysis on a GRM * greml - perform restricted maximum likelihood analysis for estimation of variance components * estimate_ld - estimate the linkage disequilibrium structure over the genomic regions specified * simulate_gwas - simulate genome-wide association data based on observed genotype data * cojo - conditional and joint genome-wide association analysis across SNPs and covariates * bivariate_reml - perform GREML on two traits, either both binary, both quantitative or one of each * lmm - perform a linear mixed model based association analysis ''' statement = [] # set up a dictionary of recognised tasks with key word argument # values as further dictionaries. 
Use the parameter argument # to pass arguments by value to string formatting # put all of the other tasks as options in the calling function task_map = {"pca": " --pca %s ", "greml": {"standard": " --reml ", "priors": " --reml --reml-priors %s ", "reml_algorithm": " --reml --reml-alg %s ", "unconstrained": " --reml --reml-no-constrain ", "GxE": " --reml --gxe %s ", "LRT": " --reml --reml-lrt %s ", "BLUP_EBV": " --reml --reml-pred-rand ", "snpBLUP": " --blup-snp %s "}, "estimate_ld": " --ld %s ", "simulate_gwas": {"quantitative": " --simu-qt ", "case_control": " --simu-cc %s %s "}, "cojo": {"stepwise": " --cojo-file %s --cojo-slct ", "no_selection": " --cojo-file %s --cojo-joint ", "snp_conditional": " --cojo-file %s --cojo-cond %s "}, "bivariate_reml": {"standard": " --reml-bivar %s ", "no_residual": " --reml-bivar %s --reml-bivar-nocove ", "fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "}, "lmm": {"standard": " --mlma ", "loco": " --mlma-loco ", "no_covar": " --mlma-no-adj-covar "}, "remove_relations": {"cutoff": " --grm-cutoff %s "}} for task, value in kwargs.iteritems(): # check for PCA first as it is not nested in task_map if task == "pca": try: state = task_map[task] % value statement.append(state) except TypeError: statement.append(task_map[task]) statement.append # LD estimation is likewise not nested elif task == "estimate_ld": try: state = task_map[task] % value statement.append(state) except TypeError: raise IOError("no SNP file list detected") elif task != "parameter": try: # sub_task is a nested dictionary sub_task = task_map[task] try: assert sub_task[value] try: # some tasks do not contain task values for the # parameter argument - catch these with the TypeError # exception statement.append(sub_task[value] % parameter) # the default for parameter is None, check this is appropriate if not parameter: E.warn("Parameter value is set to NoneType. 
" "Please check this is an appropriate value " "to pass for this task") else: pass except TypeError: statement.append(sub_task[value]) except KeyError: raise KeyError("% Task not recognised, see docs for details of " "recognised tasks" % task) except KeyError: raise KeyError("Task not recognised, see docs for details of " "recognised tasks") else: pass self.statement["tasks"] = " ".join(statement) def genetic_relationship_matrix(self, compression="binary", metric=None, shape="square", options=None): ''' Calculate the estimated genetic relationship matrix from genotyping data * estimate_grm - estimate the realized genetic relationship matrix between individuals from genotyping data ''' mapf = {"binary": " --make-grm-bin ", "gzip": " --make-grm-gz ", "no_compress": " --make-grm ", "X_chr": " --make-grm-chr ", "X_chr_gz": " --make-grm-gz ", "inbreeding": " --ibc "} if options == "X_chr": if compression == "gz": state = mapf["X_chr_gz"] else: state = mapf["X_chr"] elif options == "inbreding": state = mapf["inbreeding"] else: pass # check compression is compatible if compression == "gz": state = mapf["gzip"] elif compression == "bin": state = mapf["binary"] elif compression is None and not options: state = mapf["no_compress"] self.statement["matrix"] = state def build_statement(self, infiles, outfile, threads=None, memory=None, parallel=None): ''' Build statement and execute from components ''' statement = [] exec_state = self.executable # calls to function add to the self.statement dictionary try: statement.append(self.statement["program"]) except KeyError: raise AttributeError("Input files and format not detected") try: statement.append(self.statement["filters"]) except KeyError: pass try: statement.append(self.statement["tasks"]) except KeyError: pass try: statement.append(self.statement["matrix"]) except KeyError: pass try: statement.append(self.statement["mlm"]) except KeyError: pass try: statement.append(self.statement["reml"]) except KeyError: pass if threads: statement.append(" --thread-num %i " % threads) else: pass # add output flag statement.append(" --out %s " % outfile) os.system(" ".join(statement)) class Plink2(GWASProgram): ''' Run various Plink functions and analysis, including file processing, GRM calculation, PCA and other GWA tasks Require Plink v1.9 to be in the users PATH variable as ``plink2`` to distinguish it from Plink v1.07. ''' def __init__(self, files, options=None, settings=None, design=None): self.infiles = files self.options = options self.settings = settings self.design = design self.executable = "plink2" self.statement = {} self.filters = [] def program_call(self, infiles, outfile): ''' build Plink call statement on infiles''' statement = [] statement.append(self.executable) if infiles.name: inputs =self. 
_build_single_file_input(infiles, infiles.file_format) statement.append(inputs) else: inputs = self._build_multiple_file_input(infiles, infiles.file_format) statement.append(inputs) # check for the presence of an additional phenotypes file try: if infiles.pheno_file: statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file, infiles.pheno)) else: pass except AttributeError: pass self.statement["program"] = " ".join(statement) def hamming_matrix(self, shape, compression, options): ''' Calculate genomic pair-wise distance matrix between individuals using Hamming distance across all variants ''' # check shape is compatible if not shape: shape = "triangle" elif shape in ["square", "square0", "triangle"]: pass else: raise ValueError("matrix shape %s not recognised." "Valid options are square, square0, " "and triangle." % shape) # check compression is compatible if compression in ["gz", "bin", "bin4"]: pass else: raise ValueError("compression %s not recognised. Accepted " "formats are gz, bin and bin4." % compression) if options: state = self._matrices(matrix_type="hamming", shape=shape, compression=compression, options=options) else: state = self._matrices(matrix_type="hamming", shape=shape, compression=compression) self.statement["matrix"] = state def ibs_matrix(self, shape, compression, options): ''' Calculate genomic pair-wise similarity matrix between individuals using proportion of IBS alleles ''' # check shape is compatible if shape in ["square", "square0", "triangle"]: pass else: raise ValueError("matrix shape %s not recognised." "Valid options are square, square0, " "and triangle." % shape) # check compression is compatible if compression in ["gz", "bin", "bin4"]: pass else: raise ValueError("compression %s not recognised. Accepted " "formats are gz, bin and bin4." % compression) if options: state = self._matrices(matrix_type="ibs", shape=shape, compression=compression, options=options) else: state = self._matrices(matrix_type="ibs", shape=shape, compression=compression) self.statement["matrix"] = state def genome_matrix(self, shape, compression, options): ''' Calculate genomic pair-wise distance matrix between individuals using 1 - proportion of IBS alleles ''' # check shape is compatible if shape in ["square", "square0", "triangle"]: pass else: raise ValueError("matrix shape %s not recognised." "Valid options are square, square0, " "and triangle." % shape) # check compression is compatible if compression in ["gz", "bin", "bin4"]: pass else: raise ValueError("compression %s not recognised. Accepted " "formats are gz, bin and bin4." % compression) if options: state = self._matrices(matrix_type="genomic", shape=shape, compression=compression, options=options) else: state = self._matrices(matrix_type="genomic", shape=shape, compression=compression) self.statement["matrix"] = state def genetic_relationship_matrix(self, shape, compression, metric, options=None): ''' Calculate genomic pair-wise distance matrix between individuals using proportion of IBS alleles Requires the use of the Plink2 parallelisation to run with large cohorts of patients ''' # check shape is compatible if shape in ["square", "square0", "triangle"]: pass else: raise ValueError("matrix shape %s not recognised." "Valid options are square, square0, " "and triangle." % shape) # check compression is compatible if compression in ["gz", "bin", "bin4"]: pass else: raise ValueError("compression %s not recognised. Accepted " "formats are gz, bin and bin4." 
% compression) if metric in ["cov", "ibc2", "ibc3"]: state = self._matrices(matrix_type="grm", shape=shape, compression=compression, options=metric) else: E.info("%s metric not recognised. Running with default Fhat1" % metric) state = self._matrices(matrix_type="grm", shape=shape, compression=compression) self.statement["matrix"] = state def apply_filters(self, filter_type, filter_value): ''' arguments supported by this function. * genotype_rate - exclude SNPs with a genotyping rate below this value. [float] * min_allele_frequency - only include SNPs with cohort/case allele frequency above this threshold. [float] * max_allele_frequency - include all SNPs with a MAF equal to or below this value. [float] * exclude_snp - exclude this single variant * exclude_snps - text file list of variant IDs to exclude from analysis. [file] * chromosome - exclude all variants not on the specified chromosome(s). [str/list] * exclude_chromosome - exclude all variants on the specified chromosome(s). [str/list] * autosome - exclude all non-place and non-autosomal variants. [boolean] * pseudo_autosome - include the pseudo-autosomal region of chromosome X. [boolean] * ignore_indels - remove all indels/multi-character allele coding variants. [boolean] * snp_bp_range - (from, to) range in bp of variants to include in analysis. [tuple] ''' if filter_type == "genotype_rate": self._construct_filters(genotype_rate=filter_value) elif filter_type == "hwe": self._construct_filters(hwe=filter_value) elif filter_type == "missingness": self._construct_filters(missingness=filter_value) elif filter_type == "min_allele_frequency": self._construct_filters(min_allele_frequency=filter_value) elif filter_type == "max_allele_frequency": self._construct_filters(max_allele_frequency=filter_value) elif filter_type == "exclude_snp": self._construct_filters(exclude_snp=filter_value) elif filter_type == "exclude": self._construct_filters(exclude=filter_value) elif filter_type == "extract": self._construct_filters(extract=filter_value) elif filter_type == "chromosome": self._construct_filters(chromosome=filter_value) elif filter_type == "exclude_chromosome": self._constuct_filters(exclude_chromosome=filter_value) elif filter_type == "autosome": self._construct_filters(autosome=filter_value) elif filter_type == "pseudo_autosome": self._construct_filters(pseudo_autosome=filter_value) elif filter_type == "ignore_indels": self._construct_filters(ignore_indels=filter_value) elif filter_type == "snp_bp_range": self._construct_filters(snp_bp_range=filter_value) elif filter_type == "conditional_snp": self._construct_filters(conditional_snp=filter_value) elif filter_type == "keep": self._construct_filters(keep=filter_value) elif filter_type == "remove": self._construct_filters(remove=filter_value) elif filter_type == "ignore_indels": self._construct_filters(ignore_indels=filter_value) def _build_multiple_file_input(self, infiles, file_format): ''' internal function only. 
        Use it to construct the appropriate file input flags
        '''

        statement = None

        if file_format == "oxford":
            statement = " --gen %s --sample %s " % (infiles.gen_file,
                                                    infiles.sample_file)
        elif file_format == "oxford_binary":
            statement = " --bgen %s --sample %s " % (infiles.bgen_file,
                                                     infiles.sample_file)
        elif file_format == "plink":
            statement = " --ped %s --map %s " % (infiles.ped_file,
                                                 infiles.map_file)
        elif file_format == "plink_binary":
            statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
                                                          infiles.bim_file,
                                                          infiles.fam_file)
        elif file_format == "vcf":
            statement = " --vcf %s.vcf.gz " % infiles.vcf_file
        elif file_format == "bcf":
            statement = " --bcf %s " % infiles.bcf_file
        elif file_format == "GRM_binary":
            statement = " --grm-bin %s " % infiles.name
        else:
            raise AttributeError("file format is not defined. Please "
                                 "define the input file formats when "
                                 "instantiating a FileGroup object")

        return statement

    def _build_single_file_input(self, infiles, file_format):
        '''internal function only. Use it to construct the
        file input flags with --file, --bfile or --data
        '''

        statement = None

        if file_format == "plink":
            statement = " --file %s " % infiles.name
        elif file_format == "plink_binary":
            statement = " --bfile %s " % infiles.name
        elif file_format == "oxford" or file_format == "oxford_binary":
            statement = " --data %s" % infiles.name
        elif file_format == "GRM_plink":
            statement = " --grm.bin %s " % infiles.name
        elif file_format == "GRM_binary":
            statement = " --grm-bin %s " % infiles.name
        elif file_format == "vcf":
            statement = " --vcf %s.vcf.gz " % infiles.name
        else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
                                 "instantiating a FileGroup object")

        return statement

    def _construct_filters(self, **kwargs):
        '''
        Add filter to each plink run. [data type]

        The filters accepted are defined below.  These are input as keyword
        arguments supported by this function.

        * genotype_rate - exclude SNPs with a genotyping rate below this
          value. [float]

        * missingness - exclude individuals with total genotype missingness
          above this value. [float]

        * hwe - p-value threshold for excluding SNPs deviating from
          Hardy-Weinberg expectations. [float]

        * min_allele_frequency - only include SNPs with cohort/case allele
          frequency above this threshold. [float]

        * max_allele_frequency - include all SNPs with a MAF equal to or
          below this value. [float]

        * mendelian_error - filter out samples/trios exceeding the error
          threshold. [float]

        * keep - keep individuals with matching individual and family IDs.
          [file]

        * remove - remove all individuals with matching individual and
          family IDs. [file]

        * quality_score_file - vcf file with variants and quality scores.
          Use `qual_score_column` and `var_id_col` to specify which columns
          correspond to the quality score and variant ID columns.
          [file] <int> <int>

        * min_qual_score - alters the lower bound of the quality score
          threshold; default is 0. [int]

        * max_qual_score - sets an upper limit on the quality scores;
          default is Inf. [int]

        * allow_no_sex - prevents phenotypes being set to missing if there
          is no gender information. [boolean]

        * enforce_sex - force phenotype missing when using --make-bed,
          --recode or --write-covar. [boolean]

        * subset_filter - filter on a particular subset.  Choices are:
          cases, controls, males, females, founders, nonfounders. [str]

        * extract - text file list of variant IDs to include in analysis,
          ignores all others. [file]

        * exclude - text file list of variant IDs to exclude from analysis.
[file] * chromosome - exclude all variants not on the specified chromosome(s). [str/list] * exclude_chromosome - exclude all variants on the specified chromosome(s). [str/list] * autosome - exclude all non-place and non-autosomal variants. [boolean] * pseudo_autosome - include the pseudo-autosomal region of chromosome X. [boolean] * ignore_indels - remove all indels/multi-character allele coding variants. [boolean] * snp_bp_range - (from, to) range in bp of variants to include in analysis. [tuple] * specific_snp - only load the variant specified. [str] * exclude_snp - exclude this single variant * window_size - alters behaviour of `specific_snp` and `exclude_snp` to include/exclude SNPs within +/- half of this distance (kb) are also included. [float] * range_resolution - sets the resolution of the (from, to) range. Either bp, kb or mb. If set it will take the values from `snp_bp_range`. [str/int/float] * covariates_file - specify the covariates file with family and individual IDs in the first two columns. Covariates are in the (n+2)th column. Only used in conjunction with `covariate_filter`. [file] * covariate_filter - covariate columns value to filter on. Can be used with non-numeric values to filter out individuals with covariate =/= `covariate_filter` value. [str/int/float] * covariate_column - column number to apply filtering to if more than one covariate in the file. [int] ''' statement = [] # map of keyword arguments recognised to Plink2 filtering flags filter_map = {"genotype_rate": " --geno %s ", "missingness": "--mind %s ", "hwe": " --hwe %s ", "min_allele_frequency": " --maf %s ", "max_allele_frequency": " --max-maf %s ", "mendelian_error": " --me %s ", "keep": " --keep %s ", "remove": " --remove %s ", "quality_score_file": " --qual-scores %s ", "qual_score_column": " %s ", "var_id_col": " %s ", "min_qual_score": " --qual-threshold %s ", "max_qual_score": " --qual-max-threshold %s ", "allow_no_sex": " --allow-no-sex ", "enforce_sex": " --must-have-sex ", "subset_filter": " --filter-%s ", "extract": " --extract %s ", "exclude": " --exclude %s ", "chromosome": " --chr %s ", "exclude_chromosome": " --not-chr %s ", "autosome": " --autosome ", "pseudo_autosome": " --autosome-xy ", "ignore_indels": " --snps-only no-DI ", "snp_id_range": " --from %s --to %s ", "specific_snp": " --snp %s ", "window_size": " --window %s ", "exclude_snp": " --exclude-snp %s ", "snp_bp_range": "--from-bp %s --to-bp %s ", "covariates_file": " --filter %s ", "covariate_filter": " %s ", "covariate_column": " --mfilter %s ", "missing_phenotype": " --prune ", "conditional_snp": " --condition %s ", "haplotype_size": " --blocks-max-kb %s ", "haplotype_frequency": " --blocks-min-maf %s "} # compile all filters together, checking for dependencies. # use a mapping dictionary to extract the relevant flags and # combinations to use. filters = [] filter_dict = {} for key, value in kwargs.iteritems(): filter_dict[key] = value # need to check for covariates and qual scores - these # are more complex. Deal with these first and remove # from dictionary once complete. 
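        # Illustrative note (hypothetical values): passing, for example,
        #   covariates_file="covars.txt", covariate_filter="smoker"
        # is expected to expand via `filter_map` into roughly
        #   " --filter covars.txt  smoker "
        # before being appended to the filter statement below.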
try: assert filter_dict["quality_score_file"] assert filter_dict["qual_score_column"] assert filter_dict["var_id_col"] quals = [] qual_file = filter_dict["quality_score_file"] score_col = filter_dict["qual_score_column"] id_col = filter_dict["var_id_col"] quals.append(filter_map["quality_score_file"] % qual_file) quals.append(filter_map["qual_score_column"] % score_col) quals.append(filter_map["var_id_col"] % id_col) # remove from dictionary filter_dict.pop("qual_score_column", None) filter_dict.pop("var_id_col", None) filters.append(" ".join(quals)) except KeyError: pass try: assert filter_dict["covariates_file"] assert filter_dict["covariate_filter"] covars = [] covar_file = filter_dict["covariates_file"] covar_val = filter_dict["covariate_filter"] covars.append(filter_map["covariates_file"] % covar_file) covars.append(filter_map["covariate_filter"] % covar_val) # check to filter on specific column numnber, default is 3rd file # column, i.e. (n+2)th column try: assert filter_dict["covariate_column"] covar_col = filter_dict["covariate_column"] covars.append(filter_map["covariate_column"] % covar_col) filter_dict.pop("covariate_column", None) except KeyError: pass # remove from dictionary filter_dict.pop("covariates_file", None) filter_dict.pop("covariate_filter", None) filters.append(" ".join(covars)) except KeyError: pass # range_resolution and snp_bp_range are used together try: assert filter_dict["snp_bp_range"] flags = filter_map["snp_bp_range"] from_pos = filter_dict["snp_bp_range"].split(",")[0] to_pos = filter_dict["snp_bp_range"].split(",")[1] filters.append(flags % (from_pos, to_pos)) # remove so they are not duplicated - source of bugs filter_dict.pop("snp_bp_range", None) except KeyError: pass for each in filter_dict.keys(): try: assert filter_map[each] # check for data type <- behaviour is type dependent if type(filter_dict[each]) == bool: filters.append(filter_map[each]) # handle multiple arguments in string format elif len(filter_dict[each].split(",")) > 1: vals = tuple(filter_dict[each].split(",")) filters.append(filter_map[each] % vals) else: filter_val = filter_dict[each] filters.append(filter_map[each] % filter_val) except KeyError: E.warn("%s filter not recognised, please see " "documentation for allowed filters" % each) pass self.filters.append(" ".join(filters)) self.statement["filters"] = " ".join(self.filters) def calc_ld(self, ld_statistic, ld_threshold, ld_shape="table"): ''' Calculate linkage disequilibrium between all SNP pairs. Arguments --------- ld_statistic: string The LD statistic to report, either correlation or squared correlation of inter-variant allele counts ld_threshold: float minimum value to report for pair-wise LD ld_window: int max distance (in Kb) between SNPs for calculating LD ld_shape: string shape to use for reporting LD, either a table or a matrix. If a matrix then either square, square with diagnonal (square0) or triangular. Square matrices are symmetric. ''' statement = [] ld_map = {"r": " --r %s dprime ", "r2": "--r2 %s dprime "} shape_map = {"table": "inter-chr gz", "square": "square gz", "square0": "square0 gz", "triangle": "triangle gz"} try: statement.append(ld_map[ld_statistic] % shape_map[ld_shape]) except KeyError: raise ValueError("%s LD statistic not recognised. 
Please " "use eithr 'r' or 'r2'" % ld_statistic) if type(ld_threshold) == float: statement.append(" --ld-window-r2 %0.3f " % ld_threshold) else: E.warn("threshold type not recognised, setting to default " "value of 0.2") self.statement["tasks"] = " ".join(statement) def _run_tasks(self, parameter=None, **kwargs): ''' Plink2 is capable of much more than just running basic association analyses. These include file processing, reformating, filtering, data summaries, PCA, clustering, GRM calculation (slow and memory intense), etc. multiple tasks can be added by separate calls to this function. For instance, adding phenotype and gender information using the update_samples task whilst change the file format. Tasks ----- * change_format - convert from input format to an alternative format after applying filters. * change_missing_values - alters the genotype or phenotype missing value into the value supplied. * update_variants - use this to fill in missing variant IDs, useful for data from exome or whole-genome sequencing that have non-standard IDs. * update_samples - update phenotype and sample information * flip_strands - flip the strand for alleles, swaps A for T and C for G. * flip_scan - use the LD-based scan to check SNPs have not had incorrect strand assignment. Particularly useful if cases and controls were genotyped separately, or the cohort was genotyped in different batches. * sort - sort files by individual and/or family IDs * merge - merge new filesets with reference fileset. * merge_mode - handling of missing values and overwriting values * find_duplicates - find and output duplicate variants based on bp position, or variant ID. Useful to output for the --exclude filtering flag. * remove_relations - remove one of a pair of individuals with IBS >= a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125). * check_gender - check imputed gender from non-pseudoautosomal X chromsome genotypes against self-reported gender * estimate_haplotypes - assign SNPs to haplotype blocks and get positional information ''' statement = [] # set up a dictionary of recognised tasks with key word argument # values as further dictionaries. 
Use the parameter argument # to pass arguments by value to string formatting task_map = {'change_format': {"plink_binary": " --make-bed ", "plink": " --recode ", "oxford": " --recode oxford ", "oxford_binary": " --recode oxford gen-gz ", "raw": " --recode A tabx "}, "change_missing_values": {"genotype": " --missing-genotype %s ", "phenotype": " --missing-phenotype %s "}, "update_variants": {"variant_ids": " --set-missing-var-ids %s ", "missing_id": " --mising-var-code %s ", "chromosome": " --update-chr %s ", "centimorgan": " --update-cm %s ", "name": " --update-name %s ", "alleles": " --update-alleles %s ", "map": " --update-map %s "}, "update_samples": {"sample_ids": " --update-ids %s ", "parents": " --update-parents %s ", "gender": " --update-sex %s %s "}, "flip_strands": {"all_samples": " --flip %s ", "subset": " --flip-subset %s "}, "flip_scan": {"default": " --flip-scan verbose ", "window": "--flip-scan --flip-scan-window %s ", "kb": " --flip-scan --flip-scan-window-kb %s ", "threshold": " --flip-scan --flip-scan-threshold %s "}, "sort": {"none": " --indiv-sort %s ", "natural": " --indiv-sort %s ", "ascii": " --indiv-sort %s ", "file": " --indiv-sort %s "}, "merge": {"plink": " --merge %s ", "binary_plink": " --bmerge %s "}, "merge_mode": {"default": " --merge-mode 1 ", "orginal_missing": " --merge-mode 2 ", "new_nonmissing": " --merge-mode 3 ", "no_overwrite": " --merge-mode 4 ", "force": " --merge-mode 5 ", "report_all": " --merge-mode 6 ", "report_nonmissing": " --merge-mode 7"}, "find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ", "id_match": " --list-duplicate-vars ids-only ", "suppress_first": " --list-duplicate-vars suppress-first"}, "remove_relations": {"cutoff": " --rel-cutoff %s "}, "check_gender": " --check-sex ", "pca": " --pca %s ", "estimate_haplotypes": " --blocks "} for task, value in kwargs.iteritems(): # check for PCA first as it is not nested in task_map if task == "pca": try: state = task_map[task] % value statement.append(state) except TypeError: statement.append(task_map[task]) statement.append elif task == "check_gender": statement.append(task_map[task]) elif task == "estimate_haplotypes": statement.append(task_map[task]) elif task != "parameter": try: # sub_task is a nested dictionary sub_task = task_map[task] try: assert sub_task[value] try: # gender has two string formats if value == "gender": gcol = 1 statement.append(sub_task[value] % (parameter, gcol)) else: # some tasks do not contain task values for the # parameter argument - catch these with the TypeError # exception statement.append(sub_task[value] % parameter) # the default for parameter is None, check this is appropriate if not parameter: E.warn("Parameter value is set to NoneType. 
" "Please check this is an appropriate value " "to pass for this task") else: pass except TypeError: statement.append(sub_task[value]) except KeyError: raise KeyError("No sub task found, see docs for details of " "recognised tasks") except KeyError: raise KeyError("Task not recognised, see docs for details of " "recognised tasks") else: pass # handle multiple tasks for a single run try: curr_tasks = self.statement["tasks"] new_tasks = " ".join(statement) self.statement["tasks"] = " ".join([curr_tasks, new_tasks]) except KeyError: self.statement["tasks"] = " ".join(statement) def _output_statistics(self, **kwargs): ''' Summary statistics are written to specific files dictated by the type of statistic Statistics ---------- * allele_frequency - writes out MAF to `plink`.frq, this can be modified with specific keywords. * missing_data - generates a report of data missingness, can be subset into within family and/or cluster reports * hardy_weinberg - calculates all HWE p-values using exact test statistics. For case/control studies reports are written for case, controls and combined. * mendel_errors - generates a Mendelian error report across all trios. There are 10 different codes responding to different Mendelian error scenarios. * inbreeding - calculate observed and expected homozygosity across individuals and F statistics. If the sample size is small then a file of MAFs is required. Inbreeding coefficients can also be reported on request using inbreeding_coef. * gender_checker - checks gender assignment against X chromosome genotypes. Gender values can also be imputed based on genotype information using gender_impute. * wrights_fst - calculate Wright's Fst statistic given a set of subpopulations for each autosomal diploid variant. Used in conjunction with the --within flag. ''' stats_map = {"allele_frequency": " --freq %s ", "missing_data": " --missing %s ", "hardy_weinberg": " --hardy midp ", "mendel_errors": " --mendel %s ", "inbreeding": " --het %s ", "inbreeding_coef": " --ibc ", "gender_checker": " --check-sex ", "gender_impute": " --impute-sex ", "wrights_fst": " --fst --within %s ", "case_control_fst": "--fst %s "} statement = [] for key, value in kwargs.iteritems(): if value: try: assert stats_map[key] statement.append(stats_map[key] % value) except KeyError: raise KeyError("statistic not recognised. Please " "consult the documentation for allowed " "options.") else: try: assert stats_map[key] flag = stats_map[key].rstrip("%s ") statement.append(flag) except KeyError: raise KeyError("statistic not recognised. Please " "consult the documentation for allowed " "options.") self.statement["stats"] = " ".join(statement) def run_association(self, association=None, model=None, run_options=None, permutation=False, n_perms=None, random_seed=None, permutation_options=None, covariates_file=None, covariates=None): ''' Construct a statement for a plink2 association analysis. QC filters are constructed from input during instantiation. run options include redirecting logging output, using parallelisation, defining number of threads to use, etc The default association uses the --assoc flag. Plink will check phenotype coding, if it is not case/control it assumes it is a continuous trait and uses linear regression. Alternative regression models that include covariates can be used, i.e. logistic and linear regression. 
key *** {CC} - applies to case/control analysis only {quant} - applies to quantitative trait only {CC/quant} - applies to both run_options ----------- ``--assoc``: * `fisher | fisher-midp` - uses Fisher's exact test to calculate association p-values or applies Lancaster's mid-p adjustment. {CC} * `counts` - causes --assoc to report allele counts instead of frequencies. {CC} * `set-test` - implements and tests the significance of variant sets. See documentation below. {CC/quant} * `qt-means` - generates a .qassoc.means file reporting trait means and standard deviations by genotype. {quant} * `lin` - reports the Lin et al (2006) statistic to be reported. If multiple testing adjustments and/or permutation is also used, they will be based on this statistic. {quant} ``--model``: * `fisher | fisher-midp | trend-only` - uses Fisher's exact test to calculate association p-values or applies Lancaster's mid-p adjustment. trend-only forces only a trend test to be performed. {CC} * `dom | rec | gen | trend` - use the specified test as the basis for the model permutation. If none are defined the result with the smallest p-value is reported. {CC} * --cell - sets the minimum number of observations per cell in the 2x3 contingency table. The default is 0 with the Fisher and Fiser-midp test, otherwise 5. {CC} ``--linear/logistic``: * `set-test` - implements and tests the significance of variant sets. See documentation below. {CC/quant} * `hide-covar` - removes the covariate specific sections from the results output. {CC/quant * `sex | no-x-sex` - `sex` adds sex as covariate to all models, whislt `no-x-sex` does not include gender into X-chromosome SNP models. {CC/quant} * `interaction` - adds in genotype X covariate interaction terms into the model. Can only be used with permutation is ``--tests`` is also specified. {CC/quant} * `beta` - reports the beta coefficients instead of the OR in a logistic model. {CC} * `standard-beta` - standardizes the phenotype and all predictor variables to zero mean and unit variance prior to regression (separate for each variant analysed). {quant} * `intercept` - includes the intercept in the output results. {quant} model ----- * `recessive` - `recessive` specifies the model assuming the A1 allele as recessive. {CC/quant} * `dominant` - `dominant` specifies the model assuming the A1 allele is dominant. {CC/quant} * `genotype` - `genotype` adds an additive effect/dominance deviation 2df joint test with two genotype variables in the test (coded 0/1/2 and 0/1/0). {CC/quant} * `trend` - forces a trend test to be performed. {CC/quant} * `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype coding. With permutation it will be based on the joint test instead of just the additive effects. This can be overriden using the `--tests` flag. {CC/quant} * `no-snp` - `no-snp` defines a regression of phenotype on covariates without reference to genotype data, except where `--conditon{-list}` is specified. If used with permuation, test results will be reported for every covariate. {CC/quant} permutation ----------- If permutation is True, run an adaptive Monte Carlo permutation test. If n_perms is set, this will run a max(T) permutation test with the n replications. A random seed will need to be provided. * `perm-count` - this alters the permutation output report to include counts instead of frequencies covariates ---------- These should be provided in a separate file. Specifying which covariates to include can be done as either a comma-separated list of covariate names or numbers. 
        These numbers will correspond to the (n+2)th covariate file column
        as per the plink documentation.
        '''

        # model map maps common option effects onto specific syntax
        model_map = {"--logistic": {"recessive": "recessive",
                                    "dominant": "dominant",
                                    "genotype": "genotypic"},
                     "--linear": {"recessive": "recessive",
                                  "dominant": "dominant",
                                  "genotype": "genotypic"},
                     "--model": {"recessive": "rec",
                                 "dominant": "dom",
                                 "genotype": "gen"}}

        statement = []

        # construct analysis flags
        # add model, i.e. additive, recessive, dominant, etc.
        # see docstring for details.  Make sure correct modifier is used
        # with a mapping dictionary
        if association == "logistic":
            statement.append(" --logistic ")
            m_map = model_map["--logistic"]
            if model:
                statement.append(m_map[model])
            else:
                pass
        elif association == "linear":
            statement.append(" --linear ")
            m_map = model_map["--linear"]
            if model:
                statement.append(m_map[model])
            else:
                pass
        elif association == "model":
            statement.append(" --model ")
            m_map = model_map["--model"]
            statement.append(m_map[model])
        else:
            statement.append(" --assoc ")

        # add in run options.  These need to be in their correct
        # format already
        if run_options:
            modifiers = " ".join(run_options)
            statement.append(modifiers)
        else:
            pass

        # permutation should have a random seed set by the user.  Allow
        # this to set its own seed if one is not provided, but report it
        # in the log file
        if permutation:
            try:
                assert random_seed
            except AssertionError:
                random_seed = random.randint(0, 100000000)
                E.warn("No seed is provided for the permutation test. "
                       "Setting seed to %s. Record this for future "
                       "replicability" % random_seed)
            if n_perms:
                statement.append(" mperm=%i --seed %s " % (n_perms,
                                                           random_seed))
            else:
                statement.append(" perm --seed %s " % (random_seed))
        else:
            pass

        # if using linear or logistic, covariates can be added into the model
        # to adjust for their effects - assumes fixed effects of covariates
        # mixed models are not yet implemented in Plink2.
        if covariates:
            covars = covariates.split(",")
            if len(covars) > 1:
                if type(covars[0]) == str:
                    m_covar = " --covar-name %s " % covariates
                elif type(covars[0]) == int:
                    m_covar = " --covar-number %s " % covariates
                else:
                    # if none are specified then don't adjust the model for
                    # any and log a warning
                    E.warn("Covariate header or numbers are not recognised. "
                           "No covariates will be included in the model. "
                           "Please specify them exactly")
                    covariates = None
                    covariates_file = None
            elif len(covars) == 1:
                if type(covars[0]) == str:
                    m_covar = " --covar-name %s " % covariates
                elif type(covars[0]) == int:
                    m_covar = " --covar-number %i " % covariates
                else:
                    # if none are specified then don't adjust the model for
                    # any and log a warning
                    E.warn("Covariate header or numbers are not recognised. "
                           "No covariates will be included in the model. "
                           "Please specify them exactly")
                    covariates = None
                    covariates_file = None

        if covariates and covariates_file:
            statement.append(" --covar %s %s " % (covariates_file,
                                                  m_covar))
        elif covariates and not covariates_file:
            E.warn("No covariate file specified. None included in model.")
        elif covariates_file and not covariates:
            E.warn("No covariates specified to include in the model. "
                   "None included")
        else:
            pass

        self.statement["assoc"] = " ".join(statement)

    def PCA(self, n_pcs="20"):
        '''
        Perform PCA analysis on previously generated GRM, output the
        first n principal components, default = 20
        '''

        self._run_tasks(pca=n_pcs)

    def _dimension_reduction(self, **kwargs):
        '''
        Use PCA to perform dimensionality reduction on
        input samples.
A PCA can be calculated using a subset of samples which can then be projected on to other samples. ''' # FINISH ME!!!! def _detect_interactions(self, method=None, modifier=None, set_file=None, set_mode=None, report_threshold=None, sig_threshold=None, covariates_file=None, covariates=None): ''' Detect epistatic interactions between SNPs using either an inaccurate scan (fast-epistasis) or a fully saturated linear model Methods ------- fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype count tables to test for interactions. Can be modified to use a likelihood ration test `boost` or a joint-effects test `joint-effects`. Default is `joint-effects`. epistasis - uses a linear model to test for interactions between additive effects after main effects. Logistic regression for case/control and linear regression for quantitative traits. two_locus - tests a single interaction between two variants using joint genotype counts and frequencies. adjusted - allows adjustment for covariates in the interaction test, and also adjusts for main effects from both the test and target SNP. Requires and R plugin script. ''' interact_map = {"fast_epistasis": " --fast-epistasis %s ", "epistasis": " --epistasis %s ", "two_locus": " --twolocus %s ", "adjusted": " --R %s "} statement = [] if modifier: statement.append(interact_map[method] % modifier) else: modifier = "" statement.append(interact_map[method] % modifier) if covariates_file: statement.append("--covar %s --covar-name %s " % (covariates_file, covariates)) else: pass if set_mode and set_file: # does not work with two-locus test if method == "two_locus" and set_mode: E.warn("Two locus test cannot be used in conjunction " "with a set-based test.") elif set_mode: statement.append(" %s --set %s " % (set_mode, set_file)) else: pass else: pass # alter reporting of significant interactions and significance # level of interactions if report_threshold: statement.append(" --epi1 %0.3f " % float(report_threshold)) else: pass if sig_threshold: statement.append(" --epi2 %0.3f " % float(sig_threshold)) else: pass self.statement["epistasis"] = " ".join(statement) def _matrices(self, matrix_type, shape="triangle", compression=None, options=None): ''' Calculate a number of different distance matrices: realised genetic relationship matrix relationship covariance matrix identity by descent/state matrix hamming distance matrix * matrix_type - matrix to compute. Can be either IBS, 1 - IBS, Hamming, GRM ''' statement = [] if matrix_type == "hamming": flag = " --distance " elif matrix_type == "ibs": flag = " --distance ibs " elif matrix_type == "genomic": flag = " --distance 1-ibs " elif matrix_type == "grm": flag = " --make-grm-bin " if options: statement.append(" ".join([flag, shape, compression, options])) elif matrix_type == "grm": statement.append(flag) else: statement.append(" ".join([flag, shape, compression])) return " ".join(statement) def _qc_methods(self, parameter=None, **kwargs): '''' Perform QC on genotyping data, SNP-wise and sample-wise. All arguments are passed as key word arguments, except cases detailed in `Parameters` where they are passed with the ``parameter`` argument. Methods ------- * ld_prune - generate a list of SNPs in linkage equilibrium by pruning SNPs on either an LD statistic threshold, i.e. 
r^2, or use a variance inflation factor (VIF) threshold * heterozygosity - calculate average heterozygosity from each individual across a set of SNPs, threshold on individuals with deviation from expected proportions * ibd - calculate the genetic relationship of individuals to infer relatedness between individuals, threshold on given degree of relatedness, e.g. IBD > 0.03125, 3rd cousins * genetic_gender - estimate the gender of an individual from the X chromosome genotypes - correlate with reported gender and output discrepancies * ethnicity_pca - perform PCA using a subset of independent SNPs to infer genetic ancestry. Compare and contrast this to individuals reported ancestry. Report discrepancies and individuals greater than a threshold distance away from a reference population. * homozygosity - identifies sets of runs of homozygosity within individuals. These may be indicative of inbreeding, systematic genotyping errors or regions under selection. Parameters ---------- Method parameters can also be passed through this function as keyword=value pairs. * ld_prune: `kb` - this modifier changes the window resolution to kb rather than bp. `r2` - the r^2 threshold above which SNPs are to be removed `vif` - the VIF threshold over which SNPs will be removed `window` - window size to calculate pair-wise LD over `step` - step size to advance window by ''' qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ", "VIF": " --indep %s %s %s "}, "heterozygosity": {"gz": " --het gz", "raw": " --het "}, "ibd": {"relatives": " --genome gz rel-check ", "full": " --genome gz full ", "norm": " --genome gz "}, "genetic_gender": "none", "ethnicity_pca": "none", "homozygosity": {"min_snp": " --homozyg-snp %s ", "min_kb": " --homozyg-kb %s ", "default": " --homozyg ", "density": " --homozyg-density ", "set_gap": " --homozyg-gap ", "snp_window": " --homozyg-window-snp %s ", "het_max": " --homozyg-het %s "}} task_dict = {} state = [] # put everything in an accessible dictionary first for task, value in kwargs.iteritems(): task_dict[task] = value # LD pruning can be passed multiple parameters, # handle this separately try: sub_task = task_dict["ld_prune"] ld_prune_task = qc_dict["ld_prune"] try: step = task_dict["step"] except KeyError: raise AttributeError("No step size found, please " "pass a step size to advance the " "window by") try: window = task_dict["window"] try: task_dict["kb"] window = "".join([window, "kb"]) task_dict.pop("kb", None) except KeyError: pass except KeyError: raise AttributeError("No window size found. 
Please input " "a window size to prune over") try: threshold = task_dict["threshold"] except KeyError: raise AttributeError("No threshold value, please input " "a value to LD prune SNPs on") # add in the kb if it is passed as an argument state.append(ld_prune_task[sub_task] % (window, step, threshold)) task_dict.pop("threshold", None) task_dict.pop("ld_prune", None) task_dict.pop("window", None) task_dict.pop("step", None) except KeyError: pass for task,value in task_dict.iteritems(): try: sub_task = qc_dict[task] try: state.append(sub_task[value] % parameter) except TypeError: state.append(sub_task[value]) except KeyError: raise AttributeError("Task not found, please see " "documentation for available features") self.statement["QC"] = " ".join(state) def build_statement(self, infiles, outfile, threads=None, memory="60G", parallel=None): ''' Build statement and execute from components ''' statement = [] exec_state = self.executable # calls to function add to the self.statement dictionary try: statement.append(self.statement["program"]) except KeyError: raise AttributeError("Input files and format not detected") try: statement.append(self.statement["QC"]) except KeyError: pass try: statement.append(self.statement["filters"]) except KeyError: pass try: statement.append(self.statement["tasks"]) except KeyError: pass try: statement.append(self.statement["stats"]) except KeyError: pass try: statement.append(self.statement["assoc"]) except KeyError: pass try: statement.append(self.statement["matrix"]) except KeyError: pass try: statement.append(self.statement["epistasis"]) except KeyError: pass if threads: statement.append(" --threads %i " % threads) else: pass if not memory: pass elif memory != "60G": memory = int(memory.strip("G")) * 1000 statement.append(" --memory %i " % memory) else: statement.append(" --memory 60000 ") # add output flag # outfile needs to be complete path for Plink to save # results properly - check if it starts with '/', # if so is already a full path if not parallel: if os.path.isabs(outfile): statement.append(" --out %s " % outfile) else: outpath = "/".join([os.getcwd(), outfile]) statement.append(" --out %s " % outpath) os.system(" ".join(statement)) else: # parallelisation only really applies to GRM calculation # at the moment <- need to generalise # if parallelisation is used, invoke temp files # then agglomerate files statements = [] if os.path.isabs(outfile): outpath = outfile else: outpath = "/".join([os.getcwd(), outfile]) for i in range(1, parallel+1): p_state = statement[:] # copy list, assigning just makes a pointer p_state.append(" --parallel %i %i " % (i, parallel)) p_state.append(" --out %s.%i " % (outpath, i)) statements.append(" ".join(p_state)) os.system(";".join(statements)) class PlinkDev(Plink2): ''' Run various Plink functions and analysis, including file processing, GRM calculation, PCA and other GWA tasks Require Plink v1.9_devel to be in the users PATH variable as ``plinkdev`` to distinguish it from Plink v1.07 and v1.9. Currently uses Nov 11 development build. 
''' def __init__(self, files, options=None, settings=None, design=None): self.infiles = files self.options = options self.settings = settings self.design = design self.executable = "plinkdev" self.statement = {} self.filters = [] class GWASResults(object): ''' A class for handling the results from a GWA, used for plotting and post-analysis QC ''' def __init__(self, assoc_file, **kwargs): # if the assoc_file is a list of multiple files, # then merge them into a single files if type(assoc_file) == list and len(assoc_file) > 1: E.info("multiple results files detected") self.infiles = assoc_file self.infile = None self.results = self.parse_genome_wide(assoc_file) else: E.info("single results file detected") self.infile = assoc_file self.infiles = None # results is a pandas dataframe to operate on self.results = self.get_results(assoc_file, **kwargs) def parse_genome_wide(self, association_files): ''' Accept a list of results files, merge them together and output as a single dataframe Will this take a lot of memory?? ''' file0 = association_files.pop(0) df = self.get_results(file0) for afile in association_files: _df = self.get_results(afile) df = df.append(_df) df["CHR"] = df["CHR"].astype(np.int64) df.sort_values(by=["CHR", "BP"], inplace=True) return df def get_results(self, association_file, epistasis=False): ''' Parse a GWA results file and return the table ''' # use Pandas for now - try something different later # SQLite DB maybe? # inconsistent number of white spaces between # fields means Pandas parsing breaks down # fields need to be the correct data type, # i.e. BP = int, P = float, SNP = str, etc # if the file has already been parsed and processed # just assign it instead # epistasis results don't have a header try: peek = pd.read_table(association_file, nrows=5, sep="\s*", header=0, index_col=None, engine='python') except StopIteration: peek = pd.read_table(association_file, nrows=5, sep="\t", header=0, index_col=None) if epistasis: try: results_frame = pd.read_table(association_file, sep="\s*", header=0, index_col=None) except StopIteration: results_frame = pd.read_table(association_file, sep="\t", header=None, index_col=None) # results from fast epistasis are different to others if results_frame.shape[1] == 7: results_frame.columns = ["CHR1", "SNP1", "CHR", "SNP", "OR", "STAT", "P"] else: results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR", "SE", "STAT", "P"] results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"], errors="coerce") results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"], errors="coerce") return results_frame else: try: assert peek["log10P"].any() results_frame = pd.read_table(association_file, sep="\t", header=0, index_col=None, dtype={"BP": np.int64, "NMISS": np.int64}) return results_frame except KeyError: pass l_count = 0 E.info("parsing file: %s" % association_file) with open(association_file, "r") as ifile: for line in ifile: # check if spacing is whitespace or tab if len(line.split(" ")) > 1: parsed = line.split(" ") elif len(line.split("\t")) > 1: parsed = line.split("\t") else: raise IOError("file separator not recognised. 
" "Must be whitespace or tab") # remove multiple blank spaces for i in range(parsed.count('')): parsed.remove('') # get rid of the newline try: parsed.remove('\n') except ValueError: parsed = [(px).rstrip("\n") for px in parsed] if l_count == 0: header = [iy.upper() for ix, iy in enumerate(parsed)] head_idx = [ix for ix, iy in enumerate(parsed)] map_dict = dict(zip(head_idx, header)) res_dict = dict(zip(header, [[] for each in header])) l_count += 1 else: col_idx = [lx for lx, ly in enumerate(parsed)] col = [ly for lx, ly in enumerate(parsed)] for i in col_idx: res_dict[map_dict[i]].append(col[i]) l_count += 1 # substract one from the index for the header column df_idx = range(l_count-1) results_frame = pd.DataFrame(res_dict, index=df_idx) results_frame.fillna(value=1.0, inplace=True) try: results_frame = results_frame[results_frame["TEST"] == "ADD"] except KeyError: pass # need to handle NA as strings results_frame["P"][results_frame["P"] == "NA"] = 1.0 results_frame["BP"] = [int(bx) for bx in results_frame["BP"]] results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]] try: results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0 results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]] except KeyError: try: results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0 results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]] except KeyError: try: results_frame["T"][results_frame["T"] == "NA"] = 1.0 results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]] except KeyError: pass try: results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0 results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]] except KeyError: pass try: results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0 results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]] except KeyError: pass try: results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0 results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]] except KeyError: pass try: results_frame["OR"][results_frame["OR"] == "NA"] = 1.0 results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]] except KeyError: try: results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0 results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]] except KeyError: results_frame["B"][results_frame["B"] == "NA"] = 0.0 results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]] return results_frame def plotManhattan(self, save_path, resolution="chromosome", write_merged=True): ''' Generate a basic manhattan plot of the association results Just deal with chromosome-by-chromosome for now. 
''' # use the python ggplot plotting package # need to calculate -log10P values separately self.results["log10P"] = np.log10(self.results["P"]) # or using rpy2 py2ri.activate() R('''suppressPackageStartupMessages(library(ggplot2))''') R('''suppressPackageStartupMessages(library(scales))''') R('''suppressPackageStartupMessages(library(qqman))''') R('''sink(file="sink.text")''') r_df = py2ri.py2ri_pandasdataframe(self.results) R.assign("assoc.df", r_df) if resolution == "chromosome": R('''assoc.df$CHR <- factor(assoc.df$CHR, ''' '''levels=levels(ordered(unique(assoc.df$CHR))),''' '''labels=unique(paste0("chr", assoc.df$CHR)))''') R('''nchrom <- length(unique(assoc.df$CHR))''') R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''') R('''names(myCols) <- sort(unique(assoc.df$CHR))''') R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''') R('''bp_indx <- seq_len(dim(assoc.df[1]))''') R('''assoc.df$BPI <- bp_indx''') R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + ''' '''geom_point(size=1) + colScale + ''' '''geom_hline(yintercept=8, linetype="dashed", colour="blue") + ''' '''theme_bw() + labs(x="Chromosome position (bp)", ''' '''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + ''' '''theme(axis.text.x = element_text(size=8))''') R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path) R('''print(p)''') R('''dev.off()''') elif resolution == "genome_wide": R('''nchroms <- length(unique(assoc.df$CHR))''') R('''png("%s", width=720, height=540)''' % save_path) R('''p <- manhattan(assoc.df, main="Manhattan plot",''' '''ylim=c(0, 50), cex=0.9, suggestiveline=T,''' '''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), ''' '''col=c("#8B1A1A","#8470FF"))''') R('''print(p)''') R('''dev.off()''') R('''sink(file=NULL)''') if write_merged: return self.results else: return False def plotQQ(self, save_path, resolution="chromosome"): ''' Generate a QQ-plot of expected vs. observed test statistics ''' self.results["log10P"] = np.log(self.results["P"]) py2ri.activate() R('''suppressPackageStartupMessages(library(ggplot2))''') R('''suppressPackageStartupMessages(library(scales))''') R('''suppressPackageStartupMessages(library(qqman))''') r_df = py2ri.py2ri_pandasdataframe(self.results) R.assign("assoc.df", r_df) R('''png("%s", width=720, height=540)''' % save_path) R('''qq(assoc.df$P)''') R('''dev.off()''') def plotEpistasis(self, save_path, resolution="chromosome"): ''' Generate both manhattan plot of the SNPs tested for epistasis with their target SNP, and a QQplot of the association test p-values ''' # plot QQplot qq_save = "_".join([save_path, "qqplot.png"]) self.plotQQ(qq_save) manhattan_save = "_".join([save_path, "manhattan.png"]) self.plotManhattan(manhattan_save, resolution=resolution, write_merged=False) def getHits(self, threshold=0.00000005): ''' Pull out regions of association by selecting all SNPs with association p-values less than a certain threshold. Defaults is genome-wide signifance, p < 5x10-8. Then select region +/- 1.5Mb of the index SNP. ''' hits_df = self.results[self.results["P"] <= threshold] # find the range of SNPs with 3Mb of each index SNP contig_group = hits_df.groupby(["CHR"]) # there may be multiple independent hits on a given # chromosome. Need to identify independent regions. # Independent regions are defined by their statistical # independence, not distance. 
Just take all SNPs # in 3Mb of the lead SNP for each signal # this will create overlaps of associatation signals for contig, region in contig_group: region.index = region["BP"] chr_df = self.results[self.results["CHR"] == contig] chr_df.index = chr_df["BP"] # find independent regions and output consecutively # if only a single SNP above threshold then there is # only one independent region!! if len(region) > 1: independents = self.findIndependentRegions(region) indi_group = independents.groupby("Group") else: region["Group"] = 1 indi_group = region.groupby("Group") for group, locus in indi_group: # if there is only a single variant should # the region be kept? Likely a false # positive if min(locus["BP"]) == max(locus["BP"]): pass else: try: try: locus.loc[:, "STAT"] = abs(locus["STAT"]) locus.sort_values(by="STAT", inplace=True) except KeyError: locus.loc[:, "T"] = abs(locus["T"]) locus.sort_values(by="STAT", inplace=True) except KeyError: locus.sort_values(by="CHISQ", inplace=True) index_bp = locus.iloc[0]["BP"] E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"])) left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"]) right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"]) range_df = chr_df.loc[left_end : right_end, :] max_stat = max(abs(range_df["STAT"])) yield contig, range_df def extractSNPs(self, snp_ids): ''' Extract a specific set of SNP results Arguments --------- snp_ids: list a list of SNP IDs to extract from the GWAS results Returns ------- snp_results: pandasd.Core.DataFrame ''' self.results.index = self.results["SNP"] snp_results = self.results.loc[snp_ids] return snp_results def findIndependentRegions(self, dataframe): ''' Find the number of independent regions on a chromsome. Uses R distance and tree cutting functions ''' # mong dataframe into R py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(dataframe) R.assign("rdf", r_df) R('''mat <- as.matrix(rdf$BP)''') # get distances then cluster, chop tree at 1x10^7bp R('''dist.mat <- dist(mat, method="euclidean")''') R('''clusts <- hclust(dist.mat, "average")''') R('''cut <- cutree(clusts, h=1e6)''') R('''out.df <- rdf''') R('''out.df$Group <- cut''') regions_df = py2ri.ri2py_dataframe(R["out.df"]) return regions_df def mergeFrequencyResults(self, freq_dir, file_regex): ''' Merge GWAS results with frequency information, and format for GCTA joint analysis input ''' # create a dummy regex to compare # file_regex type against test_re = re.compile("A") if type(file_regex) == str: file_regex = re.compile(file_regex) elif type(file_regex) == type(test_re): pass else: raise TypeError("Regex type not recognised. 
Must" "be string or re.SRE_Pattern") all_files = os.listdir(freq_dir) freq_files = [fx for fx in all_files if re.search(file_regex, fx)] gwas_df = self.results df_container = [] for freq in freq_files: freq_file = os.path.join(freq_dir, freq) E.info("Adding information from {}".format(freq_file)) # files may or may not be tab-delimited try: _df = pd.read_table(freq_file, sep="\s*", header=0, index_col=None, engine='python') except StopIteration: _df = pd.read_table(freq_file, sep="\t", header=0, index_col=None) merge_df = pd.merge(self.results, _df, left_on=["CHR", "SNP"], right_on=["CHR", "SNP"], how='left') df_container.append(merge_df) count = 0 for df in df_container: if not count: gwas_df = df count += 1 else: gwas_df = gwas_df.append(df) E.info("Calculating Z scores and SEs") z_scores = -0.862 + np.sqrt(0.743 - 0.2404 *\ np.log(gwas_df.loc[:, "P"])) se = np.log(gwas_df.loc[:, "OR"])/z_scores gwas_df.loc[:, "Z"] = z_scores gwas_df.loc[:, "SE"] = se gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"]) out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"] out_df = gwas_df[out_cols] # need to remove duplicates, especially those # that contain NaN for A2 and MAF out_df = out_df.loc[~np.isnan(out_df["MAF"])] return out_df ########################################################## # unbound methods that work on files and data structures # ########################################################## def plotMapPhenotype(data, coords, coord_id_col, lat_col, long_col, save_path, xvar, var_type, xlabels=None, level=None): ''' Generate a map of the UK, with phenotype data overlaid ''' # merge co-ordinate data with phenotype data merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col, right_on=coord_id_col, how='inner') # pheno column and set level of categorical variable if xlabels and var_type == "categorical": # convert to string type as a categorical variable # drop NA observations from the merged data frame na_mask = pd.isnull(merged_df.loc[:, xvar]) merged_df = merged_df[~na_mask] rvar = merged_df.loc[:, xvar].copy() nvar = pd.Series(np.nan_to_num(rvar), dtype=str) var = [v for v in set(nvar)] var.sort() # recode the variables according to the input labels xlabs = xlabels.split(",") lbls = [str(xlabs[ix]) for ix in range(len(var))] for xv in range(len(var)): nvar[nvar == var[xv]] = lbls[xv] merged_df.loc[:, "cat_var"] = nvar else: pass if level: lvar = merged_df.loc[:, "cat_var"].copy() mask = lvar.isin([level]) lvar[mask] = 1 lvar[~mask] = 0 lvar = lvar.fillna(0) merged_df.loc[:, "dichot_var"] = lvar else: pass # push the df into the R env py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(merged_df) R.assign("pheno.df", r_df) # setup the map and plot the points R('''suppressPackageStartupMessages(library(maps))''') R('''suppressPackageStartupMessages(library(mapdata))''') R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",''' '''"Ireland", "Isle of Man", "Wales:Anglesey"), ''' '''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''') # colour by reference, or a colour for each discrete value if level: R('''red <- rep("#FF0000", ''' '''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''') R('''black <- rep("#000000", ''' '''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''') R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals()) R('''map(uk_map)''') R('''points((-pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], ''' '''(-pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals()) 
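        # overlay individuals outside the selected level (dichot_var == 0) in black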
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], ''' '''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals()) R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),''' '''fill=c("#000000", "#FF0000"))''' % locals()) R('''dev.off()''') else: R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals()) R('''map(uk_map)''') R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", ''' '''col=factor(pheno.df$cat_var))''' % locals()) R('''legend('topleft', legend=unique(pheno.df$cat_var),''' '''fill=unique(pheno.df$cat_var))''' % locals()) R('''dev.off()''') def plotPhenotype(data, plot_type, x, y=None, group=None, save_path=None, labels=None, xlabels=None, ylabels=None, glabels=None, var_type="continuous"): ''' Generate plots of phenotypes using ggplot ''' # change data format if necessary and convert nan/NA to missing if not y and var_type == "categorical": var = np.nan_to_num(data.loc[:, x].copy()) data.loc[:, x] = pd.Series(var, dtype=str) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass elif not y and var_type == "integer": var = np.nan_to_num(data.loc[:, x].copy()) data.loc[:, x] = pd.Series(var, dtype=np.int64) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass elif not y and var_type == "continuous": var = data.loc[:, x].copy() data.loc[:, x] = pd.Series(var, dtype=np.float64) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass elif y and var_type == "categorical": xvar = np.nan_to_num(data.loc[:, x].copy()) yvar = np.nan_to_num(data.loc[:, y].copy()) data.loc[:, x] = pd.Series(xvar, dtype=str) data.loc[:, y] = pd.Series(yvar, dtype=str) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass elif y and var_type == "integer": xvar = np.nan_to_num(data.loc[:, x].copy()) yvar = np.nan_to_num(data.loc[:, y].copy()) data.loc[:, x] = pd.Series(xvar, dtype=np.int64) data.loc[:, y] = pd.Series(yvar, dtype=np.int64) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass elif y and var_type == "continuous": # NAs and NaNs should be handled by ggplot xvar = data.loc[:, x].copy() yvar = data.loc[:, y].copy() data.loc[:, x] = pd.Series(xvar, dtype=np.float64) data.loc[:, y] = pd.Series(yvar, dtype=np.float64) if group: gvar = np.nan_to_num(data.loc[:, group].copy()) data.loc[:, group] = pd.Series(gvar, dtype=str) else: pass R('''suppressPackageStartupMessages(library(ggplot2))''') # put the pandas dataframe in to R with rpy2 py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(data) R.assign("data_f", r_df) # plotting parameters, including grouping variables and labels # axis labels try: labs = labels.split(",") except AttributeError: labs = [] # if variable labels have been provided then assume they are # categorical/factor variables. 
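    # e.g. xlabels="control,case" relabels the sorted levels of a 0/1-coded variable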
# assume variable labels are input in the correct order if xlabels: try: unique_obs = len(set(data.loc[:, x])) xfact = len(xlabels.split(",")) if xfact == unique_obs: R('''lvls <- unique(data_f[,"%(x)s"])''' % locals()) lbls = ro.StrVector([ri for ri in xlabels.split(",")]) R.assign("lbls", lbls) R('''lvls <- lvls[order(lvls, decreasing=F)]''') R('''data_f[,"%(x)s"] <- ordered(data_f[,"%(x)s"], ''' '''levels=lvls, labels=lbls)''' % locals()) else: E.warn("the number of labels does not match the " "number of unique observations, labels not " "used.") pass except AttributeError: xlabels = None else: pass if glabels: unique_obs = len(set(data.loc[:, group])) gfact = len(glabels.split(",")) if gfact == unique_obs: R('''lvls <- unique(data_f[, "%(group)s"])''' % locals()) lbls = ro.StrVector([rg for rg in glabels.split(",")]) R.assign("lbls", lbls) R('''lvls <- lvls[order(lvls, decreasing=F)]''') R('''data_f[,"%(group)s"] <- ordered(data_f[,"%(group)s"], ''' '''levels=lvls, labels=lbls)''' % locals()) else: E.warn("the number of labels does not match the " "number of unique observations, labels not " "used.") pass # start constructing the plot # if X and Y are specified, assume Y is a variable to colour # observations by, unless group is also set. # If Y and group then colour by group and split by Y if y: R('''p <- ggplot(aes(x=%s, y=%s), data=data_f)''' % (x, y)) if plot_type == "histogram": if group: R('''p <- p + geom_histogram(aes(colour=%(group)s)) + ''' '''facet_grid(. ~ %(y)s)''' % locals()) else: R('''p <- p + geom_histogram(aes(colour=%(y)s))''' % locals()) elif plot_type == "barplot": if group: R('''p <- p + geom_bar(aes(colour=%(group)s)) + ''' '''facet_grid(. ~ %(y)s)''' % locals()) else: R('''p <- p + geom_bar(aes(colour=%(y)s))''' % locals()) elif plot_type == "density": if group: R('''p <- p + geom_density(aes(colour=%(group)s)) + ''' '''facet_grid(. ~ %(y)s)''' % locals()) else: R('''p <- p + geom_density(aes(colour=%(y)s))''' % locals()) elif plot_type == "boxplot": if group: R('''p <- p + geom_boxplot(group=%(group)s,''' '''aes(x=factor(%(x)s), y=%(y)s, fill=%(group)s))''' % locals()) else: R('''p <- p + geom_boxplot(aes(colour=%(x)s))''' % locals()) elif plot_type == "scatter": if group: R('''p <- p + geom_point(size=1, aes(colour=%(group)s))''' % locals()) else: R('''p <- p + geom_point(size=1)''') if len(labs) == 1: xlab = labs[0] R('''p <- p + labs(x="%s")''' % xlab) elif len(labs) == 2: xlab = labs[0] ylab = labs[1] R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s")''' % locals()) elif len(labs) == 3: xlab = labs[0] ylab = labs[1] title = labs[2] R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s", ''' '''title="%(title)s")''' % locals()) elif len(labs) == 4: xlab = labs[0] ylab = labs[1] glab = labs[2] title = labs[3] R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s",''' '''title="%(title)s")''' % locals()) # need to add in guide/legend title else: R('''p <- ggplot(data=data_f)''') if plot_type == "histogram": if group: R('''p <- p + geom_histogram(aes(%(x)s)) + ''' '''facet_grid(. ~ %(group)s)''' % locals()) else: R('''p <- p + geom_histogram(aes(%s))''' % x) elif plot_type == "barplot": if group: R(''' p <- p + geom_bar(aes(%(x)s)) + ''' '''facet_grid(. ~ %(group)s)''') else: R('''p <- p + geom_bar(aes(%s))''' % x) elif plot_type == "density": if group: R('''p <- p + geom_density(aes(%(x)s)) + ''' '''facet_grid(. 
~ %(group)s)''' % locals()) else: R('''p <- p + geom_density(aes(%s))''' % x) elif plot_type == "boxplot": if group: R('''p <- p + geom_boxplot(aes(y=%(x)s, ''' '''x=factor(%(group)s)))''' % locals()) else: raise AttributeError("Y or group variable is missing") if len(labs) == 1: xlab = labs[0] R('''p <- p + labs(x="%s")''' % xlab) elif len(labs) == 2: xlab = labs[0] title = labs[1] R('''p <- p + labs(x="%(xlab)s", ''' '''title="%(title)s")''' % locals()) elif len(labs) == 3: if group: xlab = labs[0] glab = labs[1] title = labs[2] R('''p <- p + labs(x="%(glab)s", y="%(xlab)s",''' '''title="%(title)s")''' % locals()) else: E.warn("too many labels provided, assume first is X, " "and second is plot title") xlab = labs[0] title = labs[1] R('''p <- p + labs(x="%(xlab)s", ''' '''title="%(title)s")''' % locals()) # the default theme is bw R('''p <- p + theme_bw()''') R('''png("%(save_path)s")''' % locals()) R('''print(p)''') R('''dev.off()''') def parseFlashPCA(pcs_file, fam_file): ''' Parse the principal components file from FlashPCA and match with individual identifiers. This assumes the output order of FlashPCA is the same as the input order in the .fam file ''' pc_df = pd.read_table(pcs_file, sep=None, header=None, index_col=None) # add a header to the pc_df file headers = ["PC%i" % m for n, m in enumerate(pc_df.columns)] pc_df.columns = headers fam_df = pd.read_table(fam_file, sep="\t", header=None, index_col=None) fam_df.columns = ["FID", "IID", "PAR", "MAT", "GENDER", "PHENO"] pc_df[["FID", "IID"]] = fam_df.iloc[:, :2] return pc_df def plotPCA(data, nPCs, point_labels, save_path, headers, metadata=None, multiplot=False): ''' Plot N principal components from a PCA either as a single plot of the first 2 PCs, a grid plot of N PCs. Arguments --------- data: string PATH to file containing principal components nPCs: int number of principal components to plot. If this value is > 2, then multiplot will be enabled automatically point_labels: vector a vector of labels of length correpsonding to the number of rows in the data file. These are used to colour the points in the plot with relevant metadata. Alternatively, can be the column header in the metadata file that corresponds to annotations save_path: string An absolute PATH to save the plot(s) to headers: boolean whether the `data` file contains header delineating the columns metadata: string file containing metadata to annotate plot with, includes point_labels data multiplot: boolean If True, generate a grid of scatter plots with successive PCs plotted against each other Returns ------- None ''' py2ri.activate() if metadata: meta_df = pd.read_table(metadata, sep="\t", header=0, index_col=None) else: pass labels = meta_df[["FID", "IID", point_labels]] merged = pd.merge(data, labels, left_on="FID", right_on="FID", how='inner') # TO DO: enable multiplotting of many PCs r_df = py2ri.py2ri_pandasdataframe(merged) R.assign("pc.df", r_df) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''pc.df[["%(point_labels)s"]] <- as.factor(pc.df[["%(point_labels)s"]])''' % locals()) R('''p_pcs <- ggplot(pc.df, aes(x=PC1, y=PC2, colour=%s)) + ''' '''geom_point(size=1) + theme_bw() + ''' '''labs(x="PC1", y="PC2", title="PC1 vs. PC2 LD trimmed genotypes")''' % point_labels) R('''png("%s")''' % save_path) R('''print(p_pcs)''') R('''dev.off()''') def countByVariantAllele(ped_file, map_file): ''' Count the number of individuals carrying the variant allele for each SNP. Count the number of occurences of each allele with the variant allele of each other SNP. 
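
    A minimal usage sketch (file names are illustrative):

        maf_df = countByVariantAllele("cohort.ped", "cohort.map")
        maf_df.to_csv("variant_allele_counts.tsv", sep="\t")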
Requires ped file genotyping to be in format A1(minor)=1, A2=2 ''' # parse the ped file - get the variant column headers from # the map file - no headers with these files # variant order in the map file matters, use an ordered dict variants = collections.OrderedDict() with open(map_file, "r") as mfile: for snp in mfile.readlines(): attrs = snp.split("\t") snpid = attrs[1] variants[snpid] = {"chr": attrs[0], "pos": attrs[-1].strip("\n")} variant_ids = variants.keys() # store genotype matrix as an array # rows and columns are variant IDs homA1 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) homA2 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) het = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) tcount = 0 with open(ped_file, "r") as pfile: for indiv in pfile.readlines(): indiv = indiv.strip("\n") indiv_split = indiv.split("\t") fid = indiv_split[0] iid = indiv_split[1] mid = indiv_split[2] pid = indiv_split[3] gender = indiv_split[4] phen = indiv_split[5] genos = indiv_split[6:] #genos = ["".join([alleles[i], # alleles[i+1]]) for i in range(0, len(alleles), 2)] tcount += 1 # get genotype counts for i in range(len(genos)): # missing genotypes are coded '00' in plink format if genos[i] == "00": pass elif genos[i] == "11": homA1[i, i] += 1 elif genos[i] == "12": het[i, i] += 1 else: homA2[i, i] += 1 allele_counts = ((2 * homA2) + het)/float(2 * tcount) mafs = 1 - allele_counts.diagonal() maf_df = pd.DataFrame(zip(variant_ids, mafs), columns=["SNP", "MAF"], index=[x for x,y in enumerate(variant_ids)]) maf_df["A2_HOMS"] = (2 * homA1).diagonal() maf_df["A2_HETS"] = het.diagonal() maf_df.index = maf_df["SNP"] maf_df.drop(["SNP"], axis=1, inplace=True) E.info("allele frequencies calculated over %i SNPs and " "%i individuals" % (len(genos), tcount)) return maf_df def calcMaxAlleleFreqDiff(ped_file, map_file, group_file, test=None, ref=None): ''' Calculate the allele frequency difference between two groups of individuals based upon some prior assignment. Arguments --------- ped_file: string plink text format .ped file - see Plink documentation for details (https://www.cog-genomics.org/plink2/input#ped) map_file: string plink test format .map file - see Plink documentation for details (https://www.cog-genomics.org/plink2/input#ped) group_file: string a file containing grouping information, must be in standard Plink format with IID, FID, GROUP as the columns test: string group label to use as the test case for calculating allele frequency differences. If this isn't set, then the first non-ref value encountered will be set as test ref: string group label to use as the reference case for calculating allele frequency differences. If not set, then the first value encountered will be the test. Returns ------- freq_diffs: pandas.Core.DataFrame dataframe of SNP information and allele frequency difference between group labels ''' # group labels need to be of the same type, convert all # group values to string group_df = pd.read_table(group_file, sep="\t", header=0, index_col=None, converters={"GROUP": str, "FID": str, "IID": str}) group_df["GROUP"] = [str(xg) for xg in group_df["GROUP"]] try: assert ref E.info("Reference label set to %s" % ref) except AssertionError: ref = set(group_df["GROUP"])[0] E.info("Reference label not provided. 
Setting " "reference label to %s" % ref) try: assert test E.info("Test label set to %s" % test) except AssertionError: test = [tx for tx in set(group_df["GROUP"]) if not ref][0] E.info("Test label not provided, setting test " "label to %s." % test) # parse the ped file - get the variant column headers from # the map file - no headers with these files # variant order in the map file matters, use an ordered dict variants = collections.OrderedDict() with open(map_file, "r") as mfile: for snp in mfile.readlines(): attrs = snp.split("\t") snpid = attrs[1] variants[snpid] = {"chr": attrs[0], "pos": attrs[-1].strip("\n")} variant_ids = variants.keys() # store genotype matrix as an array # rows and columns are variant IDs ref_homA1 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) ref_homA2 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) ref_het = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) test_homA1 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) test_homA2 = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) test_het = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.int64) tcount = 0 rcount = 0 ncount = 0 ref_ids = group_df["IID"][group_df["GROUP"] == ref].values test_ids = group_df["IID"][group_df["GROUP"] == test].values total = len(group_df) with open(ped_file, "r") as pfile: for indiv in pfile.readlines(): indiv = indiv.strip("\n") indiv_split = indiv.split("\t") fid = indiv_split[0] iid = indiv_split[1] mid = indiv_split[2] pid = indiv_split[3] gender = indiv_split[4] phen = indiv_split[5] genos = indiv_split[6:] #genos = ["".join([alleles[i], # alleles[i+1]]) for i in range(0, len(alleles), 2)] # check for ref and test conditions # ignore individuals in neither camp if iid in test_ids: tcount += 1 # get genotype counts for i in range(len(genos)): # missing genotypes are coded '00' in plink format if genos[i] == "00": pass elif genos[i] == "11": test_homA1[i, i] += 1 elif genos[i] == "12": test_het[i, i] += 1 else: test_homA2[i, i] += 1 elif iid in ref_ids: rcount += 1 # get genotype counts for i in range(len(genos)): # missing genotypes are coded '00' in plink format if genos[i] == "00": pass elif genos[i] == "11": ref_homA1[i, i] += 1 elif genos[i] == "12": ref_het[i, i] += 1 else: ref_homA2[i, i] += 1 else: ncount += 1 if round((tcount + rcount + ncount)/total, 2) == 0.25: E.info("%i samples counted." "Approximately 25% samples counted" % tcount + rcount + ncount) elif round((tcount + rcount + ncount)/total, 2) == 0.50: E.info("%i samples counted." "Approximately 50% samples counted" % tcount + rcount + ncount) elif round((tcount + rcount + ncount)/total, 2) == 0.75: E.info("%i samples counted." "Approximately 75% samples counted" % tcount + rcount + ncount) E.info("Counted alleles for %i test cases, %i ref cases," " %i neither reference nor test." 
% (tcount, rcount, ncount)) ref_allele_counts = ((2 * ref_homA2) + ref_het)/float(2 * rcount) test_allele_counts = ((2 * test_homA2) + test_het)/float(2 * tcount) ref_mafs = 1 - ref_allele_counts.diagonal() test_mafs = 1 - ref_allele_counts.diagonal() ref_maf_df = pd.DataFrame(zip(variant_ids, ref_mafs), columns=["SNP", "ref_MAF"], index=[x for x,y in enumerate(variant_ids)]) ref_maf_df["ref_A2_HOMS"] = (2 * ref_homA1).diagonal() ref_maf_df["ref_A2_HETS"] = ref_het.diagonal() ref_maf_df.index = ref_maf_df["SNP"] ref_maf_df.drop(["SNP"], axis=1, inplace=True) test_maf_df = pd.DataFrame(zip(variant_ids, test_mafs), columns=["SNP", "test_MAF"], index=[x for x,y in enumerate(variant_ids)]) test_maf_df["test_A2_HOMS"] = (2 * test_homA1).diagonal() test_maf_df["test_A2_HETS"] = test_het.diagonal() test_maf_df.index = test_maf_df["SNP"] test_maf_df.drop(["SNP"], axis=1, inplace=True) freq_diffs = pd.merge(ref_maf_df, test_maf_df, left_index=True, right_index=True, how='inner') freq_diffs["MAF_diff"] = freq_diffs["ref_MAF"] - freq_diffs["test_MAF"] E.info("allele frequencies calculated over %i SNPs and " "%i individuals" % (len(genos), tcount + rcount)) return freq_diffs def calcPenetrance(ped_file, map_file, mafs=None, subset=None, snpset=None): ''' Calculate the proportion of times an allele is observed in the phenotype subset vs it's allele frequency. This is the penetrance of the allele i.e. if observed in 100% of affected individuals and 0% of controls, then penetrance is 100% Generates a table of penetrances for each variants/allele and a plot of MAF vs # cases carrying the allele Generates a heatmap of compound heterozygotes, and homozygotes with penetrances. Outputs a table of SNPs, homozygote and heterozygote counts among subset individuals and proportion of subset individual phenotype explained by homozygotes and heterozygotes Requires alleles are coded A1(minor)=1, A2=2 ''' # check subset is set, if not then throw an error # cannot calculate penetrance without a phenotype if not subset: raise ValueError("Cannot calculate penetrance of alleles " "without a phenotype to subset in") else: pass # parse the ped file - get the variant column headers from # the map file - no headers with these files # variant order in the map file matters, use an ordered dict variants = collections.OrderedDict() with open(map_file, "r") as mfile: for snp in mfile.readlines(): attrs = snp.split("\t") snpid = attrs[1] variants[snpid] = {"chr": attrs[0], "pos": attrs[-1].strip("\n")} if snpset: with IOTools.openFile(snpset, "r") as sfile: snps = sfile.readlines() snps = [sx.rstrip("\n") for sx in snps] variant_ids = [ks for ks in variants.keys() if ks in snps] else: variant_ids = variants.keys() var_idx = [si for si, sj in enumerate(variant_ids)] case_mat = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.float64) all_mat = np.zeros((len(variant_ids), len(variant_ids)), dtype=np.float64) tcount = 0 ncases = 0 # missing phenotype individuals must be ignored, else # they will cause the number of individuals explained # to be underestimated with open(ped_file, "r") as pfile: for indiv in pfile.readlines(): indiv = indiv.strip("\n") indiv_split = indiv.split("\t") fid = indiv_split[0] iid = indiv_split[1] mid = indiv_split[2] pid = indiv_split[3] gender = int(indiv_split[4]) phen = int(indiv_split[5]) if phen != -9: if subset == "cases": select = phen elif subset == "gender": select = gender else: select = None genos = np.array(indiv_split[6:]) genos = genos[var_idx] #genos = ["".join([alleles[i], # 
alleles[i+1]]) for i in range(0, len(alleles), 2)] tcount += 1 het = np.zeros(len(genos), dtype=np.float64) hom = np.zeros(len(genos), dtype=np.float64) for i in range(len(genos)): # missing values are coded '00' in plink format # A2 homs are coded '11' in plink format if genos[i] == "11": hom[i] += 1 elif genos[i] == "12": het[i] += 1 else: pass hom_mat = np.outer(hom, hom) het_mat = np.outer(het, het) homs = hom_mat.diagonal() het_mat[np.diag_indices(len(genos))] = homs gen_mat = het_mat # separate matrix for subset # reference is always level 2 for plink files, # either cases or females if select == 2: case_mat += gen_mat all_mat += gen_mat ncases += 1 else: all_mat += gen_mat else: pass E.info("alleles counted over %i SNPs " "and %i individuals, of which %i are " "in the %s subset" % (len(genos), tcount, ncases, subset)) penetrance = np.divide(case_mat, all_mat) # round for the sake of aesthetics penetrance = np.round(penetrance, decimals=5) pen_df = pd.DataFrame(penetrance, columns=variant_ids, index=variant_ids) pen_df = pen_df.fillna(0.0) case_df = pd.DataFrame(case_mat, columns=variant_ids, index=variant_ids) all_df = pd.DataFrame(all_mat, columns=variant_ids, index=variant_ids) # plot heatmap of penetrances as percentages indf = pen_df * 100 py2ri.activate() # only plot penetrances > 0% r_pen = py2ri.py2ri_pandasdataframe(indf) r_cases = py2ri.py2ri_pandasdataframe(case_df) r_all = py2ri.py2ri_pandasdataframe(all_df) R.assign("pen.df", r_pen) R.assign("case.df", r_cases) R.assign("all.df", r_all) R('''suppressPackageStartupMessages(library(gplots))''') R('''suppressPackageStartupMessages(library(RColorBrewer))''') # penetrances E.info("plotting penetrance matrix") R('''hmcol <- colorRampPalette(brewer.pal(9, "BuGn"))(100)''') R('''rowpen <- pen.df[rowSums(pen.df) > 0,]''') R('''colpen <- rowpen[,colSums(rowpen) > 0]''') R('''png("%s/penetrance-matrix.png", width=720, height=720)''' % os.getcwd()) R('''heatmap.2(as.matrix(colpen), trace="none", col=hmcol,''' '''dendrogram="none", Colv=colnames(colpen), key=FALSE, ''' '''Rowv=rownames(colpen), margins=c(10,10), cellnote=round(colpen),''' '''notecol="white")''') R('''dev.off()''') E.info("plotting case counts matrix") R('''rowcase <- case.df[rowSums(case.df) > 0,]''') R('''colcase <- rowcase[,colSums(rowcase) > 0]''') R('''png("%s/cases-matrix.png", width=720, height=720)''' % os.getcwd()) R('''heatmap.2(as.matrix(colcase), trace="none", col=rep("#F0F8FF", 100),''' '''dendrogram="none", Colv=colnames(colcase), key=FALSE, ''' '''colsep=seq(1:length(colnames(colcase))), ''' '''rowsep=seq(1:length(rownames(colcase))),''' '''Rowv=rownames(colcase), margins=c(10,10), cellnote=round(colcase),''' '''notecol="black")''') R('''dev.off()''') E.info("plotting all individuals matrix") R('''rowall <- all.df[rownames(colcase),]''') R('''colall <- rowall[,colnames(colcase)]''') R('''png("%s/all-matrix.png", width=720, height=720)''' % os.getcwd()) R('''heatmap.2(as.matrix(colall), trace="none", col=rep("#F0F8FF", 100),''' '''dendrogram="none", Colv=colnames(colall), key=FALSE, ''' '''colsep=seq(1:length(colnames(colall))), ''' '''rowsep=seq(1:length(rownames(colall))), ''' '''Rowv=rownames(colall), margins=c(10,10), cellnote=round(colall),''' '''notecol="black")''') R('''dev.off()''') # plot MAF vs homozygosity maf_df = pd.read_table(mafs, sep="\t", header=0, index_col=0) plot_df = pd.DataFrame(columns=["MAF"], index=maf_df.index) plot_df["MAF"] = maf_df["MAF"] homs = case_mat.diagonal() hom_series = pd.Series({x: y for x, y in zip(variant_ids, 
homs)}) plot_df["explained_by_homozygotes"] = hom_series plot_df["SNP"] = plot_df.index plot_df.index = [ix for ix, iy in enumerate(plot_df.index)] plotPenetrances(plotting_df=plot_df) out_df = summaryPenetrance(maf_df=maf_df, case_counts=case_mat, variants=variant_ids, n_cases=ncases, n_total=tcount) return out_df, pen_df def summaryPenetrance(maf_df, case_counts, variants, n_cases, n_total): ''' Summarise genotype counts and proportion of cases explained by the observed homozygotes and compound heterozygotes. This is a function of the total population size and population allele frequency - does this assume 100% penetrance of each allele? ''' # homozygous individuals are on the # diagonal of the case_counts array homozyg_cases = case_counts.diagonal() homozyg_series = pd.Series({x: y for x, y in zip(variants, homozyg_cases)}) # heterozygotes are on the off-diagonal elements # get all off diagonal elements by setting diagonals to zero # matrix is diagonal symmetric np.fill_diagonal(case_counts, 0) het_counts = np.sum(case_counts, axis=0) het_series = pd.Series({x: y for x, y in zip(variants, het_counts)}) out_df = pd.DataFrame(columns=["homozygote_cases", "heterozygote_cases"], index=maf_df.index) out_df["MAF"] = maf_df["MAF"] out_df["homozygote_cases"] = np.round(homozyg_series, 1) out_df["expected_cases"] = np.round(((out_df["MAF"] ** 2) * n_total), 3) out_df["heterozygote_cases"] = het_series out_df["hom_prop_explained"] = np.round(homozyg_series/float(n_cases), 3) out_df["het_prop_explained"] = np.round(het_series/float(n_cases), 3) return out_df def plotPenetrances(plotting_df): ''' Plot the proportion of cases/phenotype explained by individuals carrying allele vs. population allele frequency. Generate final output summary table (should be in separate function) ''' # only need to plot variants with MAF >= 0.01 low_frq = plotting_df["MAF"] < 0.01 hi_df = plotting_df[~low_frq] # get into R and use ggplot for MAF vs homozygosity amongs cases r_plot = py2ri.py2ri_pandasdataframe(hi_df) R.assign("hom.df", r_plot) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''png("%s/penetrance-plot.png", height=720, width=720)''' % os.getcwd()) R('''pen_p <- ggplot(hom.df, aes(x=explained_by_homozygotes, y=MAF, colour=SNP)) + ''' '''geom_point(size=4) + theme_bw() + ''' '''geom_text(aes(label=explained_by_homozygotes),''' '''colour="black",vjust=0.5, hjust=0.5) + ''' '''labs(x="Number of Red haired homozygotes", y="MAF") + ''' '''theme(axis.title=element_text(size=10, colour="black"))''') R('''print(pen_p)''') R('''dev.off()''') def findDuplicateVariants(bim_file, take_last=False): ''' identify variants with duplicate position and reference alleles ''' # count the number of lines first to get # the necessary array sizes E.info("getting number of variants") lines = 1 with open(bim_file, "r") as bfile: for line in bfile.readlines(): lines += 1 E.info("%i variants found" % lines) # setup index arrays var_array = np.empty(lines, dtype=object) ref_alleles = np.empty(lines, dtype=object) pos_array = np.zeros(lines, dtype=np.int64) minor_alleles = np.empty(lines, dtype=object) idx = 0 # find duplicates on position with open(bim_file, "r") as bfile: for line in bfile.readlines(): line = line.rstrip("\n") varline = line.split("\t") var = varline[1] pos = int(varline[3]) ref_allele = varline[-1] minor_allele = varline[-2] var_array[idx] = var ref_alleles[idx] = ref_allele minor_alleles[idx] = minor_allele pos_array[idx] = pos idx += 1 # find duplicates using pandas series pos_series = 
pd.Series(pos_array) dup_last = pos_series[pos_series.duplicated(take_last=True)] dup_first = pos_series[pos_series.duplicated(take_last=False)] var_series = pd.Series(var_array) ref_series = pd.Series(ref_alleles) alt_series = pd.Series(minor_alleles) # a few variants have duplicate IDs - count these as duplicates # and add to the exclusion list - these won't be identified # based on shared position necessarily - force add them ref_first = ref_series[ref_series.duplicated(take_last=False)] ref_last = ref_series[ref_series.duplicated(take_last=True)] ref_dups = set(ref_first.index).union(ref_last.index) # union of take first and take last dup_all = set(dup_last.index).union(set(dup_first.index)) dup_complete = dup_all.union(ref_dups) dup_idx = np.array([sx for sx in dup_complete]) dup_idx.sort() # make a dataframe to hold all triallelic and duplicate variants dup_dict = {"SNP": var_series[dup_idx], "BP": pos_series[dup_idx], "REF": ref_series[dup_idx], "VAR": alt_series[dup_idx]} dup_df = pd.DataFrame(dup_dict) # some variants may have more than one ID/entry # step through using pandas groupby - group on position E.info("looking for duplicates and triallelic variants") tri_alleles = [] dups_alleles = [] overlap_vars = [] for names, groups in dup_df.groupby(["BP"]): # if there is only one reference allele, indicates a # triallelic variant, otherwise its probably a duplicate # or overlaping INDEL and SNV var_lens = groups["VAR"].apply(len) if groups.shape[0] == 1: pass elif np.mean(var_lens) > 1: # probably overlapping variants, exclude, but report # separately over_vars = groups["SNP"].values.tolist() for ovs in over_vars: overlap_vars.append(ovs) elif len(set(groups["REF"])) == 1: tri_vars = groups["SNP"].values.tolist() for tri in tri_vars: tri_alleles.append(tri) else: dup_vars = groups["SNP"].values.tolist() for dup in dup_vars: dups_alleles.append(dup) E.info("%i triallelic variants found" % len(tri_alleles)) E.info("%i duplicate position variants found" % len(dups_alleles)) E.info("%i overlapping SNVs and INDELs found" % len(overlap_vars)) return dups_alleles, tri_alleles, overlap_vars def flagExcessHets(hets_file, plot=True, plot_path=None): ''' Take output from Plink 1.9 --het command calculate heterozygosity rate and flag individuals with heterozygosity > 3 s.d. from the mean value. This assumes all individuals are from the same population, and thus form a homogenous cluster, with only outliers at the extremes. 
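
    A minimal usage sketch (file and directory names are illustrative):

        het_flags = flagExcessHets("cohort.het.gz", plot=True,
                                   plot_path="qc_plots")
        het_flags.to_csv("heterozygosity_exclusions.tsv",
                         sep="\t", index=False)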
Visualise the data, if there are multiple apparent clusters then filter for ethnicity/ancestry first ''' if hets_file.endswith("gz"): compression = "gzip" else: compression = None het_df = pd.read_table(hets_file, header=0, index_col=None, sep="\t", compression=compression) nmiss = pd.Series(het_df.loc[:, "N(NM)"], dtype=np.float64) nhoms = het_df.loc[:, "O(HOM)"] het_df["het_rate"] = (nmiss - nhoms) / nmiss # get mean value and std, set upper and lower thresholds mean_het = np.mean(het_df.loc[:, "het_rate"].values) sd_het = np.std(het_df.loc[:, "het_rate"].values) upper = mean_het + (3 * sd_het) lower = mean_het - (3 * sd_het) hi_hets = het_df[het_df["het_rate"] > upper] lo_hets = het_df[het_df["het_rate"] < lower] E.info("%i individuals with high heterozygosity" % len(hi_hets)) E.info("%i individuals with low heterozygosity" % len(lo_hets)) hi_hets["exclude"] = "high_heterozygosity" lo_hets["exclude"] = "low_heterozygosity" all_flags = lo_hets.append(hi_hets) if plot: E.info("plotting heterozygosity rate distribution") py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(het_df) R.assign("het.df", r_df) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''p <- ggplot(het.df, aes(het_rate)) + ''' '''geom_histogram() + ''' '''labs(title="Distribution of heterozygosity rate") + ''' '''theme_bw() + ''' '''geom_vline(xintercept=c(%0.3f, %0.3f), ''' '''linetype=2, col="#838B83")''' % (lower, upper)) R('''png("%s/het_rate-hist.png")''' % plot_path) R('''print(p)''') R('''dev.off()''') return all_flags def flagGender(gender_file, plot=True, plot_path=None): ''' Parse the .sexcheck output report from Plink --sex-check and flag gender discordant individuals. Arguments --------- gender_file: string the .sexcheck output report file from Plink --sex-check plot: boolean generate a histogram of F values distributions showing male and female clusters, split by reported gender plot_path: string PATH to save F coefficient histogram Returns ------- discords: pandas.Core.DataFrame a pandas dataframe of individuals that are gender discordant ''' gender_df = pd.read_table(gender_file, header=0, index_col=None, sep=None) genders = lambda x: "male" if x == 1 else "female" gender_df["GENDER"] = gender_df["PEDSEX"].apply(genders) E.info("checking individuals for discordance") discords = gender_df[gender_df["STATUS"] != "OK"] discords.drop(labels=["PEDSEX", "SNPSEX", "STATUS", "F", "GENDER"], axis=1, inplace=True) E.info("%i individuals with discordant gender" % len(discords)) if plot: E.info("plotting F gender coefficient distributions") py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(gender_df) R.assign("gender.df", r_df) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''p <- ggplot(gender.df, aes(F, fill=GENDER)) + ''' '''geom_histogram() + ''' '''labs(title="F coefficient distributions for gender") + ''' '''theme_bw() + facet_grid(. ~ GENDER)''') R('''png("%s/gender_check-hist.png")''' % plot_path) R('''print(p)''') R('''dev.off()''') else: pass return discords def _compare_ibds(ibd_entry, threshold=0.03125): ''' Just for internal use in `flagRelated` function. 
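    (For example, a pair with PI_HAT = 0.05 returns True under the
    default threshold of 0.03125, whereas PI_HAT = 0.01 returns False.)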
To compare IBD estimates and flag up related individuals Arguments --------- ibd_entry: pandas.Core.Series a single line entry from an IBD estimates file threshold: float the threshold at which to flag an individual as related Returns ------- flag: boolean True if related, else false ''' if ibd_entry["PI_HAT"] < threshold: return False else: return True def flagRelated(ibd_file, chunk_size=None, threshold=0.03125, plot=True, plotting_path=None): ''' Use IBS estimates to find pairs of related individuals above a threshold. This will also flag up the number of duplicated/monozygotic twin pairs (matrix diagonals). Arguments --------- ibd_file: string file containing IBS estimates between pairs from Plink or GCTA. chunk_size: int the file chunk size to read in at a time, should correspond to the number of individuals. If not set, the whole file is read in. Not recommend for large (>2GB) files. threshold: float IBS threshold, above which individuals will be flagged as related. Default is 3rd cousins. plot: boolean generate a histogram of the distribution of IBS values. Default = True plotting_path: string PATH to plot histogram to Returns ------- flagged: pandas.Core.DataFrame dataframe of individuals to remove, with the estimated relationship to another individual. ''' # need to make this faster # sequentially add new IDs only related_list = [] ibds = [] if ibd_file.endswith("gz"): comp = "gzip" else: pass E.info("reading file in chunks of %i lines" % chunk_size) if chunk_size: # read in and operate on chunks df_iter = pd.read_table(ibd_file, header=0, index_col=None, delim_whitespace=True, compression=comp, chunksize=chunk_size) count = 0 for chunk in df_iter: count += 1 entrys = chunk[["FID1", "IID1", "FID2", "IID2", "PI_HAT"]] ibds.append(entrys) relate_mask = entrys.apply(_compare_ibds, axis=1) related = entrys[relate_mask] E.info("%i relations found" % len(related)) related_list.append(related) else: pass df = pd.concat(ibds, axis=0, keys=None) if plot: # for lots of observations, plot log counts E.info("plotting pair-wise IBD distribution") py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(df) R.assign("relate.df", r_df) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''p <- ggplot(relate.df, aes(PI_HAT+0.5)) + ''' '''geom_histogram(binwidth=0.01) + ''' '''labs(title="Proportion of IBD shared distribution") + ''' '''theme_bw() + scale_y_log10() + ''' '''geom_vline(xintercept=%(threshold)f, ''' '''linetype=4, colour="#838B83")''' % locals()) R('''png("%s/IBD-hist.png")''' % plotting_path) R('''print(p)''') R('''dev.off()''') else: pass return related_list def flagInbred(inbred_file, inbreeding_coefficient, ibc_threshold=0.05, plot=True, plot_path=None): ''' Use Plink or GCTA's estimate of F, inbreeding coefficient to flag individuals that are highly inbred. Arguments --------- inbred_file: string file containing estimates of F inbreeding_coefficient: string coefficient to use to identify inbred individuals. This name should correspond to one of the columns in `inbred_file`. 
ibc_threshold: float the threshold above which individuals will be flagged as inbred plot: boolean generate a histogram of the distribution of F coefficients plotting_path: string PATH to directoru for plotting F coefficient distribution Returns ------- inbreds: padas.Core.DataFrame dataframe of inbred individuals to exclude from analysis ''' inbreed_df = pd.read_table(inbred_file, header=0, index_col=None, sep="\t") E.info("Identifing individuals with inbreeding coefficient" " greater than %0.3f" % ibc_threshold) inbreds = inbreed_df[inbreed_df[inbreeding_coefficient] > ibc_threshold] inbreds = inbreds[["FID", "IID"]] E.info("%i individuals with high inbreeding " "coefficient" % len(inbreds)) if plot: E.info("plotting F coefficient distributions") py2ri.activate() r_df = py2ri.py2ri_pandasdataframe(inbreed_df) R.assign("inbreed.df", r_df) R('''suppressPackageStartupMessages(library(ggplot2))''') R('''p <- ggplot(inbreed.df, aes(%(inbreeding_coefficient)s)) + ''' '''geom_histogram(binwidth=0.01) + ''' '''labs(title="Inbreeding coefficient, %(inbreeding_coefficient)s,''' '''distribution") + theme_bw() + ''' '''geom_vline(xintercept=%(ibc_threshold)0.3f, ''' '''linetype=4, colour="#838B83")''' % locals()) R('''png("%s/inbreeding-hist.png")''' % plot_path) R('''print(p)''') R('''dev.off()''') else: pass return inbreds def mergeQcExclusions(hets_file=None, inbred_file=None, related_file=None, gender_file=None, mask_file=None): ''' Merge sets of excluded individuals into a single file for downstream analysis, processing, etc Arguments --------- hets_file: string file containing individuals to remove due to excessive or reduced heterozygosity inbred_file: string file of individuals highly related to themselves for exclusion related_file: string file of IDs of individuals pruned due to greater relatedness than an arbitrary threshold gender_file: string individuals with discordant reported vs. genetic gender mask_file: string individuals to be excluded from analyses, unrelated for reasons to QC (i.e. 
mask out category of individuals) Returns ------- exclusions: pandas.Core.DataFrame A dataframe of FID and IIDs of the unique set of excluded individuals ''' if hets_file: hets_df = pd.read_table(hets_file, sep="\t", header=0, index_col=None) E.info("%i exclusions due to " "heterozygosity deviation" % len(hets_df)) else: hets_df = None E.warn("No heterozygosity exclusion file") if inbred_file: inbred_df = pd.read_table(inbred_file, sep="\t", header=0, index_col=None) E.info("%i exclusions due " "to consanguinuity" % len(inbred_df)) else: inbred_df = None E.warn("No inbred exclusions") if related_file: related_df = pd.read_table(related_file, delim_whitespace=True, header=None, index_col=None) related_df.columns = ["FID", "IID"] E.info("%i individuals excluded due " "to high relatedness" % len(related_df)) else: related_df = None E.warn("No individuals excluded on relatedness") if gender_file: gender_df = pd.read_table(gender_file, sep="\t", header=0, index_col=None) E.info("%i individuals with discordant " "gender recorded" % len(gender_df)) else: gender_df = None E.warn("No individuals exclued with " "discordant gender") if mask_file: mask_df = pd.read_table(mask_file, sep="\t", header=None, index_col=None) E.info("%i individuals to be excluded " "for additional reasons" % len(gender_df)) mask_df.columns = ["FID", "IID"] else: mask_df = None df_list = [hets_df, inbred_df, related_df, gender_df, mask_df] df_true = [True for x in df_list if x is not False] if not all(df_true): raise ValueError("no QC files detected - do some QC!!") else: pass # assume all df have FID and IID columns real_df = [x for x in df_list if x is not None] real_df = [x[["FID", "IID"]] for x in real_df] full_df = pd.concat(real_df, keys=None, axis=0) exclusions = full_df.drop_duplicates(subset=["FID", "IID"], take_last=True, inplace=False) return exclusions def selectLdFromTabix(ld_dir, chromosome, snp_pos, ld_threshold=0.01): ''' Select all LD values from a tabix indexed BGZIP file of LD. Assumes Plink format. Arguments --------- ld_dir: string path to directory containing LD data chromosome: string chromosome of SNP to pull out LD values assumes chrN format snp_pos: int bp mapping position of the SNP on the same genome build as the LD was calculated ld_threshold: float minimum LD value to return Returns ------- ld_df: pandas.Core.DataFrame Pandas dataframe containing LD values over target range. 
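
    A minimal usage sketch (directory, chromosome and position are
    illustrative; assumes bgzipped, tabix-indexed Plink LD output):

        ld_df = selectLdFromTabix(ld_dir="ld_tabix",
                                  chromosome="chr16",
                                  snp_pos=53786615,
                                  ld_threshold=0.5)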
''' tab_dir = [td for td in os.listdir(ld_dir) if re.search(".bgz$", td)] contig = int(chromosome.lstrip("chr")) start = snp_pos end = snp_pos tab_query = """ tabix %(ld_dir)s/%(tab_indx)s %(contig)i:%(start)i-%(end)i | awk '{if($7 >= %(ld_threshold)s) print $0}'""" tab_indx = [tx for tx in tab_dir if re.search(chromosome, tx)][-1] E.info("Retrieving LD values at bp: %i" % snp_pos) proc = subprocess.Popen(tab_query % locals(), shell=True, stdout=subprocess.PIPE) ld_dict = {} count = 0 for line in proc.stdout: snp_dict = {} parse = line.split("\t") snp_dict["CHR_A"] = int(parse[0]) snp_dict["BP_A"] = int(parse[1]) snp_dict["SNP_A"] = parse[2] snp_dict["CHR_B"] = int(parse[3]) snp_dict["BP_B"] = int(parse[4]) snp_dict["SNP_B"] = parse[5] snp_dict["R2"] = float(parse[6]) snp_dict["DP"] = float(parse[7]) count += 1 ld_dict[count] = snp_dict ld_df = pd.DataFrame(ld_dict).T # ld Dataframe may be empty, return # empty dataframe try: ld_df.index = ld_df["SNP_B"] ld_df.drop_duplicates(subset="SNP_B", keep="last", inplace=True) except KeyError: E.info("No SNPs detected in LD " "with r^2 > {}".format(ld_threshold)) ld_df = pd.DataFrame(0.0, index=[snp_pos], columns=["SNP_A", "R2"]) return ld_df def selectLdFromDB(database, table_name, index_snp, index_label=None, ld_threshold=None): ''' Select LD values from an SQL table over a specific range. Large regions will consume large memory and queries may take several minutes to complete. Arguments --------- database: sql.connection An SQL database connection to the DB containing the LD values table_name: string The table to query containing LD information index_snp: string SNP ID to select LD values from the SQL database on index_label: str Column label in SQL database to use as the index in the output dataframe ld_threshold: float minimum LD value to return Returns ------- ld_df: pandas.Core.DataFrame Pandas dataframe containing LD values over target range. ''' # UTF-8 codec struggles to decode ';' in some columns database.text_factory = str if ld_threshold: state = ''' select SNP_A,SNP_B,R2 FROM %s where %s = "%s" AND R2 > %0.3f; ''' % (table_name, index_label, index_snp, ld_threshold) else: state = ''' select SNP_A,SNP_B,R2 FROM %s where %s = "%s"; ''' % (table_name, index_label, index_snp) ld_df = pdsql.read_sql(sql=state, con=database, index_col=index_label) return ld_df def calcLdScores(ld_table, snps, scale=False): ''' Calculate the LD scores for SNPs across a chromosome, stored in a SQL database. Arguments --------- ld_table: pandas.Core.DataFrame Pandas dataframe in table format containing LD values between SNPs. Columns are `SNP_A`, `SNP_B` and `R2`. snps: list the snps over which to calculate LD scores scale: bool Whether to scale LD score by the number of SNPs used to calculate the score. Useful if used as a weighting for other SNP scores. Returns ------- ld_scores: float LD scores for each SNP ''' if len(ld_table) > 0: ld_score = sum(ld_table["R2"]) else: ld_score = 0 if scale: ld_scores = ld_score/len(ld_table) else: ld_scores = ld_score return ld_scores def calcWeightedEffects(gwas_results, snps, calc_se=True, scale=False): ''' Calculate the standard error weighted effect sizes score for each SNP: score = sum(ln(OR) * se) Arguments --------- gwas_results: pandas.Core.DataFrame A dataframe of the results from a genome_wide association study. Assumes SNP IDs are the index column. snps: list the snps over which to calculate the total weighted effect size score. 
calc_se: boolean Calculate the standard error from the p-values and effect sizes: SE = ln(OR)/Z Z = -0.862 + sqrt(0.743 - 2.404 * ln(P)) scale: boolean Scale the sum of standard error weighted effect sizes by the number of SNPs Returns ------- es_score: float sum of SE weighted effect sizes ''' # calculate standard error of effect size based on # p-value and effect size if calc_se: # check p-values that = 0 are set to smallest floating # point representation instead gwas_results["P"][gwas_results["P"] == 0] = np.finfo(np.float64).min z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x)) gwas_results["Z"] = gwas_results["P"].apply(z_func) gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"]) else: E.warn("Standard errors have not been calculated, please " "make sure they exist in this results table") es_score = sum((abs(np.log(gwas_results["OR"])) * gwas_results["SE"]).fillna(0)) if scale and len(gwas_results): return es_score/len(gwas_results) else: return es_score def snpPriorityScore(gwas_results, chromosome, ld_dir=None, clean=True, database=None, table_name=None): ''' Generate SNP scores based on the amount of genetic variation they capture and the sum of the weighted effect sizes for the trait of interest. This score can then be integrated with a score based on the overlap with functional annotation features of interest. Arguments --------- gwas_results: string Results from a GWAS, assumed to be in Plink format. ld_dir: string directory containing tabix index LD files from Plink database: string Path to an SQL database containing LD values in table format table_name: string Specific table, often referring to a specific chromosome, that contains LD values with columns SNP_A, SNP_B, BP_A, BP_B and R2. chromosome: string A chromosome to select from the gwas_results file. clean: boolean Whether the results table has been pre-cleaned to remove results not relevant to SNPs. e.g. if covariates had been included in the regression model these should be removed. Returns ------- SNP_scores: pd.Core.DataFrame A pandas dataframe of LDscores, weight effect size scores and SNP priority score. ''' E.info("Reading association results from %s" % gwas_results) gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) if clean: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) else: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\s*", header=0) gwas_df = gwas_df[gwas_df["TEST"] == "ADD"] gwas_df.index = gwas_df["SNP"] # in order to reduce the computational load it is # necessary to break up the SNPs into regions. # The logical way would be to use recombination # hotspots, however, this will still leave # some very large windows # Use a moving window over the chromosome of # ~250Kb, with 25kb overlap. chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)] # duplicates cause selection of individual SNPs # to break - why are there still duplicates?? 
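    # keep only the last record seen at each base position before scoring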
chr_df.drop_duplicates(subset="BP", keep="last", inplace=True) priority_list = [] ld_scores = {} es_scores = {} priority_scores = {} snp_set = chr_df.index if database: dbh = sql.connect(database) else: pass # iterate over SNPs for snp in snp_set: if database: ld_values = selectLdFromDB(dbh, table_name=table_name, index_snp=snp, index_label="SNP_B") elif ld_dir: snp_pos = int(chr_df.loc[snp, "BP"]) ld_values = selectLdFromTabix(ld_dir=ld_dir, chromosome=chromosome, snp_pos=snp_pos) ldsnps = ld_values.loc[: ,"SNP_A"].values ldsnps = {sx for sx in ldsnps} ldscore = calcLdScores(ld_table=ld_values, snps=ldsnps, scale=False) ld_scores[snp] = ldscore try: gwas_results = chr_df.loc[ldsnps] escore = calcWeightedEffects(gwas_results=gwas_results, snps=ldsnps, calc_se=True, scale=True) except KeyError: gwas_results = chr_df.loc[snp] if gwas_results["P"] == 0: gwas_results["P"] = np.finfo(np.float64).min else: pass z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x)) gwas_results["Z"] = z_func(gwas_results["P"]) gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"]) escore = gwas_results["SE"] * abs(np.log(gwas_results["OR"])) es_scores[snp] = escore weight = escore * ldscore priority_scores[snp] = weight SNP_scores = pd.DataFrame([pd.Series(ld_scores), pd.Series(es_scores), pd.Series(priority_scores)]).T SNP_scores.columns = ["LDScore", "WeightEffectSize", "PriorityScore"] SNP_scores.sort_values(by="PriorityScore", inplace=True) return SNP_scores def fitPrior(value, distribution, dist_params): ''' Fit a prior probability given a value, distribution and distribution parameters. You are responsible for defining the appropriate distribution and parameters Arguments --------- Value: float A value to calculate a prior probability from distribution: string A distribution from which to calculate a probability. Current values are "normal", "t", "gamma", "lognormal", "exponential". dist_params: tuple parameters to define the distribution, * normal: (mean, std) * t: (df, ncp) * gamma: (k, theta) * lognormal: (ln(mean), std) * exponential: (lambda) Returns ------- prior: float Prior probability attached to input value ''' # distribution parameters should be passed # explicitly if distribution == "normal": prior = stats.norm(*dist_params).pdf(value) elif distribution == "t": prior = stats.t(*dist_params).pdf(value) elif distribution == "gamma": prior = stats.gamma(*dist_params).pdf(value) elif distribution == "lognormal": prior = stats.lognorm(*dist_params).pdf(value) elif distribution == "exponential": prior = stats.expon(*dist_params).pdf(value) else: raise ValueError("Distrubtion %s not " "implemented" % distribution) return prior def calcPriorsOnSnps(snp_list, distribution, params=None): ''' Calculate prior probabilities on SNPs based on a predefined value, a distribution and parameters to describe the distribution. This relies inherently on the correct and appropriate distribution to be defined, i.e. that it is conjugate to the marginal likelihood distribution. TO DO: introduce robust Bayesian modelling Arguments --------- snp_list: dict SNPs with score/value attached to determine the prior probability distribution: string the distribution from which to draw probabilities params: tuple parameters to describe the appropriate distribution. 
Returns ------- prior_probs: dict dictionary of priors for SNPs ''' prior_probs = {} # if there is no score for that SNP then use an # uninformative or Jeffrey's prior for snp in snp_list.keys(): if snp_list[snp] != 0: prior_probs[snp] = fitPrior(value=snp_list[snp], distribution=distribution, dist_params=params) else: prior_probs[snp] = 0.5 return prior_probs def estimateDistributionParameters(data, distribution, fscale=None, floc=None, **kwargs): ''' Use maximum likelihood to estimate the parameters of the defined distribution. Arguments --------- data: pd.Series/np.array data used to estimate parameters from distribution: string distribution assumed to underlie the data generation process fscale: float scale parameter of the distribution to fix floc: float location parameter of the distribution to fix **kwargs: float additional kwargs to pass as fixed parameters Returns ------- est_params: tuple estimated distribution parameters ''' if distribution == "normal": mu, sigma = stats.norm.fit(data) est_params = (mu, sigma,) elif distribution == "t": df, mu, sigma = stats.t.fit(data) est_params = (df, mu, sigma,) elif distribution == "gamma": k, theta, mu = stats.gamma.fit(data) est_params = (k, theta, mu,) elif distribution == "lognormal": exp_mu, sigma, theta = stats.lognorm.fit(data) est_params = (exp_mu, sigma, theta,) elif distribution == "exponential": beta, lambda_x = stats.expon.fit(data) est_params = (beta, lambda_x,) else: raise ValueError("Distrubtion %s not " "implemented" % distribution) return est_params def calculatePicsValues(snp_id, index_log10p, ld_values, priors=None, k=2): ''' Use the PICS method to assign probability to SNPs as being causal for association signals at a locus, given the strength of their association (log10 P-value), and linkage disequilbrium with the lead SNP (smallest p-value at the locus). This method allows the prioritisation of SNPs, including those where there are multiple independent signals. It requires that these independent signals are however input as separate SNPs. NB:: Fahr et al initially use k=6.4 based on their observation that altering k=[6,8] does not have an appreciable impact on the PICS values. However, when comparing this implementation to the PICS webserver, k=2 gives more similar values based on the current Phase3 1000 Genomes LD values and SNPs. Arguments --------- snp_id: string rs ID of the lead SNP from the associated region/ independent signal of association index_log10p: float the negative log10(p-value) of association with the phenotype of interest ld_values: pd.Core.DataFrame A pandas dataframe of LD values between the index SNP and all other SNPs given an arbitrary threshold. The expected columns are index SNP, SNP of interest, r^2. priors: dict the prior value to attach to each SNP. Can be used to integrate functional information into the PICS calculations. EXPERIMENTAL k: float The power to raise the correlation of alleles to. When k=2, this scales the standard deviation of the sample distribution for the marignal likelihood by the residual LD. Increasing k downweights the LD difference between the index SNP and SNP of interest. Returns ------- PICS: pandas.Core.Series A pandas series of SNPs and calculated PICS scores. 
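
    Notes
    -----
    The calculation below models the -log10 P-value of each SNP in LD r^2
    with the index SNP as Normal(mu, sigma), where mu = r^2 * index_log10p
    and sigma = sqrt(1 - r^k) * sqrt(index_log10p)/2.  Each SNP's PICS value
    is its (prior-weighted) likelihood of the observed index -log10 P under
    this distribution, normalised so that the values sum to 1.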
''' # assume the SNPs of interest are all contained in the # ld_values table index top_p = stats.norm(index_log10p, sqrt(index_log10p)/2).cdf(index_log10p) prob_dict = {} prob_dict[snp_id] = top_p E.info("calculating scores for %i SNPs" % len(ld_values)) # If a SNP is in perfect LD with the index SNP this forces # the standard deviation to be 0, add a small correction # to allow the calculation of marginal likelihood value # e.g. 0.0001 for snp in ld_values.index: try: r2 = ld_values.loc[snp]["R2"] r = sqrt(r2) mu = r2 * index_log10p sigma = sqrt(1 - (r ** k)) * (sqrt(index_log10p)/2) if sigma == 0: sigma = 0.0001 else: pass # use log likelihoods, these are more numerically # stable and avoid the multiplication of very small # numbers # if priors are not set, force uninformative prior # i.e. if not conjugate with likelihood likelihood = np.log(stats.norm(mu, sigma).pdf(index_log10p)) try: prior = np.log(priors[snp]) except: prior = np.log(1.0) prob_dict[snp] = np.exp(likelihood + prior) except KeyError: E.warn("SNP %s not found in LD with %s" % (snp, snp_id)) # calculate normalized probabilities, where sum of all probs=1 # use numpy sum to handle NaN values sum_probs = np.sum(prob_dict.values()) pics_dict = {} for snp_p in prob_dict.keys(): pics_dict[snp_p] = prob_dict[snp_p]/sum_probs pics_series = pd.Series(pics_dict) PICS = pics_series.sort_values(ascending=False) return PICS def getLdValues(database, table_name, index_snp, ld_threshold=0.5): ''' Get all LD values for the index SNP above a given r^2 threshold Arguments --------- database: sql.connection An SQL database connection to the DB containing the LD values table_name: string The table to query containing LD information index_snp: string SNP ID to select LD values from the SQL database on ld_threshold: float a threshold above which to select LD values with the index SNP Returns ------- ld_df: pandas.Core.DataFrame Pandas dataframe containing LD values over target range. ''' E.info("executing SQL query on table: %s" % table_name) ld_a = selectLdFromDB(database=database, table_name=table_name, index_snp=index_snp, index_label="SNP_B", ld_threshold=ld_threshold) ld_a.columns = ["SNP", "R2"] ld_b = selectLdFromDB(database=database, table_name=table_name, index_snp=index_snp, index_label="SNP_A", ld_threshold=ld_threshold) ld_b.columns = ["SNP", "R2"] ld_df = ld_a.append(ld_b) ld_df.index = ld_df["SNP"] # drop duplicate indices ld_df.drop_duplicates(subset="SNP", keep="last", inplace=True) E.info("%i records found matching query" % len(ld_df)) return ld_df def PICSscore(gwas_results, chromosome, database=None, table_name=None, priors=None, clean=True, ld_threshold=0.5, ld_dir=None): ''' Prioritise SNPs based on the conditional probability of being the causal SNP at an associated region given the strength of association and LD with surrounding SNPs. Originally described in:: Fahr et al Nature 518 (2015) pp337 The current implementation does not allow the integration of a prior probability - this will come in the future. Arguments --------- gwas_results: string Results from a GWAS, assumed to be in Plink format. ld_dir: string directory containing tabix index LD files from Plink database: string Path to an SQL database containing LD values in table format table_name: string Specific table, often referring to a specific chromosome, that contains LD values with columns SNP_A, SNP_B, BP_A, BP_B and R2. chromosome: string A chromosome to select from the gwas_results file. priors: dict the prior value to attach to each SNP. 
Can be used to integrate functional information into the PICS calculations. EXPERIMENTAL clean: boolean Whether the results table has been pre-cleaned to remove results not relevant to SNPs. e.g. if covariates had been included in the regression model these should be removed. ld_threshold: float Threshold above which to select SNPs in LD with the lead SNP Returns ------- PICS_scores: pd.Core.DataFrame A pandas dataframe of PICS scores for SNPs. ''' E.info("Reading association results from %s" % gwas_results) if clean: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) else: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\s*", header=0) gwas_df = gwas_df[gwas_df["TEST"] == "ADD"] gwas_df.index = gwas_df["SNP"] E.info("subsetting data on chromosome %s" % chromosome) chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)] try: chr_df.loc[:, "STAT"] = abs(chr_df["STAT"]) chr_df.sort_values(by="STAT", inplace=True, ascending=False) except KeyError: chr_df.sort_values(by="CHISQ", inplace=True, ascending=False) chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308 chr_df["P"].fillna(1.0) chr_df.loc[:, "log10P"] = np.log10(chr_df["P"]) index_snp = chr_df.iloc[0]["SNP"] try: indexp = -chr_df.iloc[0]["log10P"] except KeyError: indexp = -np.log10(chr_df.iloc[0]["P"]) E.info("index SNP is %s with -log10(p)= %0.3f" % (index_snp, indexp)) if database: dbh = sql.connect(database) ld_values = getLdValues(database=dbh, table_name=table_name, index_snp=index_snp, ld_threshold=ld_threshold) elif ld_dir: snp_pos = int(chr_df.loc[index_snp]["BP"]) ld_values = selectLdFromTabix(ld_dir=ld_dir, chromosome=chromosome, snp_pos=snp_pos) PICS_scores = calculatePicsValues(snp_id=index_snp, index_log10p=indexp, ld_values=ld_values, priors=priors, k=2) return PICS_scores def LdRank(gwas_results, chromosome, ld_dir=None, database=None, table_name=None, ld_threshold=0.8, top_snps=0.01, clean=True): ''' Rank SNPs based on the LD with the lead SNP from the association region. Take the top N% SNPs as the SNP set. Arguments --------- gwas_results: string Results from a GWAS, assumed to be in Plink format. ld_dir: string directory containing tabix index LD files from Plink database: string Path to an SQL database containing LD values in table format table_name: string Specific table, often referring to a specific chromosome, that contains LD values with columns SNP_A, SNP_B, BP_A, BP_B and R2. chromosome: string A chromosome to select from the gwas_results file. 
ld_threshold: float Threshold above which to select SNPs in LD with the lead SNP top_snps: float % SNPs to select, ranked on LD with the lead SNP Returns ------- ''' E.info("Reading association results from %s" % gwas_results) gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) if clean: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) else: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\s*", header=0) gwas_df = gwas_df[gwas_df["TEST"] == "ADD"] gwas_df.index = gwas_df["SNP"] E.info("subsetting data on chromosome %s" % chromosome) chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)] try: chr_df.loc[:, "STAT"] = abs(chr_df["STAT"]) chr_df.sort_values(by="STAT", inplace=True, ascending=False) except KeyError: chr_df.sort_values(by="CHISQ", inplace=True, ascending=False) chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308 chr_df["P"].fillna(1.0) chr_df.loc[:, "log10P"] = np.log10(chr_df["P"]) index_snp = chr_df.iloc[0]["SNP"] if database: dbh = sql.connect(database) ld_values = getLdValues(database=dbh, table_name=table_name, index_snp=index_snp, ld_threshold=ld_threshold) elif ld_dir: snp_pos = int(chr_df.loc[index_snp]["BP"]) ld_values = selectLdFromTabix(ld_dir=ld_dir, chromosome=chromosome, snp_pos=snp_pos) # rank on LD with index SNP E.info("sort and rank top %0.3f SNPs in " "r2 > %0.3f with SNP %s" % (top_snps, ld_threshold, index_snp)) index_series = pd.DataFrame( {"SNP": index_snp, "R2": 1.00}, index=[index_snp]) if len(ld_values): ld_values = ld_values.append(index_series) else: ld_values = index_series ld_values.columns = ["SNP", "R2"] ld_values.sort_values(by="R2", inplace=True, ascending=False) size = len(ld_values) # use the math module ceil function to get # smallest integer greater than or equal to # the top %n SNPs top = int(ceil(size * top_snps)) top_ld = ld_values.iloc[0:top,] return top_ld def calcApproxBayesFactor(log_or, standard_error, prior_variance): ''' Calculate the approximate Bayes Factor (ABF) from Wakefield Am. J. Hum. Genet.(2015) for a SNP. The ABF is calculated from the effect size (log OR), variance (Standard error ^2) and a prior weight on the variance (W). Arguments --------- log_or: float The natural logarithm of the odds ratio or the effect size estimate on the observed scale. standard_error: float The standard error estimate on the effect size from the appropriate regression model prior_variance: float A prior variance weighting to apply to the variance for calculating the ABF. Returns ------- ABF: float The calculated Approximate Bayes Factor ''' # the variance on the MLE log OR is the squared standard error variance = standard_error ** 2 _top = sqrt((prior_variance + variance)/variance) _exp_left = -((log_or ** 2)/variance)/2.0 _exp_right = prior_variance/(prior_variance + variance) ABF = _top * exp(_exp_left * _exp_right) return ABF def ABFScore(gwas_results, region_size, chromosome, prior=None, prior_variance=0.04, clean=True): ''' Using approximate Bayes factors calculate the posterior association signal for each variant. Credible intervals will be constructed later. Arguments --------- gwas_results: string Results from a GWAS, assumed to be in Plink format. region_size: int The region (in bp) by which to extend around the association signal index SNP - taken as the fine-mapping region. Region is index bp +/- region_size/2 chromosome: string A chromosome to select from the gwas_results file. 
prior: float Prior probability NOT YET IMPLEMENTED prior_variance: float The variance prior that weights the standard error clean: boolean Whether the results table has been pre-cleaned to remove results not relevant to SNPs. e.g. if covariates had been included in the regression model these should be removed. Returns ------- out_df: pandas.Core.DataFrame All input SNPs in the fine-mapping interval with their approximate Bayes Factors and posterior probabilities ''' E.info("Reading association results from %s" % gwas_results) try: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\s*", header=0) except StopIteration: gwas_df = pd.read_table(gwas_results, index_col=None, sep="\t", header=0) if clean: pass else: gwas_df = gwas_df[gwas_df["TEST"] == "ADD"] gwas_df.index = gwas_df["SNP"] E.info("subsetting data on chromosome %s" % chromosome) chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)] try: try: chr_df.loc[:, "STAT"] = abs(chr_df["STAT"]) chr_df.sort_values(by="STAT", inplace=True, ascending=False) except KeyError: chr_df.loc[:, "T"] = abs(chr_df["T"]) chr_df.sort_values(by="T", inplace=True, ascending=False) except KeyError: chr_df.sort_values(by="CHISQ", inplace=True, ascending=False) # set p = 0 to minimum float value, ~1.79x10-308 chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308 chr_df["P"].fillna(1.0) chr_df.loc[:, "log10P"] = np.log10(chr_df["P"]) # get the index SNP and calculate standard errors # used to calculate the approximate Bayes factor E.info("calculating standard errors from association " "p-values") index_snp = chr_df.iloc[0]["SNP"] E.info("The lead SNP is {}".format(index_snp)) index_bp = chr_df.iloc[0]["BP"] z_func = lambda x: - 0.862 + sqrt(0.743 - (2.404 * np.log(x))) chr_df["Z"] = abs(chr_df["P"].apply(z_func)) chr_df["SE"] = np.log(chr_df["OR"])/abs(chr_df["Z"]) start = index_bp - region_size/2 end = index_bp + region_size/2 chr_df.index = chr_df["BP"] E.info("Fine mapping region defined as %i - %i " "on chromosome %i" % (start, end, int(chromosome))) # subsetting on range will create many NaNs due to # pandas broadcasting and filling in rows of DF sig_df = chr_df.loc[range(start, end+1)] sig_df.dropna(axis=0, how='all', inplace=True) sig_df.drop_duplicates(subset="SNP", inplace=True) sig_df.index = sig_df["SNP"] # calculate the approximate bayes factor for # each SNP E.info("calculating approximate Bayes Factors") bayes = {} # test overriding the prior on the variance # use the standard error on the medina log OR med_logor = np.log(np.median(sig_df["OR"])) std_logor = np.std(np.log(sig_df["OR"])) prior_variance = std_logor/np.sqrt(sig_df.shape[0]) E.info("The prior variance for this fine-mapping" " interval is {}, and the median log OR" " is {:f}".format(prior_variance, med_logor)) for snp in sig_df.index: logor = np.log(sig_df.loc[snp]["OR"]) se = abs(sig_df.loc[snp]["SE"]) abf = calcApproxBayesFactor(log_or=logor, standard_error=se, prior_variance=prior_variance) bayes[snp] = abf sum_bayes = np.nansum(bayes.values()) # calculate posterior probabilities as the proportion # of bayes factor/ sum all bayes factors E.info("calculating posterior probabilities") bayes_rank = pd.Series(bayes) bayes_rank.sort_values(inplace=True, ascending=False) bayes_rank = bayes_rank.fillna(0.0) posteriors = bayes_rank/sum_bayes posteriors.sort_values(ascending=False, inplace=True) # side effect - write all ABFs and Posteriors out to file out_df = pd.DataFrame({"Posterior": posteriors, "ApproxBayesFactor": bayes_rank, "SNP": posteriors.index}) out_df.index = 
out_df["SNP"] out_df.drop(["SNP"], axis=1, inplace=True) out_df.sort(["Posterior"], inplace=True, ascending=False) index_bayes = out_df.loc[index_snp]["ApproxBayesFactor"] index_p = sig_df.loc[index_snp]["log10P"] index_or = sig_df.loc[index_snp]["OR"] index_se = sig_df.loc[index_snp]["SE"] E.info("Bayes factor for lead SNP {} is {}, " "p-value {}, OR {} and SE {}".format(index_snp, index_bayes, index_p, index_or, index_se)) return out_df def getSnpIds(snp_set): ''' Parse a text file with SNP IDs, one per row. Remove duplicates. Arguments --------- snp_set: string file containing SNP IDs Returns ------- snp_list: set set of unique SNP IDs ''' E.info("Parsing SNP set IDs") with IOTools.openFile(snp_set, "r") as sfile: snps = [sn.split("\t")[0] for sn in sfile.readlines()] snpset = set(snps) snp_list = [s.rstrip("\n") for s in snpset] return snp_list def getEigenScores(eigen_dir, bim_file, snp_file): ''' Extract Eigen scores from tabix-index files for all SNPs in a provided .bim file Arguments --------- eigen_dir: string PATH to directory containing eigen scores, with suffix .tab.bgz bim_file: string plink .bim file containing SNP co-ordinates and alleles - assumes minor allele is A2 snp_file: string file containing SNP IDs, one per line Returns ------- snp_dict: dict SNP eigen scores ''' # setup a generic tabix query to reduce number # of operations tab_query = """ tabix %(eigen_dir)s/%(tab_indx)s %(contig)i:%(start)i-%(end)i | awk '{if($4 == "%(A1)s") print $0}' """ tab_dir = [td for td in os.listdir(eigen_dir) if re.search(".bgz$", td)] snp_list = getSnpIds(snp_file) E.info("SNP set of %i SNPs" % len(snp_list)) snp_dict = {} E.info("Parsing SNP co-ordinates") # tried straightforward file parsing, took too long # as average .bim file contains millions of lines # read in chunks in to pandas DataFrame, return # a generator header = ["CHR", "SNP", "cM", "BP", "A1", "A2"] file_iterator = pd.read_table(bim_file, sep="\t", chunksize=50000, header=None, index_col=None, names=header) for dataframe in file_iterator: dataframe.index = dataframe["SNP"] try: snp_frame = dataframe.loc[snp_list] # not all SNPs will appear together in a chunk # remove NA rows and duplicates snp_frame.dropna(axis=0, how='all', inplace=True) snp_frame.drop_duplicates(subset="SNP", keep="last", inplace=True) snp_frame.loc[:, "CHR"] = snp_frame["CHR"].astype(np.int64) contig = snp_frame["CHR"][0] recontig = re.compile("chr%i" % contig) tab_indx = [tx for tx in tab_dir if re.search(recontig, tx)][-1] # redefine float types as int for output # prettify and reduce downstream bugs with assumed # data types snp_frame.loc[:, "BP"] = snp_frame["BP"].astype(np.int64) for snp in snp_frame.index: # open a process with query, process on the fly A1 = snp_frame.loc[snp, "A1"] A2 = snp_frame.loc[snp, "A2"] start = snp_frame.loc[snp, "BP"] end = start proc = subprocess.Popen(tab_query % locals(), shell=True, stdout=subprocess.PIPE) score_line = proc.stdout.readlines() if len(score_line): eigen_score = score_line[0].split("\t")[-1].rstrip("\n") else: eigen_score = np.nan score_dict = {"CHR": contig, "BP": start, "A1": A1, "A2": A2, "SCORE": eigen_score} snp_dict[snp] = score_dict E.info("Eigen scores found for %i SNPs" % len(snp_dict)) except KeyError: pass return snp_dict def getSNPs(map_file, snp_list): ''' Given a SNP list with GWAS results, extract the relevant index Arguments --------- map_file: string plink format .map file with SNP positions in same order as .ped file snp_list: list list of SNP rs IDs with GWAS results Returns 
------- snp_index: dict dict of SNP, indices key,value pairs to select ''' # variant order in the map file matters, use an ordered dict variants = collections.OrderedDict() with open(map_file, "r") as mfile: for snp in mfile.readlines(): attrs = snp.split("\t") snpid = attrs[1] variants[snpid] = {"chr": attrs[0], "pos": attrs[-1].strip("\n")} variant_ids = [vj for vi, vj in enumerate(variants.keys()) if vj in snp_list] variant_idx = [i for i,j in enumerate(variants.keys()) if j in snp_list] var_idx = dict(zip(variant_ids, variant_idx)) return var_idx def flipRiskAlleles(snp_index, snp_results, genos): ''' Given an OR of a SNP on a binary phenotype, convert minor alleles to "risk" alleles, i.e. where OR > 1, if not, then invert allele Arguments --------- snp_index: list list of snp indices with GWAS results snp_results: dict snp:OR key, value pairs of SNPs and GWAS results genos: np.ndarray array of genotypes in format "11", "12" or "22" where 1 = minor allele, 2 = major allele. Returns ------- risk_genos: np.ndarray Genotypes where all "1" alleles are risk alleles, not major alleles. ''' genarray = np.array(genos) # find SNP alleles to flip flip = [] for snp in snp_results.keys(): if snp_results[snp] < 1: flip.append(snp_index[snp]) else: pass E.info("Flipped alleles: %i" % len(flip)) # swap alleles for SNPs where the minor (A1) allele # is protective # use intermediate values to avoid overwriting values flip_array = genarray[:, flip] np.place(flip_array, flip_array == "22", ["88"]) np.place(flip_array, flip_array == "11", ["99"]) np.place(flip_array, flip_array == "88", ["11"]) np.place(flip_array, flip_array == "99", ["22"]) genarray[:, flip] = flip_array return genarray def parsePed(ped_file, delim="\t", compound_geno="False"): ''' Parse a plink .ped file into a dataframe Arguments --------- ped_file: string Path to a plink .ped file delim: string delimiter that separates columns in ped_file compound_geno: boolean Whether alleles of genotype are separated by a whitespace or not. Returns ------- ped_frame: pd.Core.DataFrame pandas dataframe representation of the ped_file. Genotypes are presented as a numpy array. 
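
    Notes
    -----
    Assumes the standard plink .ped layout of six leading columns
    (family ID, individual ID, paternal ID, maternal ID, sex, phenotype)
    followed by genotype columns; only columns 0, 1, 4, 5 and 6 onwards
    are retained here.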
''' samples = [] # parse the ped file, return a dataframe with open(ped_file, "r") as pfile: for indiv in pfile.readlines(): ped_dict = {} indiv = indiv.strip("\n") indiv_split = indiv.split(delim) ped_dict["FID"] = indiv_split[0] ped_dict["IID"] = indiv_split[1] ped_dict["SEX"] = int(indiv_split[4]) ped_dict["PHEN"] = int(indiv_split[5]) ped_dict["GENOS"] = np.array(indiv_split[6:]) samples.append(ped_dict) ped_frame = pd.DataFrame(samples) return ped_frame def countRiskAlleles(ped_frame, snp_index, report, flag): ''' Count the number of risk alleles per individual and calculate the probability of the phenotype Arguments --------- ped_frame: pd.Core.DataFrame Dataframe of SNP genotypes and phenotype information snp_index: list list of snp indices denoting which columns of ped_frame are the relevant genotypes report: string either `cases_explained` - the proportion of cases explained by risk allele carriage, or `probability_phenotype` - the probability (frequency) of the binary phenotype amongst all individuals given the risk allele carriage flag: boolean output individuals explained by carriage of 2 risk alleles Returns ------- count_freq: np.ndarray cumulative frequency array of #risk alleles ''' case_freq = np.zeros(shape=len(snp_index)*2, dtype=np.float64) cntrl_freq = np.zeros(shape=len(snp_index)*2, dtype=np.float64) # group by phenotype column phen_groups = ped_frame.groupby(by="PHEN") for name, group in phen_groups: genos = group.loc[:,snp_index] # convert to 0,1,2 coding for ease of counting # treat 00 as missing/NA genos.replace({"22": 0, "12": 1, "11": 2, "00": np.nan}, inplace=True) risk_sums = np.nansum(genos, axis=1) for val in risk_sums: if name == 1: cntrl_freq[val] += 1 elif name == 2: case_freq[val] += 1 if flag: explained = pd.DataFrame(risk_sums) explained.index = group["FID"] explained["IID"] = explained.index explained.columns = ["IID", "riskAlleles"] explained = explained[explained["riskAlleles"] == 2.0] explained.to_csv("/".join([os.getcwd(), "cases_explained.tsv"]), sep="\t", index_label="FID") else: pass if report == "cases_explained": # express as the proportion of cases explained cumulative = np.cumsum(case_freq)/np.nansum(case_freq) freqs = case_freq/np.nansum(case_freq) elif report == "probability_phenotype": cumulative = np.cumsum(case_freq + cntrl_freq)/np.nansum(case_freq + cntrl_freq) freqs = case_freq/(case_freq + cntrl_freq) freqs[np.isnan(freqs)] = 0 E.info("Individuals with pheno 1: %i" % np.nansum(cntrl_freq)) E.info("Individuals with pheno 2: %i" % np.nansum(case_freq)) res_dict = {"freqs": freqs, "cumulative": cumulative, "cases": case_freq, "controls": cntrl_freq} return res_dict def plotRiskFrequency(bins, frequencies, savepath=None, ytitle=None): ''' Generate a plot of #risk alleles vs. P(binary phenotype). Arguments --------- bins: list list of histogram bins, i.e. 
#risk alleles frequencies: list list of frequencies of binary phenotype corresponding to #risk allele bins Returns ------- None - plot is generated ''' hist_df = pd.DataFrame({"bins": bins, "freq": frequencies}) py2ri.activate() R('''suppressPackageStartupMessages(library(ggplot2))''') R('''suppressPackageStartupMessages(library(scales))''') r_df = py2ri.py2ri_pandasdataframe(hist_df) R.assign("hist.df", r_df) R('''p_hist <- ggplot(hist.df, aes(x=bins, y=freq)) + ''' '''geom_point() + theme_bw() + ''' '''xlim(c(0, dim(hist.df)[1])) + ylim(c(0, 1)) + ''' '''labs(x="Number of Risk Alleles", ''' '''y="%(ytitle)s")''' % locals()) R('''png("%(savepath)s")''' % locals()) R('''print(p_hist)''') R('''dev.off()''') return hist_df def makeCredibleSet(probs_file, credible_set=0.95, lead_snp_indx=2, filename_sep="_", snp_column=0, probs_column=1): ''' Construct an N% credible set from a list of SNPs with posterior probabilities attached. If the top SNP has posterior prob >= 80%, then just this SNP will be output. Otherwise the N% credible set is output. In addition to the output credible set, this function also outputs several pieces of important information for the credible set: * The lead SNP * The SNP with the highest posterior probability, and whether this is also the lead SNP * The size of the credible set Arguments: ---------- probs_file: string Path to a file containing SNP IDs and probabilities. It must have these two columns, any others are optional and will be ignored credible_set: float percentage of posterior probability signal to capture in the credible set lead_snp_indx: int 0-based index of the lead SNP for the associated region. Used in the output file name and summary information filename_sep: string single character delimiter in the filename that can be used to extract the information, i.e. chromosome, position and lead SNP. snp_column: int 0-based column number in the input file containing SNP IDs. probs_column: int 1-based column number in the input file containing the posterior probabilities Returns: -------- posterior_set: pandas.Core.DataFrame data frame of the N% credible set containing SNP IDs and posterior probabilities ''' df = pd.read_table(probs_file, index_col=None, sep="\t", header=None) prob_df = df.iloc[:, [snp_column, probs_column]] prob_df.columns = ["SNP", "Posterior"] # some files may have header, others may not if prob_df.iloc[0, 0] == "SNP": prob_df = prob_df.iloc[1:, :] else: pass # check probabilities have been properly interpreted as floats #prob_df["Posterior"].astype(np.float64) prob_df.loc[:, "Posterior"] = pd.to_numeric(prob_df["Posterior"]) # need to allow for non-rs IDs. 
check length of split file name # expectation is 4, if longer then 2-5 together split_name = probs_file.split("/")[-1].split(filename_sep) if len(split_name) > 4: lead_snp = filename_sep.join(split_name[lead_snp_indx:-1]) else: lead_snp = split_name[lead_snp_indx] E.info("Lead SNP is {}".format(lead_snp)) # sort by posterior signal then create credible set prob_df.sort(["Posterior"], inplace=True, ascending=False) top_snp = prob_df.iloc[0, 0] top_prob = prob_df.iloc[0, 1] E.info("Top posterior signal SNP is {} with P = {}".format(top_snp, top_prob)) if top_snp == lead_snp: E.info("Lead SNP is the same as top posterior signal SNP") else: pass # often if the top SNP posterior probability is >= 80% # the remaining variants have extremely small probs # in that case we're only practically interested in the # top variant if top_prob >= 0.8: posterior_set = prob_df[:1] posterior_set.index = posterior_set.loc[:, "SNP"] posterior_set.drop(["SNP"], inplace=True, axis=1) E.info("Size of {}% credible set: 1".format(credible_set * 100)) else: set_indx = [] prob_set = 0.0 for ix in range(len(prob_df.index)): prob_set += prob_df.iloc[ix, 1] set_indx.append(ix) if prob_set >= credible_set: break else: continue posterior_set = prob_df.iloc[set_indx] posterior_set.index = posterior_set.iloc[:, 0] posterior_set = pd.DataFrame(posterior_set.iloc[:, 1]) posterior_set.columns = ["Posterior"] E.info("Size of {}% credible set: {}".format(credible_set * 100, posterior_set.shape[0])) return posterior_set def summariseResults(file_list): ''' Take a list of files from SNP prioritsation and collate into a single table Arguments --------- file_list: list list container of input files to collate into the results table. File names are expected to follow the format: <contig>_<position>_<lead_snp>_<method>.tsv Returns: -------- summary_table: pandas.Core.DataFrame pandas dataframe with columns: * lead SNP * credible set size * top set SNP * top set SNP probability * chromosome * lead SNP position ''' # extract info from file name # read in file as temporary dataframe # extract info into a dictionary # convert dict into a dataframe name_re = re.compile(r"(?P<contig>\w{2,5})_(?P<position>\d+)_(?P<snp_id>\w+)_(?P<method>\w+).tsv") snp_dicts = [] for path in file_list: filename = re.search(name_re, path.split("/")[-1]) contig = filename.group("contig") position = filename.group("position") snp_id = filename.group("snp_id") with open(path, "r") as ofile: lines = ofile.readlines() components = [xl.split("\t") for xl in lines[1:]] # snp id is index 0 in first component top_snp = components[0][0] top_prob = components[0][1].rstrip("\n") size = len(components) file_dict = {"Lead_SNP": snp_id, "Credible_set_size": size, "Top_set_SNP": top_snp, "Top_set_prob": top_prob, "Chr": contig, "Position": position} snp_dicts.append(file_dict) summary_table = pd.DataFrame(snp_dicts, index=range(len(snp_dicts))) summary_table.index = summary_table["Lead_SNP"] summary_table.drop(["Lead_SNP"], axis=1, inplace=True) return summary_table
mit
-427,224,829,082,830,100
35.841396
102
0.53317
false
4.021689
false
false
false
scttcper/hangry-py
hangrypy/__init__.py
1
1419
from bs4 import BeautifulSoup from .default_recipe_parser import recipe_parser from .foodnetwork import foodnetwork from .recipe import Recipe from .schema_org_recipe_parser import schema_org_recipe_parser, use_schema_org # messy python 3 support try: from urllib.request import urlopen, quote from urllib.parse import urlunsplit, urlsplit except ImportError: from urllib2 import urlopen, quote from urlparse import urlsplit, urlunsplit parsers = {'schema_org_recipe_parser': schema_org_recipe_parser} non_standard = {'foodnetwork.com': foodnetwork} def url_setup(url): scheme, netloc, path, qs, anchor = urlsplit(url) domain = '.'.join(netloc.split('.')[-2:]) path = quote(path, '/%') # remove everything after path url = urlunsplit((scheme, netloc, path, '', '')) return url, domain def select_parser(html, parser, domain): if parser: return parsers[parser] if domain in non_standard: return non_standard[domain] if use_schema_org(html): return schema_org_recipe_parser return recipe_parser def Hangry(url, html=None, parser=None): # open url or use passed if not html: html = urlopen(url).read() soup = BeautifulSoup(html, 'html5lib') url, domain = url_setup(url) parser = select_parser(html, parser, domain)(soup) recipe = Recipe(parser, domain, url) return recipe
mit
-7,977,861,761,553,599,000
28.5625
78
0.681466
false
3.824798
false
false
false
codylane/python_twiddle
test_twiddle.py
1
12348
#!/usr/bin/env python from __future__ import print_function import twiddle import sys def get_database_connection_maximum(host, port=twiddle.DEFAULT_PORT): ''' Returns the current maximum total connections for the database pool. ''' result = twiddle.connect_factory(host, 'bean', 'datasource', 'MaxPoolSize', port) return int(result) def get_database_connection_minimum(host, port=twiddle.DEFAULT_PORT): ''' Returns the current minim total connections for the database pool. ''' result = twiddle.connect_factory(host, 'bean', 'datasource', 'MinPoolSize', port) return int(result) def get_database_connection_current_used(host, port=twiddle.DEFAULT_PORT): ''' Returns the current number of used total connections for the database pool.' ''' result = twiddle.connect_factory(host, 'bean', 'datasource', 'NumBusyConnections', port) return int(result) def get_database_connection_current_idle(host, port=twiddle.DEFAULT_PORT): ''' Returns the current number of idle total connections for the database pool.' ''' result = twiddle.connect_factory(host, 'bean', 'datasource', 'NumIdleConnections', port) return int(result) def calculate_percentage_used(host, port=twiddle.DEFAULT_PORT, decimals=0): ''' Calculate the percentage of used database connections based from the maximum and calculate the result to the nearest decimal. Due to the way rounding works in binary form it is not a bug that if you wanted the result to be 1.6, with one decimal it cannot be represented as 1.6, instead the result would be 1.6000000000000001 ''' if decimals < 0: decimals = 0 max = float(get_database_connection_maximum(host, port)) used = float(get_database_connection_current_used(host, port)) result = (used / max) * 100 return round(result, decimals) def calculate_percentage_idle(host, port=twiddle.DEFAULT_PORT, decimals=0): ''' Calculate the percentage of idle database connections based from the maximum and calculate the result to the nearest decimal. Due to the way rounding works in binary form it is not a bug that if you wanted the result to be 1.6, with one decimal it cannot be represented as 1.6, instead the result would be 1.6000000000000001 ''' max = float(get_database_connection_maximum(host, port)) idle = float(get_database_connection_current_idle(host, port)) result = (idle / max) * 100 return round(result, decimals) def validate_required_options(): ''' Ensures that all required command line options are present. If not, exits with error message. ''' # check for required options if options.host is None: print('ERR: required option --host', file=sys.stderr) sys.exit(1) if options.port is None: print('ERR: required option --port', file=sys.stderr) sys.exit(1) def add_additional_options(): parser = twiddle.create_default_cmdline_options() parser.add_option( '--max-connections', action='store_true', default=False, metavar='MAXCONNECTIONS', dest='maxconnections', help='Returns the amount of maximum connections' ) parser.add_option( '--min-connections', action='store_true', default=False, metavar='MINCONNECTIONS', dest='minconnections', help='Returns the amount of minimum connections' ) parser.add_option( '--idle-connections', action='store_true', default=False, metavar='IDLECONNECTIONS', dest='idleconnections', help='Returns the amount of idle connections if ' \ '-w and -c are not present. 
' \
             'Otherwise this option is required with -w and -c'
    )
    parser.add_option(
        '--used-connections',
        action='store_true',
        default=False,
        metavar='USEDCONNECTIONS',
        dest='usedconnections',
        help='Returns the amount of used connections if ' \
             '-w and -c are not present. ' \
             'Otherwise this option is required with -w and -c'
    )
    parser.add_option(
        '--idle-connection-percent',
        action='store_true',
        default=False,
        metavar='IDLECONNECTIONPERCENT',
        dest='idleconnectionpercent',
        help='Returns the percentage amount of idle connections'
    )
    parser.add_option(
        '--used-connection-percent',
        action='store_true',
        default=False,
        metavar='USEDCONNECTIONPERCENT',
        dest='usedconnectionpercent',
        help='Returns the percentage amount of used connections'
    )
    parser.add_option(
        '--operator',
        # 'store' is the valid optparse action here; 'store_action' raises an
        # OptionError as soon as the parser is built
        action='store',
        default='>=',
        metavar='OPERATOR',
        dest='operator',
        help='Sets the operator that is used when calculating thresholds'
    )
    return parser


def critical_alarm(alarm_type, datasource, operator, retrieved_value, tresh_value):
    '''
    Constructs a critical alarm message that would look like the following

    alarm_type --------|
    datasource --------|---------|
    operator ----------|---------|----------------------------|------------------|
    retrieved_value ---|---------|----------------------------|--------------|   |
    tresh_value -------|---------|----------------------------|--------------|---|--|
                       V         V                            V              V   V  V
    CRITICAL: The percentage of used database connections is >= threshold [60.0 >= 40]

    @alarm_type        The type of the alarm, example [percentage, number]
    @datasource        The datasource attribute for the alarm: example [used]
    @operator          The boolean operator for the alarm in string form, example: [>=, <=, <, >]
    @retrieved_value   The retrieved value that we got from the endpoint, example [60.0]
    @tresh_value       The threshold value that was breached, example: [40]
    '''
    print('CRITICAL: The %s of %s database connections is %s threshold [%s %s %s]' \
          % (alarm_type, datasource, operator, retrieved_value, operator, tresh_value), \
          file=sys.stderr)


def warning_alarm(alarm_type, datasource, operator, retrieved_value, tresh_value):
    '''
    Constructs a warning alarm message that would look like the following

    alarm_type --------|
    datasource --------|---------|
    operator ----------|---------|----------------------------|------------------|
    retrieved_value ---|---------|----------------------------|--------------|   |
    tresh_value -------|---------|----------------------------|--------------|---|--|
                       V         V                            V              V   V  V
    WARNING: The percentage of used database connections is >= threshold [60.0 >= 40]

    @alarm_type        The type of the alarm, example [percentage, number]
    @datasource        The datasource attribute for the alarm: example [used]
    @operator          The boolean operator for the alarm in string form, example: [>=, <=, <, >]
    @retrieved_value   The retrieved value that we got from the endpoint, example [60.0]
    @tresh_value       The threshold value that was breached, example: [40]
    '''
    print('WARNING: The %s of %s database connections is %s threshold [%s %s %s]' \
          % (alarm_type, datasource, operator, retrieved_value, operator, tresh_value), \
          file=sys.stderr)


def process_thresholds(crit_thresh, warn_thresh, idle_pcnt, used_pcnt, used, idle):
    '''
    Compare the retrieved connection counts and percentages against the
    warning and critical thresholds and exit with the matching status code
    (2 = critical, 1 = warning). A trailing '%' on a threshold means it is
    compared against the percentage values, otherwise against raw counts.
    '''
    calc_crit_percentage = False
    calc_warn_percentage = False

    if crit_thresh is not None:
        calc_crit_percentage = crit_thresh.endswith('%')
        crit_thresh = int(crit_thresh.rstrip('%'))

    if warn_thresh is not None:
        calc_warn_percentage = warn_thresh.endswith('%')
        warn_thresh = int(warn_thresh.rstrip('%'))

    print('DEBUG: crit_threshold ', crit_thresh, ' calc_crit_percentage ', calc_crit_percentage)
    print('DEBUG: warn_threshold ', warn_thresh,
          ' calc_warn_percentage ', calc_warn_percentage)

    if calc_crit_percentage:
        print('DEBUG: calculating critical threshold percentages')
        print('DEBUG: used_pcnt ', used_pcnt)
        print('DEBUG: idle_pcnt ', idle_pcnt)
        if used_pcnt and used_pcnt >= crit_thresh:
            critical_alarm('percentage', 'used', '>=', used_pcnt, crit_thresh)
            sys.exit(2)
        elif idle_pcnt and idle_pcnt >= crit_thresh:
            critical_alarm('percentage', 'idle', '>=', idle_pcnt, crit_thresh)
            sys.exit(2)
    else:
        print('DEBUG: calculating critical threshold numbers')
        print('DEBUG: used ', used)
        print('DEBUG: idle ', idle)
        # only compare raw counts when a critical threshold was actually given
        if used and crit_thresh is not None and used >= crit_thresh:
            critical_alarm('number', 'used', '>=', used, crit_thresh)
            sys.exit(2)
        elif idle and crit_thresh is not None and idle >= crit_thresh:
            critical_alarm('number', 'idle', '>=', idle, crit_thresh)
            sys.exit(2)

    if calc_warn_percentage:
        print('DEBUG: calculating warning threshold percentages')
        print('DEBUG: used_pcnt ', used_pcnt)
        print('DEBUG: idle_pcnt ', idle_pcnt)
        if used_pcnt and used_pcnt >= warn_thresh:
            warning_alarm('percentage', 'used', '>=', used_pcnt, warn_thresh)
            sys.exit(1)
        elif idle_pcnt and idle_pcnt >= warn_thresh:
            warning_alarm('percentage', 'idle', '>=', idle_pcnt, warn_thresh)
            sys.exit(1)
    else:
        print('DEBUG: calculating warning threshold numbers')
        print('DEBUG: used ', used)
        print('DEBUG: idle ', idle)
        # only compare raw counts when a warning threshold was actually given
        if used and warn_thresh is not None and used >= warn_thresh:
            warning_alarm('number', 'used', '>=', used, warn_thresh)
            sys.exit(1)
        elif idle and warn_thresh is not None and idle >= warn_thresh:
            warning_alarm('number', 'idle', '>=', idle, warn_thresh)
            sys.exit(1)


decimals = 0

parser = add_additional_options()
(options, args) = parser.parse_args()

# ensure all required options are present
validate_required_options()

cmdline_results = {}
cmdline_results['max'] = None
cmdline_results['min'] = None
cmdline_results['used'] = None
cmdline_results['idle'] = None
cmdline_results['idle%'] = None
cmdline_results['used%'] = None
cmdline_results['warning'] = options.warning
cmdline_results['critical'] = options.critical

if options.maxconnections:
    cmdline_results['max'] = get_database_connection_maximum(options.host)

if options.minconnections:
    cmdline_results['min'] = get_database_connection_minimum(options.host)

if options.usedconnections:
    cmdline_results['used'] = get_database_connection_current_used(options.host)

if options.idleconnections:
    cmdline_results['idle'] = get_database_connection_current_idle(options.host)

if options.idleconnectionpercent:
    cmdline_results['idle%'] = calculate_percentage_idle(options.host, options.port, decimals)

if options.usedconnectionpercent:
    cmdline_results['used%'] = calculate_percentage_used(options.host, options.port, decimals)

if options.warning or options.critical:
    # either threshold may be omitted, so guard against None before
    # checking for a percentage ('%') suffix
    warning_is_pct = options.warning is not None and options.warning.endswith('%')
    critical_is_pct = options.critical is not None and options.critical.endswith('%')
    if warning_is_pct or critical_is_pct:
        if cmdline_results.get('used%') is None:
            cmdline_results['used%'] = calculate_percentage_used(options.host, options.port, decimals)
        if cmdline_results.get('idle%') is None:
            cmdline_results['idle%'] = calculate_percentage_idle(options.host, options.port, decimals)

if options.warning or options.critical:
    process_thresholds(
        crit_thresh=cmdline_results.get('critical'),
        warn_thresh=cmdline_results.get('warning'),
        idle_pcnt=cmdline_results.get('idle%'),
        used_pcnt=cmdline_results.get('used%'),
        used=cmdline_results.get('used'),
        idle=cmdline_results.get('idle')
    )

#if cmdline_results.get('idle') is None and cmdline_results.get('used') is None:
#    print('ERR: You cannot specify a warning percentage without --idle-connections or --used-connections')
#    sys.exit(1)

print(cmdline_results)
mit
4,603,005,909,222,156,300
39.090909
107
0.604227
false
4.131148
false
false
false
32bitmicro/EDA
python/eda/eda/dump.py
1
2948
# -*- coding: utf-8 -*- # # Copyright (c) 2014, Paweł Wodnicki # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the 32bitmicro nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY #DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from edautils import * from eda import * CRLF = "\n" class CDump: " Dump class " def __init__(self, sch=None,brd=None): self.name="" self.sch=sch self.brd=brd def dumpNet(self,net): ns = '' for node in net.nodes: ns += " pin " + str(node.pin.num) + " - " + node.pin.name + " dev " + node.dev.refid + CRLF return ns def dumpNets(self, design): ns = '' ns += "NETS: " + CRLF ns += "" + CRLF for netname in design.nets: net = design.nets[netname] ns += " " + netname + CRLF ns += self.dumpNet(net) ns += "" + CRLF return ns def dumpDevice(self, dev): ns = '' for pinnum in dev.pins: pin = dev.pins[pinnum] ns += " pin " + str(pin.num) + " - " + pin.name + " net " + pin.netname + CRLF return ns def dumpDevices(self, design): ns = '' ns += "Devices: " + CRLF ns += "" + CRLF for devname in design.devices: dev = design.devices[devname] ns += " " + devname + CRLF ns += self.dumpDevice(dev) ns += "" + CRLF return ns
bsd-3-clause
4,909,801,486,996,867,000
34.071429
111
0.598099
false
4.13764
false
false
false
djangocon/2017.djangocon.eu
conference/schedule/models.py
1
5351
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import datetime as dt from autoslug import AutoSlugField from autoslug.utils import slugify from django.core.exceptions import ValidationError from django.db import models from django.template.defaultfilters import truncatechars_html from django.utils.translation import gettext_lazy as _ from filer.fields.image import FilerImageField from filer.models import ThumbnailOption from meta.models import ModelMeta from conference.cfp.models import Submission, WorkshopSubmission class Slot(ModelMeta, models.Model): """ Model for conference time slots. It can be for a talk, a workshop, or a custom time slot (i. e. coffee break) """ talk = models.ForeignKey( Submission, related_name='talks', limit_choices_to={'selected': True}, null=True, blank=True ) slug = AutoSlugField( _('Slug'), max_length=400, blank=True, populate_from='generated_slug', always_update=True ) workshop = models.ForeignKey( WorkshopSubmission, related_name='workshops', limit_choices_to={'selected': True}, null=True, blank=True ) name = models.CharField( _('Name'), max_length=250, null=True, blank=True, help_text=_('Field for time slots that does not relate to a Talk or a Workshop.') ) mugshot = FilerImageField(verbose_name=_('Speaker mughshot'), null=True, blank=True) twitter = models.CharField(_('Twitter'), max_length=200, default='', blank=True) schedule_abstract = models.TextField(_('Schedule abstract'), blank=True, null=True) day = models.DateField(_('Date')) start = models.TimeField(_('Start')) duration = models.DurationField(_('Duration')) sprint_days = models.BooleanField(_('Part of sprint days'), default=False) show_end_time = models.BooleanField(_('Show end time in schedule'), default=False) slides = models.URLField(_('Speaker slides'), blank=True, null=True) video = models.URLField(_('Talk video'), blank=True, null=True) _metadata = { 'title': 'title', 'description': 'get_meta_abstract', 'image': 'get_image', } class Meta: verbose_name = _('Time slot') verbose_name_plural = _('Time slots') ordering = ('day', 'start') def clean(self): # ensure talk and workshop are NOT filled at the same time if self.talk and self.workshop: message = _('Please, select either a Talk or a Workshop, not both.') raise ValidationError({ 'talk': ValidationError(message=message, code='invalid'), 'workshop': ValidationError(message=message, code='invalid'), }) def get_image(self): if self.mugshot: return self.mugshot.url else: return None def get_meta_abstract(self): return truncatechars_html(self.abstract, 180) @property def title(self): if self.talk_id: return self.talk.proposal_title elif self.workshop_id: return self.workshop.proposal_title elif self.name: return self.name return '' @property def author(self): if self.talk: return self.talk.author elif self.workshop: return self.workshop.author return '' @property def generated_slug(self): return slugify(self.title) @property def twitter_split(self): if self.twitter: return self.twitter.split(',') return '' @property def abstract(self): if self.schedule_abstract: return self.schedule_abstract if self.talk: return self.talk.proposal_abstract elif self.workshop: return self.workshop.proposal_abstract return '' @property def bio(self): if self.is_talk() and self.talk.author_bio and len(self.talk.author_bio) > 3: return self.talk.author_bio if self.is_workshop() and self.workshop.author_bio and len(self.workshop.author_bio) > 3: return self.workshop.author_bio return '' @property def 
parsed_duration(self): minutes = self.duration.seconds//60 hours = minutes//60 if hours: minutes -= hours * 60 if minutes: return '{}h {}min'.format(hours, minutes) return '{}h'.format(hours) return '{}min'.format(minutes) @property def end_time(self): combined = dt.datetime.combine(dt.date.today(), self.start) end_time = combined + self.duration return end_time.time() @property def height(self): return self.duration.total_seconds() / 100 * 6 @property def thumbnail_option(self): return ThumbnailOption.objects.get(name__icontains='speaker').as_dict def is_talk(self): return True if self.talk else False is_talk.short_description = _('Talk') is_talk.boolean = True def is_workshop(self): return True if self.workshop else False is_workshop.short_description = _('Workshop') is_workshop.boolean = True def is_custom(self): return True if self.name else False is_custom.short_description = _('Custom') is_custom.boolean = True
bsd-3-clause
-7,714,352,984,290,550,000
32.44375
113
0.63091
false
4.044596
false
false
false
zliau/pivotalpy
pivotalpy/story.py
1
1404
import json

# package-relative import so this resolves under Python 3 as well as Python 2
from .project import Project


class Story(object):
    def __init__(self, pivotal):
        self.pivotal = pivotal
        self.stories_url = pivotal.base_url + 'projects/%s/stories/'

    # Get stories matching the query in @params
    def get_all(self, project_id, params=None):
        url = self.stories_url % (str(project_id))
        r = self.pivotal.make_api_request(url, 'GET', params=params)
        return r.json()

    # Create a new story with @data
    def create(self, project_id, data):
        url = self.stories_url % (str(project_id))
        r = self.pivotal.make_api_request(url, 'POST', data=data)
        return r.json()

    # Get the story specified by @story_id
    def get(self, project_id, story_id):
        url = self.stories_url % (str(project_id)) + story_id + '/'
        r = self.pivotal.make_api_request(url, 'GET')
        return r.json()

    # Update the story specified by @story_id
    def update(self, project_id, story_id, data):
        url = self.stories_url % (str(project_id)) + story_id + '/'
        r = self.pivotal.make_api_request(url, 'PUT', data=data)
        return r.json()

    # Post a comment on the story specified by @story_id
    def post_comment(self, project_id, story_id, data):
        url = self.stories_url % (str(project_id)) + story_id + '/comments/'
        r = self.pivotal.make_api_request(url, 'POST', data=data)
        return r.json()
mit
8,934,251,698,385,529,000
36.945946
76
0.611823
false
3.198178
false
false
false
steinbep/Meraki-API---Python
mx_fw_rules.py
1
1190
#!/usr/bin/env python3
import csv
import json

import meraki

#
# Python script using the Meraki API to collect all MX L3 firewall rules in all networks into a CSV file.
# Writes site, comment, policy, protocol, source port and CIDR, destination port and CIDR.
#

# Enter user's API key
apikey = 'xxxxxx'

# Enter organization ID here
organizationid = 'xxxxxxxxx'

# User input of filename
print('Enter a file name below,\nthe .csv will be appended to the name given')
filename = input('Name: ')

# Network lookup
networks = meraki.getnetworklist(apikey, organizationid, suppressprint=True)
# print(format(str(networks)))

# Loop through each network
for row in networks:
    # Firewall rule lookup for this network
    rules = meraki.getmxl3fwrules(apikey, row['id'], suppressprint=True)
    # print(format(str(rules)))
    for rule in rules:
        # print(rule)
        try:
            with open(filename + '.csv', 'a', newline='') as wr:
                a = csv.writer(wr, delimiter=',')
                data = [str(row['name']), str(rule['comment']), str(rule['policy']), str(rule['protocol']),
                        str(rule['srcPort']), str(rule['srcCidr']), str(rule['destPort']), str(rule['destCidr'])]
                a.writerow(data)
        except (KeyError, OSError):
            # skip rules with missing fields or rows that fail to write
            pass
gpl-3.0
3,407,995,353,311,586,000
29.512821
197
0.647899
false
3.661538
false
false
false
SergeySatskiy/codimension
codimension/ui/runparamsdlg.py
1
35213
# -*- coding: utf-8 -*- # # codimension - graphics python two-way code editor and analyzer # Copyright (C) 2010-2017 Sergey Satskiy <sergey.satskiy@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Run parameters dialog""" import os import os.path import copy from utils.runparams import RunParameters, RUN, PROFILE, DEBUG from utils.run import parseCommandLineArguments, checkOutput from .qt import (Qt, QDoubleValidator, QDialog, QDialogButtonBox, QVBoxLayout, QSizePolicy, QLabel, QGridLayout, QHBoxLayout, QRadioButton, QGroupBox, QPushButton, QFileDialog, QLineEdit, QTreeWidget, QAbstractItemView, QTreeWidgetItem, QCheckBox) from .itemdelegates import NoOutlineHeightDelegate class EnvVarDialog(QDialog): """Single environment variable add/edit dialog""" def __init__(self, name='', value='', parent=None): QDialog.__init__(self, parent) self.name = name self.value = value self.__nameEdit = None self.__valueEdit = None self.__OKButton = None self.__createLayout() self.setWindowTitle("Environment variable") self.setMaximumHeight(self.sizeHint().height()) self.setMaximumHeight(self.sizeHint().height()) self.__nameEdit.setText(name) self.__valueEdit.setText(value) self.__nameEdit.setEnabled(name == "") self.__OKButton.setEnabled(name != "") def __createLayout(self): """Creates the dialog layout""" self.resize(300, 50) self.setSizeGripEnabled(True) # Top level layout layout = QVBoxLayout(self) gridLayout = QGridLayout() nameLabel = QLabel("Name") gridLayout.addWidget(nameLabel, 0, 0) valueLabel = QLabel("Value") gridLayout.addWidget(valueLabel, 1, 0) self.__nameEdit = QLineEdit() self.__nameEdit.textChanged.connect(self.__nameChanged) gridLayout.addWidget(self.__nameEdit, 0, 1) self.__valueEdit = QLineEdit() self.__valueEdit.textChanged.connect(self.__valueChanged) gridLayout.addWidget(self.__valueEdit, 1, 1) layout.addLayout(gridLayout) buttonBox = QDialogButtonBox(self) buttonBox.setOrientation(Qt.Horizontal) buttonBox.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) self.__OKButton = buttonBox.button(QDialogButtonBox.Ok) self.__OKButton.setDefault(True) buttonBox.accepted.connect(self.accept) buttonBox.rejected.connect(self.close) layout.addWidget(buttonBox) def __nameChanged(self, newName): """Triggered when a variable name is changed""" strippedName = str(newName).strip() self.__OKButton.setEnabled(strippedName != "" and ' ' not in strippedName) self.name = strippedName def __valueChanged(self, newValue): """Triggered when a variable value is changed""" self.value = newValue class RunDialog(QDialog): """Run parameters dialog implementation""" ACTION_TO_VERB = {RUN: 'Run', PROFILE: 'Profile', DEBUG: 'Debug'} # See utils.run for runParameters def __init__(self, path, runParameters, profilerParams, debuggerParams, action, parent=None): QDialog.__init__(self, parent) # Used as a return value self.runParams = copy.deepcopy(runParameters) self.profilerParams = copy.deepcopy(profilerParams) 
self.debuggerParams = copy.deepcopy(debuggerParams) self.__action = action # Avoid pylint complains self.__argsEdit = None self.__scriptWDRButton = None self.__dirRButton = None self.__dirEdit = None self.__dirSelectButton = None self.__inheritParentRButton = None self.__inheritParentPlusRButton = None self.__inhPlusEnvTable = None self.__addInhButton = None self.__delInhButton = None self.__editInhButton = None self.__specificRButton = None self.__specEnvTable = None self.__addSpecButton = None self.__delSpecButton = None self.__editSpecButton = None self.__runButton = None self.__nodeLimitEdit = None self.__edgeLimitEdit = None self.__debugChildCheckBox = None self.__edgeLimitValidator = None self.__nodeLimitValidator = None self.__intSelectButton = None self.__intEdit = None self.__redirectedRButton = None self.__customIntRButton = None self.__customTermRButton = None self.__stopAtFirstCheckBox = None self.__traceInterpreterCheckBox = None self.__autoforkCheckBox = None self.__reportExceptionCheckBox = None self.__termEdit = None self.__inheritedInterpreterRButton = None self.__createLayout() self.setWindowTitle(RunDialog.ACTION_TO_VERB[action] + ' parameters for ' + path) self.__populateValues() def __populateValues(self): """Populates the dialog UI controls""" self.__argsEdit.setText(self.runParams['arguments']) self.__populateWorkingDir() self.__populateEnvironment() self.__populateInterpreter() self.__populateIO() if self.__action == PROFILE: self.__populateProfile() elif self.__action == DEBUG: self.__populateDebug() self.__setRunButtonProps() def __populateWorkingDir(self): """Populates the working directory""" if self.runParams['useScriptLocation']: self.__scriptWDRButton.setChecked(True) self.__dirEdit.setEnabled(False) self.__dirSelectButton.setEnabled(False) else: self.__dirRButton.setChecked(True) self.__dirEdit.setEnabled(True) self.__dirSelectButton.setEnabled(True) self.__dirEdit.setText(self.runParams['specificDir']) def __populateEnvironment(self): """Populates the environment variables""" self.__populateTable(self.__inhPlusEnvTable, self.runParams['additionToParentEnv']) self.__populateTable(self.__specEnvTable, self.runParams['specificEnv']) if self.runParams['envType'] == RunParameters.InheritParentEnv: self.__inheritParentRButton.setChecked(True) self.__setEnabledInheritedPlusEnv(False) self.__setEnabledSpecificEnv(False) elif self.runParams['envType'] == RunParameters.InheritParentEnvPlus: self.__inheritParentPlusRButton.setChecked(True) self.__setEnabledSpecificEnv(False) else: self.__specificRButton.setChecked(True) self.__setEnabledInheritedPlusEnv(False) def __populateInterpreter(self): """Populates the interpreter""" if self.runParams['useInherited']: self.__inheritedInterpreterRButton.setChecked(True) self.__intEdit.setEnabled(False) self.__intSelectButton.setEnabled(False) else: self.__customIntRButton.setChecked(True) self.__intEdit.setEnabled(True) self.__intSelectButton.setEnabled(True) self.__intEdit.setText(self.runParams['customInterpreter']) def __populateIO(self): """Populate I/O""" if self.runParams['redirected']: self.__redirectedRButton.setChecked(True) self.__termEdit.setEnabled(False) else: self.__customTermRButton.setChecked(True) self.__termEdit.setEnabled(True) self.__termEdit.setText(self.runParams['customTerminal']) self.__termEdit.setToolTip( 'Use ${prog} substitution if needed.\n' 'Otherwise the command line is attached at the end.\n' 'E.g.: xterm -e /bin/bash -c "${prog}; /bin/bash" &') def __populateProfile(self): """Populates profile""" 
if self.profilerParams.nodeLimit < 0.0 or \ self.profilerParams.nodeLimit > 100.0: self.profilerParams.nodeLimit = 1.0 self.__nodeLimitEdit.setText(str(self.profilerParams.nodeLimit)) if self.profilerParams.edgeLimit < 0.0 or \ self.profilerParams.edgeLimit > 100.0: self.profilerParams.edgeLimit = 1.0 self.__edgeLimitEdit.setText(str(self.profilerParams.edgeLimit)) def __populateDebug(self): """Populates debug""" self.__reportExceptionCheckBox.setChecked( self.debuggerParams.reportExceptions) self.__traceInterpreterCheckBox.setChecked( self.debuggerParams.traceInterpreter) self.__stopAtFirstCheckBox.setChecked( self.debuggerParams.stopAtFirstLine) self.__autoforkCheckBox.setChecked(self.debuggerParams.autofork) self.__debugChildCheckBox.setChecked(self.debuggerParams.followChild) self.__debugChildCheckBox.setEnabled(self.debuggerParams.autofork) @staticmethod def __populateTable(table, dictionary): """Populates the given table""" for key, value in dictionary.items(): item = QTreeWidgetItem([key, value]) table.addTopLevelItem(item) if dictionary: table.setCurrentItem(table.topLevelItem(0)) def __setEnabledInheritedPlusEnv(self, value): """Disables/enables 'inherited and add' section controls""" self.__inhPlusEnvTable.setEnabled(value) self.__addInhButton.setEnabled(value) self.__delInhButton.setEnabled(value) self.__editInhButton.setEnabled(value) def __setEnabledSpecificEnv(self, value): """Disables/enables 'specific env' section controls""" self.__specEnvTable.setEnabled(value) self.__addSpecButton.setEnabled(value) self.__delSpecButton.setEnabled(value) self.__editSpecButton.setEnabled(value) def __createLayout(self): """Creates the dialog layout""" self.resize(650, 300) self.setSizeGripEnabled(True) layout = QVBoxLayout(self) # top level layout layout.addLayout(self.__getArgLayout()) layout.addWidget(self.__getWorkingDirGroupbox()) layout.addWidget(self.__getEnvGroupbox()) layout.addWidget(self.__getInterpreterGroupbox()) layout.addWidget(self.__getIOGroupbox()) if self.__action == PROFILE: layout.addWidget(self.__getProfileLimitsGroupbox()) elif self.__action == DEBUG: layout.addWidget(self.__getDebugGroupbox()) # Buttons at the bottom buttonBox = QDialogButtonBox(self) buttonBox.setOrientation(Qt.Horizontal) buttonBox.setStandardButtons(QDialogButtonBox.Cancel) self.__runButton = buttonBox.addButton( RunDialog.ACTION_TO_VERB[self.__action], QDialogButtonBox.AcceptRole) self.__runButton.setDefault(True) self.__runButton.clicked.connect(self.onAccept) layout.addWidget(buttonBox) buttonBox.rejected.connect(self.close) def __getArgLayout(self): """Provides the arguments layout""" argsLabel = QLabel("Command line arguments") self.__argsEdit = QLineEdit() self.__argsEdit.textChanged.connect(self.__argsChanged) argsLayout = QHBoxLayout() argsLayout.addWidget(argsLabel) argsLayout.addWidget(self.__argsEdit) return argsLayout @staticmethod def __getSizePolicy(item): """Provides a common size policy""" sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(item.sizePolicy().hasHeightForWidth()) return sizePolicy def __getWorkingDirGroupbox(self): """Provides the working dir groupbox""" workDirGroupbox = QGroupBox('Working Directory', self) workDirGroupbox.setSizePolicy(self.__getSizePolicy(workDirGroupbox)) gridLayoutWD = QGridLayout(workDirGroupbox) self.__scriptWDRButton = QRadioButton("&Use script location", workDirGroupbox) gridLayoutWD.addWidget(self.__scriptWDRButton, 0, 0) 
self.__scriptWDRButton.clicked.connect(lambda: self.__wdDir(True)) self.__dirRButton = QRadioButton("Select &directory", workDirGroupbox) gridLayoutWD.addWidget(self.__dirRButton, 1, 0) self.__dirRButton.clicked.connect(lambda: self.__wdDir(False)) self.__dirEdit = QLineEdit(workDirGroupbox) gridLayoutWD.addWidget(self.__dirEdit, 1, 1) self.__dirEdit.textChanged.connect(self.__workingDirChanged) self.__dirSelectButton = QPushButton("...", workDirGroupbox) gridLayoutWD.addWidget(self.__dirSelectButton, 1, 2) self.__dirSelectButton.clicked.connect(self.__selectDirClicked) return workDirGroupbox def __getEnvGroupbox(self): """Provides the environment groupbox""" envGroupbox = QGroupBox('Environment', self) envGroupbox.setSizePolicy(self.__getSizePolicy(envGroupbox)) layoutEnv = QVBoxLayout(envGroupbox) self.__inheritParentRButton = QRadioButton("Inherit &parent", envGroupbox) self.__inheritParentRButton.clicked.connect(self.__inhClicked) layoutEnv.addWidget(self.__inheritParentRButton) self.__inheritParentPlusRButton = QRadioButton( "Inherit parent and add/&modify", envGroupbox) self.__inheritParentPlusRButton.clicked.connect(self.__inhPlusClicked) layoutEnv.addWidget(self.__inheritParentPlusRButton) hInhPlusLayout = QHBoxLayout() self.__inhPlusEnvTable = QTreeWidget() self.__inhPlusEnvTable.itemActivated.connect( lambda x, y: self.__editInhClicked()) self.__tuneTable(self.__inhPlusEnvTable) hInhPlusLayout.addWidget(self.__inhPlusEnvTable) vInhPlusLayout = QVBoxLayout() self.__addInhButton = QPushButton('Add') self.__addInhButton.clicked.connect(self.__addInhClicked) vInhPlusLayout.addWidget(self.__addInhButton) self.__delInhButton = QPushButton('Delete') self.__delInhButton.clicked.connect(self.__delInhClicked) vInhPlusLayout.addWidget(self.__delInhButton) self.__editInhButton = QPushButton("Edit") self.__editInhButton.clicked.connect(self.__editInhClicked) vInhPlusLayout.addWidget(self.__editInhButton) hInhPlusLayout.addLayout(vInhPlusLayout) layoutEnv.addLayout(hInhPlusLayout) self.__specificRButton = QRadioButton("&Specific", envGroupbox) self.__specificRButton.clicked.connect(self.__specClicked) layoutEnv.addWidget(self.__specificRButton) hSpecLayout = QHBoxLayout() self.__specEnvTable = QTreeWidget() self.__specEnvTable.itemActivated.connect( lambda x, y: self.__editSpecClicked()) self.__tuneTable(self.__specEnvTable) hSpecLayout.addWidget(self.__specEnvTable) vSpecLayout = QVBoxLayout() self.__addSpecButton = QPushButton('Add') self.__addSpecButton.clicked.connect(self.__addSpecClicked) vSpecLayout.addWidget(self.__addSpecButton) self.__delSpecButton = QPushButton('Delete') self.__delSpecButton.clicked.connect(self.__delSpecClicked) vSpecLayout.addWidget(self.__delSpecButton) self.__editSpecButton = QPushButton("Edit") self.__editSpecButton.clicked.connect(self.__editSpecClicked) vSpecLayout.addWidget(self.__editSpecButton) hSpecLayout.addLayout(vSpecLayout) layoutEnv.addLayout(hSpecLayout) return envGroupbox def __getInterpreterGroupbox(self): """Creates the interpreter groupbox""" interpreterGroupbox = QGroupBox('Python Interpreter', self) interpreterGroupbox.setSizePolicy( self.__getSizePolicy(interpreterGroupbox)) gridLayoutInt = QGridLayout(interpreterGroupbox) self.__inheritedInterpreterRButton = QRadioButton( "&Inherited", interpreterGroupbox) gridLayoutInt.addWidget(self.__inheritedInterpreterRButton, 0, 0) self.__inheritedInterpreterRButton.clicked.connect( lambda: self.__interpreter(True)) self.__customIntRButton = QRadioButton( "Select interpreter (series &3)", 
interpreterGroupbox) gridLayoutInt.addWidget(self.__customIntRButton, 1, 0) self.__customIntRButton.clicked.connect( lambda: self.__interpreter(False)) self.__intEdit = QLineEdit(interpreterGroupbox) gridLayoutInt.addWidget(self.__intEdit, 1, 1) self.__intEdit.textChanged.connect(self.__interpreterChanged) self.__intSelectButton = QPushButton("...", interpreterGroupbox) gridLayoutInt.addWidget(self.__intSelectButton, 1, 2) self.__intSelectButton.clicked.connect(self.__selectIntClicked) return interpreterGroupbox def __getIOGroupbox(self): """Creates the interpreter groupbox""" ioGroupbox = QGroupBox('Input/output', self) ioGroupbox.setSizePolicy(self.__getSizePolicy(ioGroupbox)) gridLayoutInt = QGridLayout(ioGroupbox) self.__redirectedRButton = QRadioButton("&Redirected I/O", ioGroupbox) gridLayoutInt.addWidget(self.__redirectedRButton, 0, 0) self.__redirectedRButton.clicked.connect( lambda: self.__redirected(True)) self.__customTermRButton = QRadioButton("Custom terminal string", ioGroupbox) gridLayoutInt.addWidget(self.__customTermRButton, 1, 0) self.__customTermRButton.clicked.connect( lambda: self.__redirected(False)) self.__termEdit = QLineEdit(ioGroupbox) gridLayoutInt.addWidget(self.__termEdit, 1, 1) self.__termEdit.textChanged.connect(self.__customTermChanged) return ioGroupbox def __getProfileLimitsGroupbox(self): """Creates the profile limits groupbox""" limitsGroupbox = QGroupBox('Profiler diagram limits (IDE wide)', self) limitsGroupbox.setSizePolicy(self.__getSizePolicy(limitsGroupbox)) layoutLimits = QGridLayout(limitsGroupbox) self.__nodeLimitEdit = QLineEdit() self.__nodeLimitEdit.textEdited.connect(self.__setRunButtonProps) self.__nodeLimitValidator = QDoubleValidator(0.0, 100.0, 2, self) self.__nodeLimitValidator.setNotation( QDoubleValidator.StandardNotation) self.__nodeLimitEdit.setValidator(self.__nodeLimitValidator) nodeLimitLabel = QLabel("Hide nodes below") self.__edgeLimitEdit = QLineEdit() self.__edgeLimitEdit.textEdited.connect(self.__setRunButtonProps) self.__edgeLimitValidator = QDoubleValidator(0.0, 100.0, 2, self) self.__edgeLimitValidator.setNotation( QDoubleValidator.StandardNotation) self.__edgeLimitEdit.setValidator(self.__edgeLimitValidator) edgeLimitLabel = QLabel("Hide edges below") layoutLimits.addWidget(nodeLimitLabel, 0, 0) layoutLimits.addWidget(self.__nodeLimitEdit, 0, 1) layoutLimits.addWidget(QLabel("%"), 0, 2) layoutLimits.addWidget(edgeLimitLabel, 1, 0) layoutLimits.addWidget(self.__edgeLimitEdit, 1, 1) layoutLimits.addWidget(QLabel("%"), 1, 2) return limitsGroupbox def __getDebugGroupbox(self): """Creates the debug settings groupbox""" dbgGroupbox = QGroupBox('Debugger (IDE wide)', self) dbgGroupbox.setSizePolicy(self.__getSizePolicy(dbgGroupbox)) dbgLayout = QVBoxLayout(dbgGroupbox) self.__reportExceptionCheckBox = QCheckBox("Report &exceptions") self.__reportExceptionCheckBox.stateChanged.connect( self.__onReportExceptionChanged) self.__traceInterpreterCheckBox = QCheckBox("T&race interpreter libs") self.__traceInterpreterCheckBox.stateChanged.connect( self.__onTraceInterpreterChanged) self.__stopAtFirstCheckBox = QCheckBox("Stop at first &line") self.__stopAtFirstCheckBox.stateChanged.connect( self.__onStopAtFirstChanged) self.__autoforkCheckBox = QCheckBox("&Fork without asking") self.__autoforkCheckBox.stateChanged.connect(self.__onAutoforkChanged) self.__debugChildCheckBox = QCheckBox("Debu&g child process") self.__debugChildCheckBox.stateChanged.connect(self.__onDebugChild) dbgLayout.addWidget(self.__reportExceptionCheckBox) 
dbgLayout.addWidget(self.__traceInterpreterCheckBox) dbgLayout.addWidget(self.__stopAtFirstCheckBox) dbgLayout.addWidget(self.__autoforkCheckBox) dbgLayout.addWidget(self.__debugChildCheckBox) return dbgGroupbox @staticmethod def __tuneTable(table): """Sets the common settings for a table""" table.setAlternatingRowColors(True) table.setRootIsDecorated(False) table.setItemsExpandable(False) table.setUniformRowHeights(True) table.setSelectionMode(QAbstractItemView.SingleSelection) table.setSelectionBehavior(QAbstractItemView.SelectRows) table.setItemDelegate(NoOutlineHeightDelegate(4)) table.setHeaderLabels(["Variable", "Value"]) header = table.header() header.setSortIndicator(0, Qt.AscendingOrder) header.setSortIndicatorShown(True) header.setSectionsClickable(True) table.setSortingEnabled(True) def __wdDir(self, useScriptLocation): """Working dir radio selection changed""" self.__dirEdit.setEnabled(not useScriptLocation) self.__dirSelectButton.setEnabled(not useScriptLocation) self.runParams['useScriptLocation'] = useScriptLocation self.__setRunButtonProps() def __interpreter(self, useInherited): """Interpreter radio selection changed""" self.__intEdit.setEnabled(not useInherited) self.__intSelectButton.setEnabled(not useInherited) self.runParams['useInherited'] = useInherited self.__setRunButtonProps() def __redirected(self, redirected): """I/O radio button changed""" self.__termEdit.setEnabled(not redirected) self.runParams['redirected'] = redirected self.__setRunButtonProps() def __customTermChanged(self, value): """Triggered when a custom terminal string changed""" value = str(value).strip() self.runParams['customTerminal'] = value self.__setRunButtonProps() def __argsChanged(self, value): """Triggered when cmd line args are changed""" value = str(value).strip() self.runParams['arguments'] = value self.__setRunButtonProps() def __workingDirChanged(self, value): """Triggered when a working dir value is changed""" value = str(value) self.runParams['specificDir'] = value self.__setRunButtonProps() def __interpreterChanged(self, value): """Triggered when an interpreter is changed""" value = str(value).strip() self.runParams['customInterpreter'] = value self.__setRunButtonProps() def __onCloseChanged(self, state): """Triggered when the close terminal check box changed""" self.runParams['closeTerminal'] = state != 0 def __onReportExceptionChanged(self, state): """Triggered when exception report check box changed""" self.debuggerParams.reportExceptions = state != 0 def __onTraceInterpreterChanged(self, state): """Triggered when trace interpreter changed""" self.debuggerParams.traceInterpreter = state != 0 def __onStopAtFirstChanged(self, state): """Triggered when stop at first changed""" self.debuggerParams.stopAtFirstLine = state != 0 def __onAutoforkChanged(self, state): """Triggered when autofork changed""" self.debuggerParams.autofork = state != 0 self.__debugChildCheckBox.setEnabled(self.debuggerParams.autofork) def __onDebugChild(self, state): """Triggered when debug child changed""" self.debuggerParams.followChild = state != 0 def __argumentsOK(self): """Returns True if the arguments are OK""" try: parseCommandLineArguments(self.runParams['arguments']) return True except: return False def __dirOK(self): """Returns True if the working dir is OK""" if self.__scriptWDRButton.isChecked(): return True return os.path.isdir(self.__dirEdit.text()) def __interpreterOK(self): """Checks if the interpreter is OK""" if self.__inheritedInterpreterRButton.isChecked(): return True path = 
self.__intEdit.text().strip() if not path: return 'No executable specified' try: code = "from __future__ import print_function; " \ "import sys; print(sys.version_info.major)" output = checkOutput(path + ' -c "' + code + '"', useShell=True) output = output.strip() if output != '3': return 'Only python series 3 is supported ' \ '(provided: series ' + output + ')' except: return 'Error checking the provided interpreter' def __ioOK(self): """Checks if the IO is correct""" if self.__redirectedRButton.isChecked(): return True term = self.__termEdit.text().strip() if not term: return 'No custom terminal line specified' def __setRunButtonProps(self, _=None): """Enable/disable run button and set its tooltip""" if not self.__argumentsOK(): self.__runButton.setEnabled(False) self.__runButton.setToolTip("No closing quotation in arguments") return if not self.__dirOK(): self.__runButton.setEnabled(False) self.__runButton.setToolTip("The given working " "dir is not found") return interpreterOK = self.__interpreterOK() if isinstance(interpreterOK, str): self.__runButton.setEnabled(False) self.__runButton.setToolTip('Invalid interpreter. ' + interpreterOK) return ioOK = self.__ioOK() if isinstance(ioOK, str): self.__runButton.setEnabled(False) self.__runButton.setToolTip('Invalid terminal. ' + ioOK) return if self.__nodeLimitEdit is not None: txt = self.__nodeLimitEdit.text().strip() try: value = float(txt) if value < 0.0 or value > 100.0: raise Exception("Out of range") except: self.__runButton.setEnabled(False) self.__runButton.setToolTip("The given node limit " "is out of range") return if self.__edgeLimitEdit is not None: txt = self.__edgeLimitEdit.text().strip() try: value = float(txt) if value < 0.0 or value > 100.0: raise Exception("Out of range") except: self.__runButton.setEnabled(False) self.__runButton.setToolTip("The given edge limit " "is out of range") return self.__runButton.setEnabled(True) self.__runButton.setToolTip( "Save parameters and " + RunDialog.ACTION_TO_VERB[self.__action].lower() + " script") def __selectDirClicked(self): """Selects the script working dir""" options = QFileDialog.Options() options |= QFileDialog.DontUseNativeDialog | QFileDialog.ShowDirsOnly dirName = QFileDialog.getExistingDirectory( self, "Select the script working directory", self.__dirEdit.text(), options=options) if dirName: self.__dirEdit.setText(os.path.normpath(dirName)) def __selectIntClicked(self): """Selects a python interpreter""" options = QFileDialog.Options() options |= QFileDialog.DontUseNativeDialog path, _ = QFileDialog.getOpenFileName( self, "Select python series 3 interpreter", options=options) if path: self.__intEdit.setText(os.path.normpath(path)) self.__setRunButtonProps() def __inhClicked(self): """Inerit parent env radio button clicked""" self.__setEnabledInheritedPlusEnv(False) self.__setEnabledSpecificEnv(False) self.runParams['envType'] = RunParameters.InheritParentEnv def __inhPlusClicked(self): """Inherit parent and add radio button clicked""" self.__setEnabledInheritedPlusEnv(True) self.__setEnabledSpecificEnv(False) self.runParams['envType'] = RunParameters.InheritParentEnvPlus if self.__inhPlusEnvTable.selectedIndexes(): self.__delInhButton.setEnabled(True) self.__editInhButton.setEnabled(True) else: self.__delInhButton.setEnabled(False) self.__editInhButton.setEnabled(False) def __specClicked(self): """Specific env radio button clicked""" self.__setEnabledInheritedPlusEnv(False) self.__setEnabledSpecificEnv(True) self.runParams['envType'] = RunParameters.SpecificEnvironment if 
self.__specEnvTable.selectedIndexes(): self.__delSpecButton.setEnabled(True) self.__editSpecButton.setEnabled(True) else: self.__delSpecButton.setEnabled(False) self.__editSpecButton.setEnabled(False) @staticmethod def __delAndInsert(table, name, value): """Deletes an item by name if so; insert new; highlight it""" for index in range(table.topLevelItemCount()): item = table.topLevelItem(index) if str(item.text(0)) == name: table.takeTopLevelItem(index) break item = QTreeWidgetItem([name, value]) table.addTopLevelItem(item) table.setCurrentItem(item) return item def __addInhClicked(self): """Add env var button clicked""" dlg = EnvVarDialog() if dlg.exec_() == QDialog.Accepted: name = str(dlg.name) value = str(dlg.value) self.__delAndInsert(self.__inhPlusEnvTable, name, value) self.runParams['additionToParentEnv'][name] = value self.__delInhButton.setEnabled(True) self.__editInhButton.setEnabled(True) def __addSpecClicked(self): """Add env var button clicked""" dlg = EnvVarDialog() if dlg.exec_() == QDialog.Accepted: name = str(dlg.name) value = str(dlg.value) self.__delAndInsert(self.__specEnvTable, name, value) self.runParams['specificEnv'][name] = value self.__delSpecButton.setEnabled(True) self.__editSpecButton.setEnabled(True) def __delInhClicked(self): """Delete the highlighted variable""" if self.__inhPlusEnvTable.topLevelItemCount() == 0: return name = self.__inhPlusEnvTable.currentItem().text(0) for index in range(self.__inhPlusEnvTable.topLevelItemCount()): item = self.__inhPlusEnvTable.topLevelItem(index) if name == item.text(0): self.__inhPlusEnvTable.takeTopLevelItem(index) break del self.runParams['additionToParentEnv'][str(name)] if self.__inhPlusEnvTable.topLevelItemCount() == 0: self.__delInhButton.setEnabled(False) self.__editInhButton.setEnabled(False) else: self.__inhPlusEnvTable.setCurrentItem( self.__inhPlusEnvTable.topLevelItem(0)) def __delSpecClicked(self): """Delete the highlighted variable""" if self.__specEnvTable.topLevelItemCount() == 0: return name = self.__specEnvTable.currentItem().text(0) for index in range(self.__specEnvTable.topLevelItemCount()): item = self.__specEnvTable.topLevelItem(index) if name == item.text(0): self.__specEnvTable.takeTopLevelItem(index) break del self.runParams['specificEnv'][str(name)] if self.__specEnvTable.topLevelItemCount() == 0: self.__delSpecButton.setEnabled(False) self.__editSpecButton.setEnabled(False) else: self.__specEnvTable.setCurrentItem( self.__specEnvTable.topLevelItem(0)) def __editInhClicked(self): """Edits the highlighted variable""" if self.__inhPlusEnvTable.topLevelItemCount() == 0: return item = self.__inhPlusEnvTable.currentItem() dlg = EnvVarDialog(str(item.text(0)), str(item.text(1)), self) if dlg.exec_() == QDialog.Accepted: name = str(dlg.name) value = str(dlg.value) self.__delAndInsert(self.__inhPlusEnvTable, name, value) self.runParams['additionToParentEnv'][name] = value def __editSpecClicked(self): """Edits the highlighted variable""" if self.__specEnvTable.topLevelItemCount() == 0: return item = self.__specEnvTable.currentItem() dlg = EnvVarDialog(str(item.text(0)), str(item.text(1)), self) if dlg.exec_() == QDialog.Accepted: name = str(dlg.name) value = str(dlg.value) self.__delAndInsert(self.__specEnvTable, name, value) self.runParams['specificEnv'][name] = value def onAccept(self): """Saves the selected terminal and profiling values""" if self.__action == PROFILE: self.profilerParams.nodeLimit = float( self.__nodeLimitEdit.text()) self.profilerParams.edgeLimit = float( self.__edgeLimitEdit.text()) 
self.accept()
gpl-3.0
4,505,693,968,886,528,000
39.708671
78
0.63221
false
4.323266
false
false
false
rouxcode/django-text-ckeditor
text_ckeditor/utils.py
1
3968
import random
import re

from lxml.html import fragments_fromstring, fragment_fromstring, tostring

from django.apps import apps
from django.utils.safestring import mark_safe

from . import conf

try:
    basestring
except NameError:
    basestring = str


class CKEditorHtml(object):
    # TODO replace data-djangolink="true" constant

    link_model = apps.get_model(
        app_label=conf.LINK_MODULE,
        model_name=conf.LINK_MODEL_NAME
    )

    def __init__(self, input):
        self.input = input
        self.empty_link = self.link_model()

    def render(self):
        output = ''
        fragments = fragments_fromstring(self.input)
        for fragment in fragments:
            output += self._render_fragment(fragment)
        if conf.CKEDITOR_HTML_MARK_SAFE:
            output = mark_safe(output)
        return output

    def _render_fragment(self, fragment):
        if isinstance(fragment, basestring):
            fragment = fragment_fromstring('<p>' + fragment + '</p>')
        django_links = fragment.cssselect('a[data-djangolink="true"]')
        for link in django_links:
            self._alter_link(link, fragment)
        return tostring(fragment, encoding='unicode')

    def _alter_link(self, link, fragment):
        link.attrib.pop('data-djangolink')
        kwargs = {}
        for key, value in link.items():
            if key.startswith('data-'):
                field = key.replace('data-', '', 1)
                value = link.attrib.pop(key)
                if hasattr(self.empty_link, field) and value:
                    # TODO find a proper way to do this
                    try:
                        value = int(value)
                        field = '{0}_id'.format(field)
                    except Exception:
                        pass
                    kwargs.update({field: value})
        obj = self.link_model(**kwargs)
        href = obj.get_link()
        if hasattr(obj, 'get_css_class'):
            css_class = obj.get_css_class()
        else:
            css_class = ''
        if 'mailto:' in href and conf.CKEDITOR_HTML_PROTECT_MAILTO:
            if hasattr(obj, 'get_email'):
                href = obj.get_email()
            else:
                href = href.replace('mailto:', '')
            if link.text:
                text = link.text
            else:
                text = href
            mail = mail_to_js(href, link_text=text, css_class=css_class)
            link_new = fragment_fromstring(mail)
            link.addnext(link_new)
            link.getparent().remove(link)
        else:
            link.set('href', href)
            if hasattr(obj, 'get_target'):
                link.set('target', obj.get_target())
            if css_class:
                link.set('class', css_class)


def mail_to_js(email, *args, **kwargs):
    result = ''
    text = kwargs.get('link_text', email)
    css_class = kwargs.get('css_class', '')
    email_array_content = ''
    text_array_content = ''

    def r(c): return '"' + str(ord(c)) + '",'  # NOQA

    for c in email:
        email_array_content += r(c)
    for c in text:
        text_array_content += r(c)

    id = "_tyjsdfss-" + str(random.randint(1000, 999999999999999999))
    re_email = re.sub(r',$', '', email_array_content)
    re_text = re.sub(r',$', '', text_array_content)

    result = (
        '<span id="%s"><script>'
        'var _tyjsdf=[%s];'
        'var _qplmks=[%s];'
        'var content=('
        '\'<a class="%s" href="&#x6d;&#97;&#105;&#x6c;&#000116;&#111;&#x3a;\''
        ');'
        'for(_i=0;_i<_tyjsdf.length;_i++){'
        'content+=("&#"+_tyjsdf[_i]+";");'
        '}'
        'content+=(\'">\');'
        'for(_i=0;_i<_qplmks.length;_i++){'
        'content+=(\'&#\'+_qplmks[_i]+\';\');'
        '}'
        'content+=(\'</a>\');'
        'document.getElementById(\'%s\').innerHTML=content;'
        '</script></span>'
    ) % (id, re_email, re_text, css_class, id)

    return mark_safe(result)
mit
-5,744,339,242,675,329,000
32.066667
78
0.519153
false
3.680891
false
false
false
chrisb87/advent_of_code_2016
day11/day11.py
1
3413
import pdb
import itertools


def layout_to_grid(layout, elevator, objects):
    text = []

    for floor in reversed(xrange(len(layout))):
        floor_text = ["F%d" % (floor + 1)]

        if floor == elevator:
            floor_text.append("E ")
        else:
            floor_text.append(". ")

        floor_objects = [objects[n] if n in layout[floor] else ". "
                         for n, i in enumerate(objects)]

        text.append(' '.join(floor_text + floor_objects))

    return '\n'.join(text)


def next_moves(layout, current_floor):
    results = []
    next_floors = []

    # can move up?
    if current_floor < (len(layout) - 1):
        next_floors.append(current_floor + 1)

    # can move down?
    if current_floor > 0:
        next_floors.append(current_floor - 1)

    for next_floor in next_floors:
        for moved_objects in itertools.chain(
                itertools.combinations(layout[current_floor], 1),
                itertools.combinations(layout[current_floor], 2)):

            new_floor_layout = layout[next_floor] + moved_objects

            if valid_floor(new_floor_layout):
                new_layout = []

                for floor_number in xrange(len(layout)):
                    if floor_number == current_floor:
                        new_layout.append(tuple(filter(
                            lambda o: o not in moved_objects,
                            [m for m in layout[floor_number]])))
                    elif floor_number == next_floor:
                        new_layout.append(new_floor_layout)
                    else:
                        new_layout.append(layout[floor_number])

                results.append((tuple(new_layout), next_floor))

    return results


def solve(floors, objects, max_depth):
    elevator = 0
    queue = []
    seen = set()

    path = [(floors, elevator)]
    nexts = next_moves(floors, elevator)

    queue.append((path, nexts))
    seen.add(seen_hash(floors, elevator))

    while queue:
        path, nexts = queue.pop(0)

        for floors_i, elevator_i in nexts:
            hsh = seen_hash(floors_i, elevator_i)

            if hsh in seen:
                continue
            else:
                seen.add(hsh)

            new_path = path + [(floors_i, elevator_i)]

            if is_solution(floors_i, elevator_i):
                return new_path[1:]

            if len(new_path) < max_depth:
                new_nexts = next_moves(floors_i, elevator_i)
                queue.append((new_path, new_nexts))


def is_solution(layout, elevator):
    if elevator != (len(layout) - 1):
        return False

    for floor in xrange(len(layout) - 1):
        if len(layout[floor]) > 0:
            return False

    return True


def valid_floor(floor_layout):
    generators = filter(lambda i: i%2==0, floor_layout)
    chips = filter(lambda i: i%2==1, floor_layout)

    unpaired_generators = []
    unpaired_chips = []

    for generator in generators:
        if (generator + 1) not in chips:
            unpaired_generators.append(generator)

    for chip in chips:
        if (chip - 1) not in generators:
            unpaired_chips.append(chip)

    if (len(unpaired_chips) > 0) and (len(unpaired_generators) > 0):
        return False
    else:
        return True


def seen_hash(layout, elevator):
    pairs = {}

    for f_n, floor in enumerate(layout):
        for obj in floor:
            k = obj / 2
            if k not in pairs:
                pairs[k] = []
            pairs[k].append(f_n)

    pairs = sorted(map(lambda p: tuple(p), pairs.values()))

    return (elevator, tuple(pairs))


if __name__ == '__main__':
    objects = (
        'PG', 'PM',
        'TG', 'TM',
        'MG', 'MM',
        'RG', 'RM',
        'CG', 'CM',
        'EG', 'EM',
        'DG', 'DM'
    )

    layout = (
        (0,2,3,4,6,7,8,9,10,11,12,13),
        (1,5),
        (),
        ()
    )

    elevator = 0

    print layout_to_grid(layout, elevator, objects)
    print ""

    solution = solve(layout, objects, max_depth = 100)

    if solution:
        print "%d step solution found" % len(solution)
    else:
        print "no solution"
unlicense
-7,786,298,473,346,507,000
19.93865
65
0.63639
false
2.700158
false
false
false
germtb/LightBeam
src/medium.py
1
1262
from math import pi

from drawable import Drawable
from matrix2D import Matrix
from ray import Ray


class Medium(Drawable):
    def __init__(self, refractive_index, polygon):
        self.refractiveIndex = refractive_index
        self.polygon = polygon

    def on_hit(self, ray, hit_point):
        pass

    def draw(self, resolution=100):
        self.polygon.draw(resolution)


class Detector(Medium):
    def __init__(self, refractive_index, polygon):
        super().__init__(refractive_index, polygon)
        self.detections = {}

    def on_hit(self, ray, hit_point):
        if hit_point not in self.detections.keys():
            self.detections[hit_point] = []
        self.detections[hit_point].append(ray)
        return []


class Reflector(Medium):
    def __init__(self, refractive_index, polygon):
        super().__init__(refractive_index, polygon)

    def on_hit(self, ray, hit_point):
        line = filter(lambda l: l.contains(hit_point), self.polygon.lines()).__next__()
        alpha = line.angle
        if alpha > pi:
            alpha -= pi
        reflection_matrix = Matrix.reflection_matrix(alpha)
        new_direction = reflection_matrix.dot(ray.direction)
        return [Ray(new_direction, hit_point, ray.energy, ray.phase)]
mit
5,808,206,299,481,627,000
27.704545
87
0.631537
false
3.647399
false
false
false
zstars/weblabdeusto
server/src/weblab/core/data_retriever.py
1
11519
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo Orduña <pablo@ordunya.com> # import threading import time import voodoo.log as log from weblab.data.experiments import CommandSent, ExperimentUsage, FileSent import weblab.core.file_storer as file_storer import weblab.data.command as Command class TemporalInformationRetriever(threading.Thread): """ This class retrieves continuously the information of initial and finished experiments. """ PRINT_ERRORS = True def __init__(self, cfg_manager, initial_store, finished_store, commands_store, completed_store, db_manager): threading.Thread.__init__(self) self.cfg_manager = cfg_manager self.keep_running = True self.initial_store = initial_store self.finished_store = finished_store self.commands_store = commands_store self.completed_store = completed_store self.iterations = 0 self.db_manager = db_manager self.timeout = None # Take the default of TemporalInformationStore self.entry_id2command_id = {} self.entry_id2command_id_lock = threading.Lock() self.setDaemon(True) def run(self): while self.keep_running: try: self.iterations += 1 self.iterate() except: if self.PRINT_ERRORS: import traceback traceback.print_exc() log.log( TemporalInformationRetriever, log.level.Critical, "Exception iterating in TemporalInformationRetriever!!!") log.log_exc( TemporalInformationRetriever, log.level.Critical ) def stop(self): self.keep_running = False def iterate(self): self.iterate_initial() if self.keep_running and self.commands_store.empty() and self.completed_store.empty(): self.iterate_finish() if self.keep_running: self.iterate_command() if self.keep_running: self.iterate_completed() def iterate_initial(self): initial_information = self.initial_store.get(timeout=self.timeout) if initial_information is not None: initial_timestamp = time.mktime(initial_information.initial_time.timetuple()) + initial_information.initial_time.microsecond / 1e6 end_timestamp = time.mktime(initial_information.end_time.timetuple()) + initial_information.end_time.microsecond / 1e6 request_info = initial_information.request_info from_ip = request_info.pop('from_ip','<address not found>') try: username = request_info.pop('username') except: log.log( TemporalInformationRetriever, log.level.Critical, "Provided information did not contain some required fields (such as username or role). This usually means that the reservation has previously been expired. 
Provided request_info: %r; provided data: %r" % (request_info, initial_information), max_size = 10000) log.log_exc( TemporalInformationRetriever, log.level.Critical ) return usage = ExperimentUsage() usage.start_date = initial_timestamp usage.from_ip = from_ip usage.experiment_id = initial_information.experiment_id usage.reservation_id = initial_information.reservation_id usage.coord_address = initial_information.exp_coordaddr usage.request_info = initial_information.request_info command_request = CommandSent( Command.Command("@@@initial::request@@@"), initial_timestamp, Command.Command(str(initial_information.client_initial_data)), end_timestamp) command_response = CommandSent( Command.Command("@@@initial::response@@@"), initial_timestamp, Command.Command(str(initial_information.initial_configuration)), end_timestamp) usage.append_command(command_request) usage.append_command(command_response) self.db_manager.store_experiment_usage(username, usage) def iterate_completed(self): completed_information = self.completed_store.get(timeout=self.timeout) if completed_information is not None: username, usage, callback = completed_information self.db_manager.store_experiment_usage(username, usage) callback() def iterate_finish(self): information = self.finished_store.get(timeout=self.timeout) if information is not None: reservation_id, obj, initial_time, end_time = information if not self.commands_store.empty() or not self.completed_store.empty(): # They have higher priority self.finished_store.put(reservation_id, obj, initial_time, end_time) return initial_timestamp = time.mktime(initial_time.timetuple()) + initial_time.microsecond / 1e6 end_timestamp = time.mktime(end_time.timetuple()) + end_time.microsecond / 1e6 command = CommandSent( Command.Command("@@@finish@@@"), initial_timestamp, Command.Command(str(obj)), end_timestamp) if not self.db_manager.finish_experiment_usage(reservation_id, initial_timestamp, command): # If it could not be added because the experiment id # did not exist, put it again in the queue self.finished_store.put(reservation_id, obj, initial_time, end_time) time.sleep(0.01) def iterate_command(self): information = self.commands_store.get(timeout=self.timeout) if information is not None: all_information = [ information ] # Retrieve all the remaining information to ensure that it it finally empty, # with a maximum of 1000 registries per request max_registries = 1000 counter = 0 while not self.commands_store.empty() and counter < max_registries: counter += 1 information = self.commands_store.get(timeout=0) if information is not None: all_information.append(information) command_pairs = [] command_responses = [] command_requests = {} file_pairs = [] file_responses = [] file_requests = {} backup_information = {} backup_information_responses = {} # Process for information in all_information: if information.is_command: if information.is_before: backup_information[information.entry_id] = information command_requests[information.entry_id] = (information.reservation_id, CommandSent( information.payload, information.timestamp)) else: backup_information_responses[information.entry_id] = information command_request = command_requests.pop(information.entry_id, None) if command_request is not None: reservation_id, command_sent = command_request complete_command = CommandSent( command_sent.command, command_sent.timestamp_before, information.payload, information.timestamp) command_pairs.append((reservation_id, information.entry_id, complete_command)) else: with 
self.entry_id2command_id_lock: command_id = self.entry_id2command_id.pop(information.entry_id, None) if command_id is None: self.commands_store.put(information) else: command_responses.append((information.entry_id, command_id, information.payload, information.timestamp)) else: if information.is_before: backup_information[information.entry_id] = information file_requests[information.entry_id] = (information.reservation_id, information.payload) else: backup_information_responses[information.entry_id] = information file_request = file_requests.pop(information.entry_id, None) if file_request is not None: reservation_id, file_sent = file_request if file_sent.is_loaded(): storer = file_storer.FileStorer(self.cfg_manager, reservation_id) stored = storer.store_file(self, file_sent.file_content, file_sent.file_info) file_path = stored.file_path file_hash = stored.file_hash else: file_path = file_sent.file_path file_hash = file_sent.file_hash complete_file = FileSent(file_path, file_hash, file_sent.timestamp_before, information.payload, information.timestamp) file_pairs.append((reservation_id, information.entry_id, complete_file)) else: with self.entry_id2command_id_lock: command_id = self.entry_id2command_id.pop(information.entry_id, None) if command_id is None: self.commands_store.put(information) else: file_responses.append((information.entry_id, command_id, information.payload, information.timestamp)) # At this point, we have all the information processed and # ready to be passed to the database in a single commit mappings = self.db_manager.store_commands(command_pairs, command_requests, command_responses, file_pairs, file_requests, file_responses) elements_to_backup = [] with self.entry_id2command_id_lock: for entry_id in mappings: command_id = mappings[entry_id] if command_id is not None and command_id is not False: self.entry_id2command_id[entry_id] = mappings[entry_id] else: elements_to_backup.append(entry_id) for entry_id in elements_to_backup: if entry_id in backup_information: self.commands_store.put(backup_information[entry_id]) if entry_id in backup_information_responses: self.commands_store.put(backup_information_responses[entry_id])
bsd-2-clause
2,681,901,111,945,592,000
47.805085
333
0.575534
false
4.763441
false
false
false
HiroyukiSakai/Contour
contour/views.py
1
25693
# Contour Copyright (C) 2013-2014 Hiroyuki Sakai # # This file is part of Contour. # # Contour is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Contour is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Contour. If not, see <http://www.gnu.org/licenses/>. """Describes the views used in Contour. .. moduleauthor:: Hiroyuki Sakai <hiroyuki.sakai@student.tuwien.ac.at> """ import base64, os, pdb, urllib from PIL import Image as PImage from django.core.files import File from django.http import Http404, HttpResponse from django.shortcuts import render import flickrapi, numpy as np from scipy import misc, ndimage from skimage import filter, io, transform import secret from .. import settings from .forms import * from .models import * from .set_metrics import * from .util import slugify MISSING_PIXELS_PENALTY_FACTOR = 1. SUPERFLUOUS_PIXELS_PENALTY_FACTOR = .1 def create_session(request, view_name, id): """Creates a user session. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param view_name: The name of the view to which the session should be associated. :type view_name: string. :param id: The id of the :class:`.models.Image` or :class:`.models.Track`. :type id: int. """ if not request.session.get('is_playing'): request.session['is_playing'] = True request.session['view_name'] = view_name request.session['id'] = id def clear_session(request): """Clears all varibles of the user session. :param request: The request object containing the user session. :type request: :class:`django.http.HttpRequest`. """ request.session['is_playing'] = False request.session['view_name'] = None request.session['id'] = None request.session['image_id'] = None request.session['image_index'] = None request.session['track_session_id'] = None request.session['drawing_id'] = None request.session['last_drawing_id'] = None def destroy_session(request): """Destroys a currently running user session if such a request has been sent. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :returns: bool -- `True` if the session was cleared, otherwise `None`. """ if request.session.get('is_playing') and request.method == 'POST': form = DiscardSessionForm(request.POST) if form.is_valid() and form.cleaned_data['discard_session']: clear_session(request) return True return def check_session(request, view_name=None, id=None): """Checks if the requested URL is in canon with the currently running session. The user will be asked if he wants to discad his session if there's a discrepancy. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param view_name: The name of the requested view to which the session should be associated. :type view_name: string. :param id: The id of the requested :class:`.models.Image` or :class:`.models.Track`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. 
""" if request.session.get('is_playing') and (view_name != request.session.get('view_name') or id != request.session.get('id')): return render(request, 'confirm_discard.html', { 'form': DiscardSessionForm(), 'view_name': request.session.get('view_name'), 'id': request.session.get('id'), }); def get_player(name): """Returns a :class:`.models.Player` object. A new player will be created if the requested player doesn't exist. :param view_name: The name of the requested player. :type view_name: string. :returns: :class:`models.Player` -- The requested player. """ try: player = Player.objects.get(name=name) except Player.DoesNotExist: player = Player(name=name) player.save() return player def save_session(request): """Saves a track session. This function is called as soon as the player chooses to save his scores. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. """ if request.session.get('is_playing') and request.method == 'POST': form = SaveSessionForm(request.POST) if form.is_valid() and form.cleaned_data['save_session']: if request.session.get('drawing_id'): drawing = Drawing.objects.get(id=request.session.get('drawing_id')) player = get_player(form.cleaned_data['name']) drawing.player = player drawing.save() elif request.session.get('track_session_id'): track_session = TrackSession.objects.get(id=request.session.get('track_session_id')) player = get_player(form.cleaned_data['name']) track_session.player = player track_session.save() for drawing in Drawing.objects.filter(track_session=track_session): drawing.player = player drawing.save() clear_session(request) def process_image(request, image): """Creates an edge image and calculates the values needed for the score calculation if necessary. This function is called as soon as an image is requested. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param image: The image to be processed. :type image: :class:`models.Image`. """ # detect edges if not image.edge_image: greyscale_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.image.name), as_grey=True) # resize image height = len(greyscale_image) width = len(greyscale_image[0]) factor = 768.0 / height greyscale_image = transform.resize(greyscale_image, [height * factor, width * factor]) # detect edges edges = filter.canny(greyscale_image, sigma=image.canny_sigma, low_threshold=image.canny_low_threshold, high_threshold=image.canny_high_threshold) # save edge image temp_filename = '/tmp/' + request.session.session_key + '.png' io.imsave(temp_filename, ~edges * 1.) image.edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename))) os.remove(temp_filename) if not image.dilated_edge_image: edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.edge_image.name), as_grey=True) edge_image = edge_image.astype(np.float64) if edge_image.max() > 1.: edge_image /= 255. # map values greater .5 as edge edge_image = (1. - edge_image) / .5 # save dilated edge image temp_filename = '/tmp/' + request.session.session_key + '.png' io.imsave(temp_filename, ~ndimage.binary_dilation(edge_image, iterations=2) * 1.) 
image.dilated_edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename))) os.remove(temp_filename) # save maximum distance (needed for score calculation) if not image.max_distance: ones = np.ones(image.edge_image.height * image.edge_image.width).reshape((image.edge_image.height, image.edge_image.width)) dilated_edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.dilated_edge_image.name), as_grey=True) dilated_edge_image = dilated_edge_image.astype(np.float64) if dilated_edge_image.max() > 1.: dilated_edge_image /= 255. image.max_distance = np.sum(np.absolute(ones - dilated_edge_image)) image.save() def handle_finished_drawing(request): """This function is called as soon as the user finishes his drawing. It saves and associates his drawing to the running track session. It also assesses the score of the drawing. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :returns: :class:`models.Drawing` -- The created drawing object. """ if request.session.get('is_playing'): if request.method == 'POST': form = FinishDrawingForm(request.POST) if form.is_valid() and form.cleaned_data['finish_drawing']: # save drawing image_data = base64.b64decode(request.POST['image']) temp_filename = '/tmp/' + request.session.session_key + '.png' file = open(temp_filename, 'wb') file.write(image_data) file.close() im = PImage.open(temp_filename) im = im.convert("RGB") im.save(temp_filename, "PNG") image = Image.objects.get(id=request.session.get('image_id')) # calculate distance greyscale_drawing = io.imread(temp_filename, as_grey=True) greyscale_drawing = misc.imresize(greyscale_drawing, (image.edge_image.height, image.edge_image.width), mode='F') dilated_edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.dilated_edge_image.name), as_grey=True) greyscale_drawing = greyscale_drawing.astype(np.float64) dilated_edge_image = dilated_edge_image.astype(np.float64) # correct ranges of images if necessary if greyscale_drawing.max() > 1.: greyscale_drawing /= 255. if dilated_edge_image.max() > 1.: dilated_edge_image /= 255. missing_pixels = np.clip(greyscale_drawing - dilated_edge_image, 0., 1.) overlapping_pixels = np.clip((1. - greyscale_drawing) * (1. - dilated_edge_image), 0., 1.) superfluous_pixels = np.clip(dilated_edge_image - greyscale_drawing, 0., 1.) # number of pixels in the edge image which are not covered distance = np.sum(missing_pixels) * MISSING_PIXELS_PENALTY_FACTOR; # number of pixels in the drawing which are misplaced distance += np.sum(superfluous_pixels) * SUPERFLUOUS_PIXELS_PENALTY_FACTOR; score = max((image.max_distance - distance) / image.max_distance * 100, 0.) # save drawing drawing = Drawing(image=image, distance=distance, score=score) drawing.drawing.save(request.session.session_key + '.png', File(open(temp_filename))) # generate and save score image score_image = np.zeros((image.edge_image.height, image.edge_image.width, 3), dtype=np.float64) score_image[:, :, 0] += missing_pixels score_image[:, :, 1] += missing_pixels score_image[:, :, 2] += missing_pixels score_image[:, :, 0] += superfluous_pixels score_image[:, :, 1] += overlapping_pixels io.imsave(temp_filename, score_image * 1.) 
drawing.score_image.save(request.session.session_key + '_score.png', File(open(temp_filename))) drawing.save() # delete temporary file os.remove(temp_filename) return drawing return def handle_finished_edge_image(request): """This function is called as soon as the admin finishes his drawing. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :returns: :class:`models.Drawing` -- The created drawing object. """ if request.method == 'POST': form = FinishEdgeImageForm(request.POST) if form.is_valid() and form.cleaned_data['finish_edge_image']: # save new edge image image_data = base64.b64decode(request.POST['image']) temp_filename = '/tmp/' + request.session.session_key + '.png' file = open(temp_filename, 'wb') file.write(image_data) file.close() image = Image.objects.get(id=form.cleaned_data['image_id']) im = PImage.open(temp_filename) im = im.convert("RGB") im.save(temp_filename, "PNG") edge_image = io.imread(temp_filename, as_grey=True) edge_image = misc.imresize(edge_image, (image.edge_image.height, image.edge_image.width), mode='F') edge_image = edge_image.astype(np.float64) # correct ranges of images if necessary if edge_image.max() > 1.: edge_image /= 255. # save edge image image.edge_image.save(image.edge_image.name, File(open(temp_filename))) # delete old computed values image.max_distance = None image.dilated_edge_image.delete() image.save() # delete temporary file os.remove(temp_filename) return image.edge_image return def handle_uploaded_file(request, form): """This function is called as soon as the user uploads a file. It saves his image on the filesystem. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :returns: :class:`models.Image` -- The created image object. """ file = request.FILES['file'] sigma = form.cleaned_data['sigma'] # save file temp_filename = '/tmp/' + request.session.session_key with open(temp_filename, 'wb+') as destination: for chunk in file.chunks(): destination.write(chunk) image = Image(title=slugify(file.name), canny_sigma=sigma) split = os.path.splitext(os.path.basename(file.name)) image.image.save(slugify(split[0]) + split[1], File(open(temp_filename))) image.save() os.remove(temp_filename) return image def handle_flickr_search(request, form): """This function is called as soon as the user submits a Flickr search query. It saves the found image on the filesystem. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :returns: :class:`models.Image` -- The created image object. """ query = form.cleaned_data['query'] sigma = form.cleaned_data['sigma'] flickr = flickrapi.FlickrAPI(secret.FLICKR_API_KEY) for photo in flickr.walk(text=query, extras='1'): temp_filename = '/tmp/' + request.session.session_key + '.jpg' urllib.urlretrieve('http://farm' + photo.get('farm') + '.staticflickr.com/' + photo.get('server') + '/' + photo.get('id') + '_' + photo.get('secret') + '.jpg', temp_filename) title = slugify(str(photo.get('title'))) image = Image(title=title, url='http://www.flickr.com/photos/' + photo.get('owner') + '/' + photo.get('id'), canny_sigma=sigma) image.image.save(title[:64] + '.jpg', File(open(temp_filename))) image.save() os.remove(temp_filename) return image def index(request): """This is the view function for the home page. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. 
:returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ clear_canvas = destroy_session(request) save_session(request) discard_session_page = check_session(request) if discard_session_page: return discard_session_page tracks = Track.objects.all() track_highscores = {} for track in tracks: track_highscores[track.id] = { 'title': track.title, 'highscores': TrackSession.objects.filter(player__isnull=False, track=track).order_by('-score'), } return render(request, 'main_menu.html', { 'upload_file_form': UploadFileForm(), 'search_flickr_form': SearchFlickrForm(), 'tracks': tracks, 'track_highscores': track_highscores, 'single_drawing_highscores': Drawing.objects.filter(player__isnull=False, track_session__isnull=True), 'clear_canvas': clear_canvas, }) def canvas(request, id=None): """This is the view function for a single drawing canvas. It is called for the file upload and Flickr game modes. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param id: The id of the requested :class:`.models.Image`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ view_name = 'Contour.contour.views.canvas' if id: id = long(id) elif view_name == request.session.get('view_name'): id = request.session.get('id') image = None; if request.method == 'POST': form = UploadFileForm(request.POST, request.FILES) if form.is_valid(): image = handle_uploaded_file(request, form) if image: id = image.id form = SearchFlickrForm(request.POST) if form.is_valid(): image = handle_flickr_search(request, form) if image: id = image.id if not image: try: image = Image.objects.get(id=id) except Image.DoesNotExist: raise Http404 clear_canvas = destroy_session(request) discard_session_page = check_session(request, view_name, id) if discard_session_page: return discard_session_page if not request.session.get('is_playing'): request.session['image_id'] = id request.session['image_index'] = 0 create_session(request, view_name, id) drawing = handle_finished_drawing(request) if drawing: request.session['last_drawing_id'] = drawing.id request.session['dont_show_welcome'] = True request.session['drawing_id'] = drawing.id request.session['image_index'] = request.session.get('image_index') + 1 return HttpResponse(True) last_drawing = None if request.session.get('last_drawing_id'): try: last_drawing = Drawing.objects.get(id=request.session.get('last_drawing_id')) except Drawing.DoesNotExist: None if request.method == 'POST': form = RetryDrawingForm(request.POST) if form.is_valid() and form.cleaned_data['retry_drawing']: request.session['last_drawing_id'] = None request.session['image_index'] = request.session.get('image_index') - 1 last_drawing.delete() last_drawing = None if request.session.get('image_index'): return render(request, 'completed.html', { 'retry_drawing_form': RetryDrawingForm(), 'save_session_form': SaveSessionForm(), 'discard_session_form': DiscardSessionForm(), 'drawing': Drawing.objects.get(id=request.session.get('drawing_id')), 'last_drawing': last_drawing, }) process_image(request, image) return render(request, 'game.html', { 'finish_drawing_form': FinishDrawingForm(), 'retry_drawing_form': RetryDrawingForm(), 'image': image, 'score': 0, 'clear_canvas': clear_canvas, 'show_welcome': not request.session.get('dont_show_welcome', False), 'last_drawing': last_drawing, }) def track(request, id): """This is the view function for track sessions. 
:param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param id: The id of the requested :class:`.models.Track`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ view_name = 'Contour.contour.views.track' if id: id = long(id) try: track = Track.objects.get(id=id) except Track.DoesNotExist: raise Http404 clear_canvas = destroy_session(request) discard_session_page = check_session(request, view_name, id) if discard_session_page: return discard_session_page if not request.session.get('is_playing'): track_session = TrackSession(session_key=request.session.session_key, track=track, score=0) track_session.save() request.session['image_index'] = 0 request.session['track_session_id'] = track_session.id else: track_session = TrackSession.objects.get(id=request.session.get('track_session_id')) create_session(request, view_name, id) drawing = handle_finished_drawing(request) if drawing: drawing.track_session = track_session drawing.track_session_index = request.session.get('image_index') drawing.save() request.session['last_drawing_id'] = drawing.id request.session['dont_show_welcome'] = True request.session['image_index'] = request.session.get('image_index') + 1 track_session.score += drawing.score track_session.save() return HttpResponse(True) last_drawing = None if request.session.get('last_drawing_id'): try: last_drawing = Drawing.objects.get(id=request.session.get('last_drawing_id')) except Drawing.DoesNotExist: None if request.method == 'POST': form = RetryDrawingForm(request.POST) if form.is_valid() and form.cleaned_data['retry_drawing']: request.session['last_drawing_id'] = None request.session['image_index'] = request.session.get('image_index') - 1 track_session.score -= last_drawing.score track_session.save() last_drawing.delete() last_drawing = None try: image = TrackImage.objects.filter(track=track)[request.session.get('image_index')].image except IndexError: return render(request, 'completed.html', { 'retry_drawing_form': RetryDrawingForm(), 'save_session_form': SaveSessionForm(), 'discard_session_form': DiscardSessionForm(), 'track_session': track_session, 'drawings': Drawing.objects.filter(track_session=track_session), 'last_drawing': last_drawing, }) request.session['image_id'] = image.id process_image(request, image) return render(request, 'game.html', { 'finish_drawing_form': FinishDrawingForm(), 'retry_drawing_form': RetryDrawingForm(), 'image': image, 'score': track_session.score, 'clear_canvas': clear_canvas, 'image_number': request.session.get('image_index') + 1, 'image_count': TrackImage.objects.filter(track=track).count(), 'show_welcome': not request.session.get('dont_show_welcome', False), 'last_drawing': last_drawing, }) def drawing(request, id): """This is the view function to view the score summary of single drawings. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param id: The id of the requested :class:`.models.Drawing`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ if id: id = long(id) try: drawing = Drawing.objects.get(id=id, player__isnull=False) except Drawing.DoesNotExist: raise Http404 return render(request, 'drawing.html', { 'drawing': drawing, }) def session(request, id): """This is the view function to view the score summary of track sessions. :param request: The request object containing the user request. 
:type request: :class:`django.http.HttpRequest`. :param id: The id of the requested :class:`.models.TrackSession`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ if id: id = long(id) try: track_session = TrackSession.objects.get(id=id, player__isnull=False) except TrackSession.DoesNotExist: raise Http404 return render(request, 'session.html', { 'track_session': track_session, 'drawings': Drawing.objects.filter(track_session=track_session), }) def admin_edge_image(request, id): """This is the view function to edit the edge images in the admin section. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param id: The id of the requested :class:`.models.Image`. :type id: int. :returns: :class:`django.http.HttpResponse` -- The rendered template as response. """ if id: id = long(id) try: image = Image.objects.get(id=id) except Image.DoesNotExist: raise Http404 process_image(request, image) edge_image = handle_finished_edge_image(request) if edge_image: return HttpResponse(True) return render(request, 'admin/edge_image.html', { 'form': FinishEdgeImageForm(), 'image': image, })
gpl-3.0
-2,998,517,735,398,188,500
35.392351
182
0.63313
false
3.859546
false
false
false
jdfekete/progressivis
progressivis/core/changemanager_literal.py
1
1960
"Change Manager for literal values (supporting ==)" from .bitmap import bitmap from .index_update import IndexUpdate from .changemanager_base import BaseChangeManager class LiteralChangeManager(BaseChangeManager): """ Manage changes that occured in a literal value between runs. """ VALUE = bitmap([0]) def __init__(self, slot, buffer_created=True, buffer_updated=False, buffer_deleted=True, buffer_exposed=False, buffer_masked=False): super(LiteralChangeManager, self).__init__( slot, buffer_created, buffer_updated, buffer_deleted, buffer_exposed, buffer_masked) self._last_value = None def reset(self, name=None): super(LiteralChangeManager, self).reset(name) self._last_value = None def compute_updates(self, data): last_value = self._last_value changes = IndexUpdate() if last_value == data: return changes if last_value is None: if self.created.buffer: changes.created.update(self.VALUE) elif data is None: if self.deleted.buffer: changes.deleted.update(self.VALUE) elif self.updated.buffer: changes.updated.update(self.VALUE) self._last_value = data return changes def update(self, run_number, data, mid): # pylint: disable=unused-argument assert isinstance(data, bitmap) if run_number != 0 and run_number <= self._last_update: return changes = self.compute_updates(data) self._last_update = run_number self._row_changes.combine(changes, self.created.buffer, self.updated.buffer, self.deleted.buffer)
bsd-2-clause
7,233,266,110,331,283,000
31.131148
64
0.557653
false
4.677804
false
false
false
dteal/dteal-site
pages/links.py
1
2392
title = 'Links' class Link: def __init__(self, section, url, name): self.section = section; self.url = url; self.name = name links = [ Link('News', 'https://hackaday.com', 'Hackaday : viz, articles on crazy engineering projects'), Link('News', 'https://news.ycombinator.com', 'Hacker News : a mostly-software-related news aggregator'), Link('News', 'https://slashdot.org', 'Slashdot : "News for nerds, stuff that matters."'), Link('News', 'https://www.nytimes.com', 'The New York Times : the canonical world news source'), Link('References', 'https://en.wikipedia.org/wiki/Main_Page', 'Wikipedia : the nascent <em>Encyclopedia Galactica</em>'), Link('References', 'https://oeis.org', 'The On-Line Encyclopedia of Integer Sequences (OEIS).'), Link('Vendors', 'https://www.mcmaster.com/#', 'McMaster-Carr : <em>the</em> American hardware vendor'), Link('Vendors', 'https://www.digikey.com', 'Digi-Key : McMaster-Carr for electronics'), Link('Vendors', 'https://www.pololu.com', 'Pololu : small robot parts'), Link('Vendors', 'https://www.dreamhost.com', 'Dreamhost : excellent web hosting'), Link('Vendors', 'https://darksky.net', 'Dark Sky : sufficiently modern weather forecasting'), Link('Journals', 'https://www.nature.com/nature/', 'Nature : the premier scientific journal'), Link('Journals', 'https://www.nature.com/nnano/', 'Nature Nanotechnology : nanotechnology from the Nature publishing group'), Link('Journals', 'https://onlinelibrary.wiley.com/journal/15214095', 'Advanced Materials : best of materials science'), Link('Journals', 'https://onlinelibrary.wiley.com/journal/16163028', 'Advanced Functional Materials : more materials science'), Link('Journals', 'https://pubs.acs.org/journal/ancac3', 'ACS Nano : nanoscience'), Link('Libraries', 'https://www.gutenberg.org', 'Project Gutenberg : public domain e-books'), Link('Libraries', 'https://pixabay.com', 'Pixabay : public domain images'), Link('Libraries', 'http://magnatune.com', 'Magnatune : DRM-free music'), ] content="""<header> <h1>Links / Daniel Teal</h1> <p>Herein lie portals to stupendously awesome websites I find useful.</p> </header>""" sections = [] for link in links: if not link.section in sections: sections.append(link.section) content += '<div class="section">' + link.section + '</div>\n' content += '<a href="' + link.url + '" class="link">' + link.name + '</a>\n'
cc0-1.0
3,724,751,775,368,633,000
60.333333
127
0.693562
false
3.043257
false
false
false
rzzzwilson/morse_trainer
tests/test_proficiency.py
1
1854
#!/usr/bin/env python3

"""
Test the 'show proficiency' widget.
"""

import sys
sys.path.append('..')

from random import randint
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton,
                             QHBoxLayout, QVBoxLayout)

from proficiency import Proficiency
import utils


class ProficiencyExample(QWidget):
    """Application to demonstrate the Morse Trainer 'Proficiency' widget."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.alphabet_status = Proficiency(utils.Alphabetics)
        self.numbers_status = Proficiency(utils.Numbers)
        self.punctuation_status = Proficiency(utils.Punctuation)
        redisplay_button = QPushButton('Redisplay', self)

        hbox1 = QHBoxLayout()
        hbox1.addWidget(self.alphabet_status)
        hbox1.addWidget(self.numbers_status)
        hbox1.addWidget(self.punctuation_status)

        hbox2 = QHBoxLayout()
        hbox2.addWidget(redisplay_button)

        vbox = QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)

        self.setLayout(vbox)

        redisplay_button.clicked.connect(self.redisplayButtonClicked)

        self.setGeometry(100, 100, 800, 200)
        self.setWindowTitle('Example of Proficiency widget')
        self.show()

    def redisplayButtonClicked(self):
        """Regenerate some data (random) and display it."""

        for gd in (self.alphabet_status, self.numbers_status,
                   self.punctuation_status):
            # generate random data
            new = {}
            for char in gd.data:
                new[char] = randint(0,100)/100

            # set first character to 0
            new[gd.data[0]] = 0

            # redisplay
            gd.setState(new)

app = QApplication(sys.argv)
ex = ProficiencyExample()
sys.exit(app.exec())
mit
-354,986,496,323,980,800
27.090909
72
0.625674
false
4.048035
false
false
false
UpSea/midProjects
BasicOperations/01_01_PyQt4/StandardSaveAndOpen.py
1
1219
from PyQt4 import QtGui,QtCore
import os
#from PyQt4.QtWidgets import QFileDialog

class MyWindow(QtGui.QWidget):
    def __init__(self):
        super(MyWindow,self).__init__()

        self.myButton = QtGui.QPushButton(self)
        self.myButton.setObjectName("myButton")
        self.myButton.setText("Test")
        self.myButton.clicked.connect(self.msg)

    def msg(self):
        #directory1 = QtGui.QFileDialog.getExistingDirectory(self,"选取文件夹",os.getcwd())  # starting directory
        #print(directory1)

        #fileName1 = QtGui.QFileDialog.getOpenFileName(self, "选取文件", os.getcwd(), "All Files (*);;Text Files (*.txt)")  # set the file extension filter; note the double-semicolon separator
        #print(fileName1)

        #files= QtGui.QFileDialog.getOpenFileNames(self,"多文件选择",os.getcwd(), "All Files (*);;Text Files (*.txt)")
        #print(files)

        fileName2 = QtGui.QFileDialog.getSaveFileName(self, "文件保存", os.getcwd(),"All Files (*);;Text Files (*.txt)")
        print(fileName2)

if __name__=="__main__":
    import sys

    app=QtGui.QApplication(sys.argv)
    myshow=MyWindow()
    myshow.show()
    sys.exit(app.exec_())
mit
6,061,444,577,844,532,000
41.296296
142
0.617003
false
3.160665
false
false
false
debugger06/MiroX
tv/lib/frontends/widgets/gtk/tableviewcells.py
1
10798
# Miro - an RSS based video player application # Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 # Participatory Culture Foundation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the OpenSSL # library. # # You must obey the GNU General Public License in all respects for all of # the code used other than OpenSSL. If you modify file(s) with this # exception, you may extend this exception to your version of the file(s), # but you are not obligated to do so. If you do not wish to do so, delete # this exception statement from your version. If you delete this exception # statement from all source files in the program, then also delete it here. """tableviewcells.py - Cell renderers for TableView.""" import gobject import gtk import pango from miro import signals from miro import infolist from miro.frontends.widgets import widgetconst from miro.frontends.widgets.gtk import drawing from miro.frontends.widgets.gtk import wrappermap from miro.frontends.widgets.gtk.base import make_gdk_color class CellRenderer(object): """Simple Cell Renderer https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def __init__(self): self._renderer = self.build_renderer() self.want_hover = False def build_renderer(self): return gtk.CellRendererText() def setup_attributes(self, column, attr_map): column.add_attribute(self._renderer, 'text', attr_map['value']) def set_align(self, align): if align == 'left': self._renderer.props.xalign = 0.0 elif align == 'center': self._renderer.props.xalign = 0.5 elif align == 'right': self._renderer.props.xalign = 1.0 else: raise ValueError("unknown alignment: %s" % align) def set_color(self, color): self._renderer.props.foreground_gdk = make_gdk_color(color) def set_bold(self, bold): font_desc = self._renderer.props.font_desc if bold: font_desc.set_weight(pango.WEIGHT_BOLD) else: font_desc.set_weight(pango.WEIGHT_NORMAL) self._renderer.props.font_desc = font_desc def set_text_size(self, size): if size == widgetconst.SIZE_NORMAL: self._renderer.props.scale = 1.0 elif size == widgetconst.SIZE_SMALL: # FIXME: on 3.5 we just ignored the call. Always setting scale to # 1.0 basically replicates that behavior, but should we actually # try to implement the semantics of SIZE_SMALL? 
self._renderer.props.scale = 1.0 else: raise ValueError("unknown size: %s" % size) def set_font_scale(self, scale_factor): self._renderer.props.scale = scale_factor class ImageCellRenderer(CellRenderer): """Cell Renderer for images https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def build_renderer(self): return gtk.CellRendererPixbuf() def setup_attributes(self, column, attr_map): column.add_attribute(self._renderer, 'pixbuf', attr_map['image']) class GTKCheckboxCellRenderer(gtk.CellRendererToggle): def do_activate(self, event, treeview, path, background_area, cell_area, flags): iter = treeview.get_model().get_iter(path) self.set_active(not self.get_active()) wrappermap.wrapper(self).emit('clicked', iter) gobject.type_register(GTKCheckboxCellRenderer) class CheckboxCellRenderer(signals.SignalEmitter): """Cell Renderer for booleans https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def __init__(self): signals.SignalEmitter.__init__(self) self.create_signal("clicked") self._renderer = GTKCheckboxCellRenderer() wrappermap.add(self._renderer, self) self.want_hover = False def set_control_size(self, size): pass def setup_attributes(self, column, attr_map): column.add_attribute(self._renderer, 'active', attr_map['value']) class GTKCustomCellRenderer(gtk.GenericCellRenderer): """Handles the GTK hide of CustomCellRenderer https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def on_get_size(self, widget, cell_area=None): wrapper = wrappermap.wrapper(self) widget_wrapper = wrappermap.wrapper(widget) style = drawing.DrawingStyle(widget_wrapper, use_base_color=True) # NOTE: CustomCellRenderer.cell_data_func() sets up its attributes # from the model itself, so we don't have to worry about setting them # here. width, height = wrapper.get_size(style, widget_wrapper.layout_manager) x_offset = self.props.xpad y_offset = self.props.ypad width += self.props.xpad * 2 height += self.props.ypad * 2 if cell_area: x_offset += cell_area.x y_offset += cell_area.x extra_width = max(0, cell_area.width - width) extra_height = max(0, cell_area.height - height) x_offset += int(round(self.props.xalign * extra_width)) y_offset += int(round(self.props.yalign * extra_height)) return x_offset, y_offset, width, height def on_render(self, window, widget, background_area, cell_area, expose_area, flags): widget_wrapper = wrappermap.wrapper(widget) cell_wrapper = wrappermap.wrapper(self) selected = (flags & gtk.CELL_RENDERER_SELECTED) if selected: if widget.flags() & gtk.HAS_FOCUS: state = gtk.STATE_SELECTED else: state = gtk.STATE_ACTIVE else: state = gtk.STATE_NORMAL if cell_wrapper.IGNORE_PADDING: area = background_area else: xpad = self.props.xpad ypad = self.props.ypad area = gtk.gdk.Rectangle(cell_area.x + xpad, cell_area.y + ypad, cell_area.width - xpad * 2, cell_area.height - ypad * 2) context = drawing.DrawingContext(window, area, expose_area) if (selected and not widget_wrapper.draws_selection and widget_wrapper.use_custom_style): # Draw the base color as our background. This erases the gradient # that GTK draws for selected items. 
window.draw_rectangle(widget.style.base_gc[state], True, background_area.x, background_area.y, background_area.width, background_area.height) context.style = drawing.DrawingStyle(widget_wrapper, use_base_color=True, state=state) widget_wrapper.layout_manager.update_cairo_context(context.context) hotspot_tracker = widget_wrapper.hotspot_tracker if (hotspot_tracker and hotspot_tracker.hit and hotspot_tracker.column == self.column and hotspot_tracker.path == self.path): hotspot = hotspot_tracker.name else: hotspot = None if (self.path, self.column) == widget_wrapper.hover_info: hover = widget_wrapper.hover_pos hover = (hover[0] - xpad, hover[1] - ypad) else: hover = None # NOTE: CustomCellRenderer.cell_data_func() sets up its attributes # from the model itself, so we don't have to worry about setting them # here. widget_wrapper.layout_manager.reset() cell_wrapper.render(context, widget_wrapper.layout_manager, selected, hotspot, hover) def on_activate(self, event, widget, path, background_area, cell_area, flags): pass def on_start_editing(self, event, widget, path, background_area, cell_area, flags): pass gobject.type_register(GTKCustomCellRenderer) class CustomCellRenderer(CellRenderer): """Customizable Cell Renderer https://develop.participatoryculture.org/index.php/WidgetAPITableView""" IGNORE_PADDING = False def __init__(self): CellRenderer.__init__(self) wrappermap.add(self._renderer, self) def build_renderer(self): return GTKCustomCellRenderer() def setup_attributes(self, column, attr_map): column.set_cell_data_func(self._renderer, self.cell_data_func, attr_map) def cell_data_func(self, column, cell, model, iter, attr_map): cell.column = column cell.path = model.get_path(iter) row = model[iter] # Set attributes on self instead cell This works because cell is just # going to turn around and call our methods to do the rendering. for name, index in attr_map.items(): setattr(self, name, row[index]) def hotspot_test(self, style, layout, x, y, width, height): return None class ItemListRenderer(CustomCellRenderer): """Custom Renderer for InfoListModels https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def cell_data_func(self, column, cell, model, it, attr_map): item_list = wrappermap.wrapper(model).item_list row = model.row_of_iter(it) self.info = item_list.get_row(row) self.attrs = item_list.get_attrs(self.info.id) self.group_info = item_list.get_group_info(row) cell.column = column cell.path = row class ItemListRendererText(CellRenderer): """Renderer for InfoListModels that only display text https://develop.participatoryculture.org/index.php/WidgetAPITableView""" def setup_attributes(self, column, attr_map): column.set_cell_data_func(self._renderer, self.cell_data_func, attr_map) def cell_data_func(self, column, cell, model, it, attr_map): item_list = wrappermap.wrapper(model).item_list info = item_list.get_row(model.row_of_iter(it)) cell.set_property("text", self.get_value(info)) def get_value(self, info): """Get the text to render for this cell :param info: ItemInfo to render. """
gpl-2.0
-5,552,584,922,404,404,000
39.291045
80
0.655214
false
3.811507
false
false
false
kvar/ansible
lib/ansible/plugins/action/nxos.py
1
8406
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy import re import sys from ansible import constants as C from ansible.module_utils._text import to_text from ansible.module_utils.connection import Connection from ansible.plugins.action.network import ActionModule as ActionNetworkModule from ansible.module_utils.network.common.utils import load_provider from ansible.module_utils.network.nxos.nxos import nxos_provider_spec from ansible.utils.display import Display display = Display() class ActionModule(ActionNetworkModule): def run(self, tmp=None, task_vars=None): del tmp # tmp no longer has any effect module_name = self._task.action.split('.')[-1] self._config_module = True if module_name == 'nxos_config' else False socket_path = None persistent_connection = self._play_context.connection.split('.')[-1] if (persistent_connection == 'httpapi' or self._task.args.get('provider', {}).get('transport') == 'nxapi') \ and module_name in ('nxos_file_copy', 'nxos_nxapi'): return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (module_name)} if module_name == 'nxos_file_copy': self._task.args['host'] = self._play_context.remote_addr self._task.args['password'] = self._play_context.password if self._play_context.connection == 'network_cli': self._task.args['username'] = self._play_context.remote_user elif self._play_context.connection == 'local': self._task.args['username'] = self._play_context.connection_user if module_name == 'nxos_install_os': persistent_command_timeout = 0 persistent_connect_timeout = 0 connection = self._connection if connection.transport == 'local': persistent_command_timeout = C.PERSISTENT_COMMAND_TIMEOUT persistent_connect_timeout = C.PERSISTENT_CONNECT_TIMEOUT else: persistent_command_timeout = connection.get_option('persistent_command_timeout') persistent_connect_timeout = connection.get_option('persistent_connect_timeout') display.vvvv('PERSISTENT_COMMAND_TIMEOUT is %s' % str(persistent_command_timeout), self._play_context.remote_addr) display.vvvv('PERSISTENT_CONNECT_TIMEOUT is %s' % str(persistent_connect_timeout), self._play_context.remote_addr) if persistent_command_timeout < 600 or persistent_connect_timeout < 600: msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT' msg += ' must be set to 600 seconds or higher when using nxos_install_os module.' 
msg += ' Current persistent_command_timeout setting:' + str(persistent_command_timeout) msg += ' Current persistent_connect_timeout setting:' + str(persistent_connect_timeout) return {'failed': True, 'msg': msg} if persistent_connection in ('network_cli', 'httpapi'): provider = self._task.args.get('provider', {}) if any(provider.values()): display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection) del self._task.args['provider'] if self._task.args.get('transport'): display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection) del self._task.args['transport'] elif self._play_context.connection == 'local': provider = load_provider(nxos_provider_spec, self._task.args) transport = provider['transport'] or 'cli' display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr) if transport == 'cli': pc = copy.deepcopy(self._play_context) pc.connection = 'network_cli' pc.network_os = 'nxos' pc.remote_addr = provider['host'] or self._play_context.remote_addr pc.port = int(provider['port'] or self._play_context.port or 22) pc.remote_user = provider['username'] or self._play_context.connection_user pc.password = provider['password'] or self._play_context.password pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file pc.become = provider['authorize'] or False if pc.become: pc.become_method = 'enable' pc.become_pass = provider['auth_pass'] display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout') connection.set_options(direct={'persistent_command_timeout': command_timeout}) socket_path = connection.run() display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) if not socket_path: return {'failed': True, 'msg': 'unable to open shell. 
Please see: ' + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} task_vars['ansible_socket'] = socket_path else: self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context) else: return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection} if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli': # make sure we are in the right cli context which should be # enable mode and not config module if socket_path is None: socket_path = self._connection.socket_path conn = Connection(socket_path) # Match prompts ending in )# except those with (maint-mode)# config_prompt = re.compile(r'^.*\((?!maint-mode).*\)#$') out = conn.get_prompt() while config_prompt.match(to_text(out, errors='surrogate_then_replace').strip()): display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) conn.send_command('exit') out = conn.get_prompt() result = super(ActionModule, self).run(task_vars=task_vars) return result @staticmethod def nxapi_implementation(provider, play_context): provider['transport'] = 'nxapi' if provider.get('host') is None: provider['host'] = play_context.remote_addr if provider.get('port') is None: if provider.get('use_ssl'): provider['port'] = 443 else: provider['port'] = 80 if provider.get('timeout') is None: provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT if provider.get('username') is None: provider['username'] = play_context.connection_user if provider.get('password') is None: provider['password'] = play_context.password if provider.get('use_ssl') is None: provider['use_ssl'] = False if provider.get('validate_certs') is None: provider['validate_certs'] = True return provider
gpl-3.0
4,933,697,414,735,714,000
47.589595
138
0.621342
false
4.267005
false
false
false
intelmakers/candy_machine
Python/listen.py
1
1677
#!/usr/bin/python import collections import mraa import os import sys import time # Import things for pocketsphinx import pyaudio import wave import pocketsphinx as ps import sphinxbase my_dir = os.path.dirname(__file__) dict_name = 8670 # Parameters for pocketsphinx LMD = "{0}/dict/{1}.lm".format(my_dir, dict_name) DICTD = "{0}/dict/{1}.dic".format(my_dir, dict_name) CHUNK = 1024 FORMAT = pyaudio.paInt16 CHANNELS = 1 RATE = 16000 RECORD_SECONDS = 3 PATH = 'output' # if "ALL" in words: # print "ALL" if not os.path.exists(PATH): os.makedirs(PATH) pya = pyaudio.PyAudio() speech_rec = ps.Decoder(lm=LMD, dict=DICTD) def decodeSpeech(speech_rec, wav_file): wav_file = file(wav_file,'rb') wav_file.seek(44) speech_rec.decode_raw(wav_file) result = speech_rec.get_hyp() return result[0] def doListen(): # Record audio stream = pya.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) print("* recording") frames = [] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print("* done recording") stream.stop_stream() stream.close() #pya.terminate() # Write .wav file fn = "o.wav" wf = wave.open(os.path.join(PATH, fn), 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(pya.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() # Decode speech wav_file = os.path.join(PATH, fn) recognised = decodeSpeech(speech_rec, wav_file) rec_words = recognised.split() print "Recognized: {0}".format(recognised) # Playback recognized word(s) cm = 'espeak "'+recognised+'"' os.system(cm) return recognised
cc0-1.0
-399,519,975,128,383,940
20.5
100
0.698271
false
2.691814
false
false
false
kietdlam/Dator
data_api/models.py
1
7208
from uuid import uuid4 from django.db import models from django.contrib.auth.models import Group from django.db.models.signals import pre_save, post_save from django.dispatch import receiver from data_api import file_provider import pandas as pd import django.utils.timezone as tmz import pytz import delorean class SystemModel(models.Model): created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) uuid = models.CharField(max_length=128, db_index=True) class Meta: abstract = True class Event(SystemModel): """ An event is used to record controller specific events for correlation with data signals. """ group = models.ManyToManyField(Group) type = models.CharField(max_length=32) info = models.TextField(null=True) local_computer = models.ForeignKey('LocalComputer', null=True) system = models.ForeignKey('System', null=True) def __unicode__(self): return "{}:{}".format(self.local_computer_id, self.type) class System(SystemModel): """ A system is a group of related LocalComputers that coordinate actions and signals with each other. """ group = models.ManyToManyField(Group) name = models.CharField(max_length=128) timezone = models.CharField(max_length=32) shifts = models.ManyToManyField('Shift') def __unicode__(self): return self.name class Shift(SystemModel): """ A Shift is used to record the beginning and the end of an experiment """ name = models.CharField(max_length=128) ended_at = models.DateTimeField(null=True) class LocalComputer(SystemModel): """ A LocalComputer system is a cpu capable of loading a program, recording data from sensors and operating actuators. """ group = models.ManyToManyField(Group) name = models.CharField(max_length=128) registration_token = models.CharField(max_length=128) secret_uuid = models.CharField(max_length=128) system = models.ForeignKey('System', null=True) command_refresh_sec = models.IntegerField(default=10) is_running = models.BooleanField(default=False) def __unicode__(self): return self.name # No-op COMMAND_NOOP = 0 # shut down local_computer listener COMMAND_DONE = 1 # load and start the indicated program on the local computer COMMAND_LOAD_PROGRAM = 2 # stop the indicated program on the local computer COMMAND_STOP_PROGRAM = 3 class Command(SystemModel): """ Commands are enumerated json messages for LocalComputers. When command has been successfully executed, the is_executed flag is set to True """ local_computer = models.ForeignKey('LocalComputer') type = models.IntegerField(default=COMMAND_NOOP, db_index=True) json_command = models.CharField(max_length="512", null=True) is_executed = models.BooleanField(default=False, db_index=True) def __unicode__(self): return "{}:{}:{}".format(self.local_computer_id, self.type, self.created_at) class Program(SystemModel): """ A loadable script/code file that can be run on a local computer. A program will be run periodically with with a pause of the indicated sleep_time between sucessive runs. """ group = models.ManyToManyField(Group) code = models.TextField(null=True) description = models.TextField(null=True) name = models.CharField(max_length=128) sleep_time_sec = models.FloatField(default=1.0) def __unicode__(self): return self.name class Map(SystemModel): """ A map is a list of known signals with semantic meaning. 
""" group = models.ManyToManyField(Group) name = models.CharField(max_length=128) controller = models.ForeignKey('LocalComputer') def __unicode__(self): return self.name ACTUATOR = 1 SENSOR = 2 class MapPoint(SystemModel): map = models.ForeignKey('Map') point_type = models.IntegerField(default=SENSOR) name = models.CharField(max_length=128) path = models.CharField(max_length=128) controller = models.ForeignKey('LocalComputer') def __unicode__(self): return self.name SIGNAL_PROVIDER = file_provider BLOB_PROVIDER = file_provider class Signal(SystemModel): """ A time signal of floats. """ group = models.ManyToManyField(Group) name = models.CharField(max_length=128, db_index=True) system = models.ForeignKey('System', null=True) local_computer = models.ForeignKey('LocalComputer', null=True) def __unicode__(self): return self.name def add_points(self, data_points): """Add points to the signal :param data_points [[<float value>,<time in millisec>],...] """ SIGNAL_PROVIDER.startup() SIGNAL_PROVIDER.append_data(self.uuid, ''.join(["[{:.15},{:.15}]".format(float(datum[0]),float(datum[1])) for datum in data_points])) def get_data(self): SIGNAL_PROVIDER.startup() data = SIGNAL_PROVIDER.get_blob(self.uuid) tokens = data.split("]") points = [] for token in tokens: if token != '': ts = token[1:].split(",") points.append((float(ts[0]), float(ts[1]))) return points @classmethod def millisec_to_utc(cls, millisec): return tmz.datetime.fromtimestamp(float(millisec), tz=pytz.UTC) @classmethod def utc_to_millisec(cls, dt): return delorean.Delorean(dt, timezone="UTC").epoch() def get_time_series(self): values, dates = self.get_data() return pd.TimeSeries(values, index=dates) def clear(self): SIGNAL_PROVIDER.startup() SIGNAL_PROVIDER.clear(self.uuid) class Setting(SystemModel): group = models.ManyToManyField(Group) key = models.CharField(max_length=128, db_index=True) value = models.CharField(max_length=128) local_computer = models.ForeignKey('LocalComputer', null=True) system = models.ForeignKey('System', null=True) def __unicode__(self): return '{},{}'.format(self.key, self.value) class Blob(SystemModel): group = models.ManyToManyField(Group) name = models.CharField(max_length=128, db_index=True) system = models.ForeignKey('System', null=True) local_computer = models.ForeignKey('LocalComputer', null=True) def __unicode__(self): return self.name def get_data(self): BLOB_PROVIDER.startup() data = BLOB_PROVIDER.get_blob(self.uuid) return data def set_data(self, json_data): BLOB_PROVIDER.startup() BLOB_PROVIDER.write_blob(self.uuid, json_data) @receiver(pre_save, sender=Command) @receiver(pre_save, sender=LocalComputer) @receiver(pre_save, sender=Map) @receiver(pre_save, sender=MapPoint) @receiver(pre_save, sender=Program) @receiver(pre_save, sender=Shift) @receiver(pre_save, sender=Signal) @receiver(pre_save, sender=System) @receiver(pre_save, sender=Setting) @receiver(pre_save, sender=Event) @receiver(pre_save, sender=Blob) def set_uuid(sender, instance, **kwargs): """ Register all SystemModel derived classes to set uuid """ if not instance.uuid: instance.uuid = str(uuid4())
mit
-4,165,941,014,930,690,000
30.203463
130
0.677026
false
3.805702
false
false
false
4dsolutions/Python5
get_movie.py
1
1400
# -*- coding: utf-8 -*- """ Created on Mon Jul 11 2016 @author: Kirby Urner Uses API documented at http://www.omdbapi.com/ to query IMDB movie database. """ import requests # from collections import namedtuple import json # Movie = namedtuple("Movie", "status_code content") class Movie: def __init__(self, status_code, json_data): self.status = status_code self.content = json.loads(str(json_data, encoding="UTF-8")) def __str__(self): obj = self.content # decoded json, a dict the_title = "Title: {:^40}\n".format(obj["Title"]) the_actors = "Actors: \n" for actor in obj["Actors"].split(","): the_actors += ".....{:>30}\n".format(actor) the_story = ("Story: \n") the_story += obj["Plot"] return the_title + the_actors + the_story def __repr__(self): return "Movie(Title={}, Released={})".format(self.content["Title"], self.content["Released"]) def get_movie(url): r = requests.get(url) return Movie(r.status_code, r.content) # test the_url = "http://www.omdbapi.com/?i=tt0120338&plot=full&r=json" # GET result = get_movie(the_url) print(result.content) print("-----------") the_url = "http://www.omdbapi.com/?t=Titanic&y=1997&plot=full&r=json" result = get_movie(the_url) print(result)
mit
-994,236,702,834,751,500
26.45098
76
0.575
false
3.286385
false
false
false
peterbe/headsupper
headsupper/base/models.py
1
1493
from django.db import models from django.utils import timezone from django.conf import settings from jsonfield import JSONField class Project(models.Model): # e.g. mozilla/socorro github_full_name = models.CharField(max_length=200) # This'll match '^Headsup: ...' trigger_word = models.CharField(default='Headsup', max_length=100) case_sensitive_trigger_word = models.BooleanField(default=False) github_webhook_secret = models.CharField(max_length=100) # email(s) send_to = models.TextField() send_cc = models.TextField(blank=True, null=True) send_bcc = models.TextField(blank=True, null=True) # If this is set to true, don't react to individual commit # payloads, but only on commits that are tags, and then # find all the commits in that tag range. on_tag_only = models.BooleanField(default=False) on_branch = models.CharField(default='master', blank=True, max_length=200) creator = models.ForeignKey(settings.AUTH_USER_MODEL) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return '<%s %r>' % (self.__class__.__name__, self.github_full_name) class Payload(models.Model): project = models.ForeignKey(Project, null=True) payload = JSONField() http_error = models.IntegerField() messages = JSONField() date = models.DateTimeField(default=timezone.now) def replay(self): raise NotImplementedError
mpl-2.0
7,377,486,451,298,321,000
32.177778
78
0.701273
false
3.7325
false
false
false
robtherad/BC-Mod
tools/search_privates.py
1
4456
#!/usr/bin/env python3 # Created by ACE 3 team, modified by BC: https://github.com/acemod/ACE3 import fnmatch import os import re import ntpath import sys import argparse def get_private_declare(content): priv_declared = [] srch = re.compile('private.*') priv_srch_declared = srch.findall(content) priv_srch_declared = sorted(set(priv_srch_declared)) priv_dec_str = ''.join(priv_srch_declared) srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]') priv_split = srch.findall(priv_dec_str) priv_split = sorted(set(priv_split)) priv_declared += priv_split; srch = re.compile('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*') priv_srch_declared = srch.findall(content) priv_srch_declared = sorted(set(priv_srch_declared)) priv_dec_str = ''.join(priv_srch_declared) srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]') priv_split = srch.findall(priv_dec_str) priv_split = sorted(set(priv_split)) priv_declared += priv_split; srch = re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*') priv_local = srch.findall(content) priv_local_declared = sorted(set(priv_local)) priv_declared += priv_local_declared; return priv_declared def check_privates(filepath): bad_count_file = 0 def pushClosing(t): closingStack.append(closing.expr) closing << Literal( closingFor[t[0]] ) def popClosing(): closing << closingStack.pop() with open(filepath, 'r') as file: content = file.read() priv_use = [] priv_use = [] # Regex search privates srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]') priv_use = srch.findall(content) priv_use = sorted(set(priv_use)) # Private declaration search priv_declared = get_private_declare(content) if '_this' in priv_declared: priv_declared.remove('_this') if '_this' in priv_use: priv_use.remove('_this') if '_x' in priv_declared: priv_declared.remove('_x') if '_x' in priv_use: priv_use.remove('_x') if '_forEachIndex' in priv_declared: priv_declared.remove('_forEachIndex') if '_forEachIndex' in priv_use: priv_use.remove('_forEachIndex') if '_foreachIndex' in priv_declared: priv_declared.remove('_foreachIndex') if '_foreachIndex' in priv_use: priv_use.remove('_foreachIndex') if '_foreachindex' in priv_declared: priv_declared.remove('_foreachindex') if '_foreachindex' in priv_use: priv_use.remove('_foreachindex') missing = [] for s in priv_use: if s.lower() not in map(str.lower,priv_declared): if s.lower() not in map(str.lower,missing): missing.append(s) if len(missing) > 0: print (filepath) private_output = 'private['; first = True for bad_priv in missing: if first: first = False private_output = private_output + '"' + bad_priv else: private_output = private_output + '", "' + bad_priv private_output = private_output + '"];'; print (private_output) for bad_priv in missing: print ('\t' + bad_priv) bad_count_file = bad_count_file + 1 return bad_count_file def main(): print("#########################") print("# Search your Privates #") print("#########################") sqf_list = [] bad_count = 0 parser = argparse.ArgumentParser() parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".") args = parser.parse_args() for root, dirnames, filenames in os.walk('../addons' + '/' + args.module): for filename in fnmatch.filter(filenames, '*.sqf'): sqf_list.append(os.path.join(root, filename)) for filename in sqf_list: bad_count = bad_count + check_privates(filename) print ("Bad Count {0}".format(bad_count)) if __name__ == "__main__": main()
gpl-3.0
-4,066,306,607,683,146,000
31.75
126
0.540638
false
3.653815
false
false
false
ssebastianj/ia2013-tpi-rl
src/gui/qtgen/codetailsdialog.py
1
5881
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'D:\Sebastian\Mis documentos\Programacion\Proyectos\IA2013TPIRL\gui\qt\IA2013TPIRLGUI\codetailsdialog.ui' # # Created: Tue Jul 09 15:27:46 2013 # by: PyQt4 UI code generator 4.10.2 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_CODetailsDialog(object): def setupUi(self, CODetailsDialog): CODetailsDialog.setObjectName(_fromUtf8("CODetailsDialog")) CODetailsDialog.setWindowModality(QtCore.Qt.WindowModal) CODetailsDialog.resize(345, 490) self.gridLayout_2 = QtGui.QGridLayout(CODetailsDialog) self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2")) self.verticalLayout = QtGui.QVBoxLayout() self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.label_3 = QtGui.QLabel(CODetailsDialog) self.label_3.setObjectName(_fromUtf8("label_3")) self.verticalLayout.addWidget(self.label_3) self.tblSecuenciaEstados = QtGui.QTableWidget(CODetailsDialog) self.tblSecuenciaEstados.setObjectName(_fromUtf8("tblSecuenciaEstados")) self.tblSecuenciaEstados.setColumnCount(3) self.tblSecuenciaEstados.setRowCount(0) item = QtGui.QTableWidgetItem() self.tblSecuenciaEstados.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.tblSecuenciaEstados.setHorizontalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.tblSecuenciaEstados.setHorizontalHeaderItem(2, item) self.verticalLayout.addWidget(self.tblSecuenciaEstados) self.gridLayout_2.addLayout(self.verticalLayout, 2, 0, 1, 2) self.btnCerrar = QtGui.QPushButton(CODetailsDialog) self.btnCerrar.setDefault(True) self.btnCerrar.setObjectName(_fromUtf8("btnCerrar")) self.gridLayout_2.addWidget(self.btnCerrar, 4, 1, 1, 1) self.line = QtGui.QFrame(CODetailsDialog) self.line.setFrameShape(QtGui.QFrame.HLine) self.line.setFrameShadow(QtGui.QFrame.Sunken) self.line.setObjectName(_fromUtf8("line")) self.gridLayout_2.addWidget(self.line, 3, 0, 1, 2) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.gridLayout_2.addItem(spacerItem, 4, 0, 1, 1) spacerItem1 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed) self.gridLayout_2.addItem(spacerItem1, 1, 0, 1, 1) self.gridLayout = QtGui.QGridLayout() self.gridLayout.setObjectName(_fromUtf8("gridLayout")) self.label = QtGui.QLabel(CODetailsDialog) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth()) self.label.setSizePolicy(sizePolicy) self.label.setMinimumSize(QtCore.QSize(125, 0)) self.label.setObjectName(_fromUtf8("label")) self.gridLayout.addWidget(self.label, 0, 0, 1, 1) self.lblCOCantidadEstados = QtGui.QLabel(CODetailsDialog) self.lblCOCantidadEstados.setObjectName(_fromUtf8("lblCOCantidadEstados")) self.gridLayout.addWidget(self.lblCOCantidadEstados, 0, 1, 1, 1) self.label_2 = QtGui.QLabel(CODetailsDialog) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) 
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth()) self.label_2.setSizePolicy(sizePolicy) self.label_2.setMinimumSize(QtCore.QSize(125, 0)) self.label_2.setObjectName(_fromUtf8("label_2")) self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1) self.lblCOSumValQ = QtGui.QLabel(CODetailsDialog) self.lblCOSumValQ.setObjectName(_fromUtf8("lblCOSumValQ")) self.gridLayout.addWidget(self.lblCOSumValQ, 1, 1, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 2) self.retranslateUi(CODetailsDialog) QtCore.QObject.connect(self.btnCerrar, QtCore.SIGNAL(_fromUtf8("clicked()")), CODetailsDialog.accept) QtCore.QMetaObject.connectSlotsByName(CODetailsDialog) def retranslateUi(self, CODetailsDialog): CODetailsDialog.setWindowTitle(_translate("CODetailsDialog", "Detalles de camino óptimo", None)) self.label_3.setText(_translate("CODetailsDialog", "Secuencia de estados:", None)) item = self.tblSecuenciaEstados.horizontalHeaderItem(0) item.setText(_translate("CODetailsDialog", "Estado", None)) item = self.tblSecuenciaEstados.horizontalHeaderItem(1) item.setText(_translate("CODetailsDialog", "Coordenadas", None)) item = self.tblSecuenciaEstados.horizontalHeaderItem(2) item.setText(_translate("CODetailsDialog", "Valor Q", None)) self.btnCerrar.setText(_translate("CODetailsDialog", "&Cerrar", None)) self.label.setText(_translate("CODetailsDialog", "Cantidad de estados:", None)) self.lblCOCantidadEstados.setText(_translate("CODetailsDialog", "-", None)) self.label_2.setText(_translate("CODetailsDialog", "Sumatoria de valores Q:", None)) self.lblCOSumValQ.setText(_translate("CODetailsDialog", "-", None))
mit
4,508,036,040,088,869,400
52.944954
158
0.713435
false
3.587553
false
false
false
CraigBryan/pellinglab_twitter_microscope
src/production_files/receivers/gui_receiver.py
1
2402
'''
Created on Sep 6, 2013

@author: Craig Bryan
'''

from receiver import Receiver

class GuiReceiver(Receiver):
    """
    A receiver that allows the router to receive and send data to a local GUI.
    This is to allow local requests for images, without using the Twitter-based
    interface. No GUI is currently implemented, so this acts as a hook for later.

    Attributes:
        gui: The gui this receiver is communicating with. The gui must have a
            send_data method that allows data to be displayed, and a
            retrieve_requests method that allows pulling of a list of requests
            for images or information.
    """

    def __init__(self, router, r_id, gui):
        """
        Args:
            router: A reference to the router that this receiver is associated with.
            r_id: The string that the router refers to this receiver with.
            gui: The gui this receiver is communicating with.
        """
        super(GuiReceiver, self).__init__(router, r_id)
        self.gui = gui

    def process_transaction(self, transaction):
        """
        The method the router calls when a transaction is routed to this receiver.

        Args:
            transaction: The transaction that is being processed by the receiver.

        Commands:
            update: Pull any new requests from the GUI and turn them into
                new transactions.
            post: Send data to the GUI to display.
        """
        if transaction.command == "update":
            requests = self.gui.retrieve_requests()

            while len(requests) > 0:
                #requests are routed to the translator
                self.router.create_transaction(origin = self.r_id, to_id = "translator",
                                               command = "parse",
                                               command_arg = requests.popleft())

            transaction.process(success = True, finished = True, allow_log = False)

        elif transaction.command == "post":
            self.gui.send_data(transaction.message)
            transaction.process(success = True, finished = True)

        else:
            transaction.log(info = "Unknown command passed to gui receiver: %s"
                            % transaction.command)
mit
-4,373,891,099,005,391,400
37.126984
94
0.56453
false
5.056842
false
false
false
leppa/home-assistant
homeassistant/components/idteck_prox/__init__.py
1
2205
"""Component for interfacing RFK101 proximity card readers.""" import logging from rfk101py.rfk101py import rfk101py import voluptuous as vol from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DOMAIN = "idteck_prox" EVENT_IDTECK_PROX_KEYCARD = "idteck_prox_keycard" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.port, vol.Required(CONF_NAME): cv.string, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up the IDTECK proximity card component.""" conf = config[DOMAIN] for unit in conf: host = unit[CONF_HOST] port = unit[CONF_PORT] name = unit[CONF_NAME] try: reader = IdteckReader(hass, host, port, name) reader.connect() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, reader.stop) except OSError as error: _LOGGER.error("Error creating %s. %s", name, error) return False return True class IdteckReader: """Representation of an IDTECK proximity card reader.""" def __init__(self, hass, host, port, name): """Initialize the reader.""" self.hass = hass self._host = host self._port = port self._name = name self._connection = None def connect(self): """Connect to the reader.""" self._connection = rfk101py(self._host, self._port, self._callback) def _callback(self, card): """Send a keycard event message into HASS whenever a card is read.""" self.hass.bus.fire( EVENT_IDTECK_PROX_KEYCARD, {"card": card, "name": self._name} ) def stop(self): """Close resources.""" if self._connection: self._connection.close() self._connection = None
apache-2.0
-3,721,587,121,090,566,000
24.941176
77
0.559637
false
3.87522
false
false
false
magenta-aps/mox
python_agents/notification_service/notify_to_amqp_service.py
1
2455
# Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk. # Contact: info@magenta.dk. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """ Simple class to relay messages from PostgreSQL notifications into an AMQP-queue """ import select import json import pika import psycopg2 from os import getenv def AMQPNotifier(database, user, password, host): """ Main notification thread. :param database: The PostgreSQL database :param user: The PostgreSQL user :param password: The PostgreSQL password :param host: The PostgreSQL hostname """ pg_conn = psycopg2.connect(database=database, user=user, password=password, host=host) pg_cursor = pg_conn.cursor() pika_params = pika.ConnectionParameters('localhost') pika_connection = pika.BlockingConnection(pika_params) amqp = pika_connection.channel() amqp.queue_declare(queue='mox.notifications') pg_cursor.execute("LISTEN mox_notifications;") pg_conn.poll() pg_conn.commit() while True: if select.select([pg_conn], [], [], 60) == ([], [], []): pass else: pg_conn.poll() pg_conn.commit() while pg_conn.notifies: notify = pg_conn.notifies.pop(0) payload_dict = json.loads(notify.payload) table = payload_dict['table'] objekttype = table[0:table.find('registrering')-1] objektID = payload_dict['data'][objekttype + '_id'] registrering = payload_dict['data']['registrering'] livscykluskode = registrering['livscykluskode'] amqp_payload = {'beskedtype': 'Notification', 'objektID': objektID, 'objekttype': objekttype, 'livscykluskode': livscykluskode} amqp.basic_publish(exchange='', routing_key='mox.notifications', body=json.dumps(amqp_payload)) if __name__ == '__main__': amqp_notifier = AMQPNotifier( database=getenv("DB_NAME", "mox"), user=getenv("DB_USER", "mox"), password=getenv("DB_PASS", "mox"), host=getenv("DB_HOST", "localhost") )
mpl-2.0
3,567,605,309,189,501,400
35.102941
69
0.58167
false
3.878357
false
false
false
NervanaSystems/neon
examples/ssd/layer.py
1
15145
from neon.layers.layer import Layer, ParameterLayer import numpy as np from neon.transforms import Softmax from neon.initializers.initializer import Constant import math from collections import OrderedDict class Normalize(ParameterLayer): def __init__(self, init=Constant(20.0), name=None): super(Normalize, self).__init__(name=name, init=init) self.bottom_data = None self.norm_data = None self.owns_outputs = True def allocate(self, shared_outputs=None): super(Normalize, self).allocate() self.outputs_view = self.outputs.reshape(self.channels, -1) def configure(self, in_obj): self.prev_layer = in_obj self.in_shape = in_obj.out_shape self.out_shape = in_obj.out_shape assert len(self.in_shape) == 3, "Normalize layer must have (C, H, W) input" self.channels = self.in_shape[0] self.weight_shape = (self.channels, 1) return self def fprop(self, inputs, inference=True): self.bottom_data = inputs.reshape(self.channels, -1) self.norm_data = self.be.sqrt(self.be.sum(self.be.square(self.bottom_data), axis=0)) self.outputs_view[:] = self.W * self.bottom_data / self.norm_data return self.outputs def bprop(self, error, alpha=1.0, beta=0.0): error_rs = error.reshape(self.channels, -1) self.dW[:] = self.be.sum(self.outputs_view*error_rs, axis=1)/self.W self.deltas_view = self.deltas.reshape(self.channels, -1) # we may be able to join these back together into 1 assing call self.deltas_view[:] = -self.outputs_view * self.be.sum(self.bottom_data * error_rs, axis=0) self.deltas_view[:] = self.deltas_view / self.be.square(self.norm_data) # this is separate self.deltas_view[:] += self.W * error_rs / self.norm_data return self.deltas class ConcatTranspose(Layer): """ Takes a list of inputs, each with a shape CHWN, and transposes to HWCN, then concatenates along the HWC axis. 
""" def __init__(self, name=None, parallelism='Disabled'): super(ConcatTranspose, self).__init__(name, parallelism=parallelism) def configure(self, in_obj): # we expect a list of layers assert isinstance(in_obj, list) self.in_shapes = [l.out_shape for l in in_obj] self.num_elements = np.sum(np.prod(l.out_shape) for l in in_obj) self.out_shape = (self.num_elements) # store the number of channels from the layer shapes self.channels = [l.out_shape[0] for l in in_obj] def allocate(self, shared_outputs=None): self.outputs = self.be.iobuf((self.num_elements)) # create destination delta buffers self.deltas = [self.be.iobuf(in_shape) for in_shape in self.in_shapes] def fprop(self, inputs): start = 0 for (layer, num_channels) in zip(inputs, self.channels): # reshape (C, HW, N) rlayer = layer.reshape((num_channels, -1, self.be.bsz)) # transpose to (HW, C, N) and store in buffer C, HW, N = rlayer.shape end = start + C * HW output_view = self.outputs[start:end, :].reshape((HW, C, N)) self.be.copy_transpose(rlayer, output_view, axes=(1, 0, 2)) start = end return self.outputs def bprop(self, error): # error is in (HWC, N) # need to transpose to (CHW, N) and unstack start = 0 for (delta, num_channels) in zip(self.deltas, self.channels): # reshape (C, HW, N) rdelta = delta.reshape((num_channels, -1, self.be.bsz)) C, HW, N = rdelta.shape end = start + C * HW error_view = error[start:end, :].reshape((HW, C, N)) self.be.copy_transpose(error_view, rdelta, axes=(1, 0, 2)) start = end return self.deltas class DetectionOutput(Layer): def __init__(self, num_classes, nms_threshold=0.45, nms_topk=400, topk=200, threshold=0.01, name=None): super(DetectionOutput, self).__init__(name) self.num_classes = num_classes self.nms_threshold = nms_threshold self.nms_topk = nms_topk self.topk = topk self.threshold = 0.01 self.softmax = Softmax(axis=1) def configure(self, in_obj): self.out_shape = (self.topk, 5) # we expect a list of layers from the SSD model (leafs, prior_boxes) = in_obj # store total number of boxes self.num_boxes = np.sum([prior_box.num_boxes for prior_box in prior_boxes]) def allocate(self, shared_outputs=None): self.conf = self.be.iobuf((self.num_boxes * self.num_classes)) self.loc = self.be.iobuf((self.num_boxes * 4)) # intermediate buffer for compute # these are needed to keep compute on the GPU # 1. proposals for each class and image # 2. store detections after sort/threshold # 3. store softmax self.proposals = self.be.empty((self.num_boxes, 4)) self.detections = self.be.empty((self.nms_topk, 5)) self.scores = self.be.empty((self.num_boxes, self.num_classes)) def fprop(self, inputs, inference=True): # assumes the inputs are a tuple of (outputs, prior_boxes), # where outputs is a vector of outputs from the model. 
# flatten the nested vector generated by tree-in-tree # also reorder the list in: 4_3, fc7, conv6, conv7, conv8, conv9 # x = self.reorder(inputs[0]) self.loc = inputs[0][0] self.conf = inputs[0][1] prior_boxes = inputs[1] # reshape loc from (HWC, N) to (HWK, 4, N) # reshape conf from (HWC, N) to (HWK, 21, N) conf_view = self.conf.reshape((-1, self.num_classes, self.be.bsz)) loc_view = self.loc.reshape((-1, 4, self.be.bsz)) # for mkl backend optimization if self.be.is_mkl(): return self.be.detectionOutput_fprop(conf_view, loc_view, self.detections, prior_boxes, self.proposals, self.nms_topk, self.topk, self.threshold, self.nms_threshold) # convert the prior boxes to bbox predictions by applying # the loc regression targets # process each image individually batch_all_detections = [None] * self.be.bsz for k in range(self.be.bsz): self.bbox_transform_inv(prior_boxes, loc_view[:, :, k], self.proposals) all_detections = np.zeros((0, 6)) # detections for this image conf = conf_view[:, :, k] self.scores[:] = self.softmax(conf) for c in range(self.num_classes): if (c == 0): # skip processing of background classes continue # apply softmax scores = self.scores[:, c] # 1. apply threshold, sort, and get the top nms_k top_N_ind = self.get_top_N_index(scores, self.nms_topk, self.threshold) # fill the detections if len(top_N_ind) > 0: self.detections.fill(0) self.detections[:len(top_N_ind), :4] = self.proposals[top_N_ind, :] self.detections[:len(top_N_ind), 4] = scores[top_N_ind] # 2. apply NMS keep = self.be.nms(self.detections, self.nms_threshold, normalized=True) keep = keep[:self.nms_topk] # 3. store the detections per class # add an additional dimension for the category label dets = np.append(self.detections[keep, :].get(), c * np.ones((len(keep), 1)), axis=1) all_detections = np.vstack([all_detections, dets]) if all_detections.shape[0] > self.topk: top_N_ind = self.get_top_N_index(all_detections[:, 4], self.topk, None) all_detections = all_detections[top_N_ind, :] batch_all_detections[k] = all_detections return batch_all_detections def bprop(self, error, alpha=1.0, beta=0.0): raise NotImplementedError def get_top_N_index(self, scores, N, threshold): # this function handles scores still being device tensors # move scores to host if needed if isinstance(scores, np.ndarray): np_scores = scores.ravel() else: np_scores = scores.get().ravel() # apply threshold if needed if threshold is None: count = len(np_scores) else: count = len(np.where(np_scores > threshold)[0]) order = np_scores.argsort()[::-1].tolist() order = order[:count] if N > 0: order = order[:N] return order def bbox_transform_inv(self, boxes, deltas, output, variance=[0.1, 0.1, 0.2, 0.2]): widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights dx = deltas[:, 0] dy = deltas[:, 1] dw = deltas[:, 2] dh = deltas[:, 3] pred_ctr_x = variance[0] * dx * widths + ctr_x pred_ctr_y = variance[1] * dy * heights + ctr_y pred_w = self.be.exp(variance[2] * dw) * widths pred_h = self.be.exp(variance[3] * dh) * heights # pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype) # x1 output[:, 0] = pred_ctr_x - 0.5 * pred_w # y1 output[:, 1] = pred_ctr_y - 0.5 * pred_h # x2 output[:, 2] = pred_ctr_x + 0.5 * pred_w # y2 output[:, 3] = pred_ctr_y + 0.5 * pred_h return output class PriorBox(Layer): def __init__(self, min_sizes, max_sizes, step=None, aspect_ratios=[2, 3], img_shape=(300, 300), flip=True, clip=False, variance=[0.1, 0.1, 0.2, 0.2], offset=0.5, name=None): 
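        # Descriptive note: PriorBox generates the fixed SSD-style default
        # (anchor) boxes for a single feature map. Each feature-map pixel gets
        # one square box per (min_size, max_size) pair plus one box for every
        # (optionally flipped) aspect ratio at each min size; see
        # num_priors_per_pixel below.
        #
        # Minimal usage sketch (hypothetical sizes, not taken from the source):
        #   prior = PriorBox(min_sizes=(30.0,), max_sizes=(60.0,),
        #                    step=8, aspect_ratios=[2, 3], img_shape=(300, 300))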
super(PriorBox, self).__init__(name) self.offset = offset self.variance = variance self.flip = flip self.clip = clip if type(step) in (dict, OrderedDict): assert set(step.keys()) == set(('step_w', 'step_h')) self.step_w = step['step_w'] self.step_h = step['step_h'] else: assert step is not None self.step_w = step self.step_h = step self.prior_boxes = None self.img_w = img_shape[0] self.img_h = img_shape[1] assert isinstance(min_sizes, tuple) assert isinstance(max_sizes, tuple) self.min_sizes = min_sizes self.max_sizes = max_sizes assert len(self.min_sizes) == len(self.max_sizes) # compute the number of prior boxes # with flip, order the aspect ratios in the same was as caffe self.aspect_ratios = [] for ar in aspect_ratios: self.aspect_ratios.extend([ar]) if(self.flip): self.aspect_ratios.extend([1.0 / float(ar)]) # number of prior baxes per feature map pixel # there is 1 box with AR=1 for each min and max sizes self.num_priors_per_pixel = len(self.min_sizes) * 2 # and one box for each aspect ratio at the min size self.num_priors_per_pixel += len(self.aspect_ratios) * len(self.min_sizes) def configure(self, in_objs): conv_layer = in_objs[1] self.in_shape = conv_layer.out_shape (_, self.layer_height, self.layer_width) = self.in_shape if self.step_w is None or self.step_h is None: self.step_w = math.ceil(float(self.img_w) / self.layer_width) self.step_h = math.ceil(float(self.img_h) / self.layer_height) self.num_boxes = self.layer_height * self.layer_width * self.num_priors_per_pixel self.out_shape = (4*self.num_priors_per_pixel, self.layer_height, self.layer_width) def allocate(self, shared_outputs=None): self.outputs = self.be.empty((self.num_boxes, 4)) def fprop(self, inputs, inference=True): # the priors will be of shape layer_width * layer_height * num_priors_per_pixel * 4 # with 2 chans per element (one for mean fnd one or vairance) # we only need to calculate these once if the image size does not change # right now we don't support changing image sizes anyways if self.prior_boxes is not None: return self.outputs img_shape = [self.img_w, self.img_h] self.prior_boxes = [] def gen_box(center, box_size, image_size, variance, clip): box_ = [None] * 4 box_[0] = (center[0] - box_size[0] * 0.5) / image_size[0] # xmin box_[1] = (center[1] - box_size[1] * 0.5) / image_size[1] # ymin box_[2] = (center[0] + box_size[0] * 0.5) / image_size[0] # xmax box_[3] = (center[1] + box_size[1] * 0.5) / image_size[1] # ymax if clip: for ind in range(4): box_[ind] = min([max([box_[ind], 0.0]), 1.0]) return box_ offset = self.offset # the output is 2 chans (the 4 prior coordinates, the 4 prior variances) for # each output feature map pixel so the output array is # 2 x layer_height x layer_width x num_priors x 4 center = [0, 0] for h in range(self.layer_height): center[1] = (h + offset) * self.step_h for w in range(self.layer_width): center[0] = (w + offset) * self.step_w # do the min and max boxes with aspect ratio 1 for (min_size, max_size) in zip(self.min_sizes, self.max_sizes): # do the min box box_shape = [min_size, min_size] self.prior_boxes += (gen_box(center, box_shape, img_shape, self.variance, self.clip)) # do the max size box sz_ = math.sqrt(min_size * max_size) box_shape = [sz_, sz_] self.prior_boxes += (gen_box(center, box_shape, img_shape, self.variance, self.clip)) # now do the different aspect ratio boxes for ar in self.aspect_ratios: assert np.abs(ar - 1.0) > 1.0e-6 box_width = min_size * math.sqrt(ar) box_height = min_size / math.sqrt(ar) box_shape = [box_width, box_height] self.prior_boxes += 
(gen_box(center, box_shape, img_shape, self.variance, self.clip)) self.outputs.set(np.array(self.prior_boxes).reshape(-1, 4)) return self.outputs def bprop(self, error, alpha=1.0, beta=0.0): raise NotImplementedError
apache-2.0
-5,077,914,769,578,952,000
37.341772
99
0.553516
false
3.532774
false
false
false
fimad/mitmproxy
mitmproxy/controller.py
1
3905
from __future__ import absolute_import from six.moves import queue import threading class DummyReply: """ A reply object that does nothing. Useful when we need an object to seem like it has a channel, and during testing. """ def __init__(self): self.acked = False def __call__(self, msg=False): self.acked = True class Reply: """ Messages sent through a channel are decorated with a "reply" attribute. This object is used to respond to the message through the return channel. """ def __init__(self, obj): self.obj = obj self.q = queue.Queue() self.acked = False def __call__(self, msg=None): if not self.acked: self.acked = True if msg is None: self.q.put(self.obj) else: self.q.put(msg) class Channel: def __init__(self, q, should_exit): self.q = q self.should_exit = should_exit def ask(self, mtype, m): """ Decorate a message with a reply attribute, and send it to the master. then wait for a response. """ m.reply = Reply(m) self.q.put((mtype, m)) while not self.should_exit.is_set(): try: # The timeout is here so we can handle a should_exit event. g = m.reply.q.get(timeout=0.5) except queue.Empty: # pragma: no cover continue return g def tell(self, mtype, m): """ Decorate a message with a dummy reply attribute, send it to the master, then return immediately. """ m.reply = DummyReply() self.q.put((mtype, m)) class Slave(threading.Thread): """ Slaves get a channel end-point through which they can send messages to the master. """ def __init__(self, channel, server): self.channel, self.server = channel, server self.server.set_channel(channel) threading.Thread.__init__(self) self.name = "SlaveThread (%s:%s)" % ( self.server.address.host, self.server.address.port) def run(self): self.server.serve_forever() class Master(object): """ Masters get and respond to messages from slaves. """ def __init__(self, server): """ server may be None if no server is needed. """ self.server = server self.masterq = queue.Queue() self.should_exit = threading.Event() def tick(self, q, timeout): changed = False try: # This endless loop runs until the 'Queue.Empty' # exception is thrown. If more than one request is in # the queue, this speeds up every request by 0.1 seconds, # because get_input(..) function is not blocking. while True: msg = q.get(timeout=timeout) self.handle(*msg) q.task_done() changed = True except queue.Empty: pass return changed def run(self): self.should_exit.clear() self.server.start_slave(Slave, Channel(self.masterq, self.should_exit)) while not self.should_exit.is_set(): # Don't choose a very small timeout in Python 2: # https://github.com/mitmproxy/mitmproxy/issues/443 # TODO: Lower the timeout value if we move to Python 3. self.tick(self.masterq, 0.1) self.shutdown() def handle(self, mtype, obj): c = "handle_" + mtype m = getattr(self, c, None) if m: m(obj) else: obj.reply() def shutdown(self): if not self.should_exit.is_set(): self.should_exit.set() if self.server: self.server.shutdown()
mit
5,079,605,450,579,444,000
26.307692
79
0.541613
false
4.07195
false
false
false
imathur/HPC-PPE
extract-timing-info.py
1
3102
import os import numpy as np import pandas as pd # Create dataframe in which dataset will be stored df = pd.DataFrame(columns=['pre-job input', 'opening input file', 'upto appmgr start', 'initialisation', 'event loop', 'ending']) # Initialize counting variables filecount = 0 included = 0 # Loop through all files in directory test1/output/ and search for log.EVNTtoHITS files for each Athena job for subdir, dirs, files in os.walk('/work/d60/d60/shared/optimisation/benchmark/test1/output'): for file in files: filepath = subdir + os.sep + file if filepath.endswith('.EVNTtoHITS'): filecount = filecount + 1 # Extract lines containing certain strings from the log file and write the lines to a list linelist = [ line.rstrip('\n') for line in open(filepath) if ('Setting up DBRelease' in line or \ 'in ISF_Input' in line or \ 'Welcome to ApplicationMgr' in line or \ 'Event Counter process created' in line or \ 'Statuses of sub-processes' in line) ] # Extract last line of log file and append it to the list with open(filepath,'rb') as source: source.seek(-2, 2) while source.read(1) != b"\n": source.seek(-2, 1) linelist.append(str(source.readline())) # Create a list 'timelist' of the first word (string containing timestamp) on each line in the temporary file timelist = [line.split()[0] for line in linelist] # Convert each timestamp string element in the list to its equivalent value in seconds ftr = [3600,60,1] timelist = map(lambda x: sum([a*b for a,b in zip(ftr, [int(i) for i in x.split(":")])]), timelist) # Create a new list 'timelist2' containing the difference of each consecutive pair of elements from 'timelist' timelist2 = [] timelist2 = np.diff(timelist) # If the list 'timelist2' has 6 elements (i.e., if the job finished execution and wasn't stopped prematurely), append the list as a new row to the dataframe if timelist2.size == 6: included = included + 1 print (filepath) df = df.append(pd.Series(timelist2, index=['pre-job input', 'opening input file', 'upto appmgr start', 'initialisation', 'event loop', 'ending']), ignore_index=True) # Write dataframe back to CSV file and print confirmation of completion of program. df.to_csv('csvfiles/stageTiming.csv') print ("\nFinished scanning %d of %d log files. Output: csvfiles/stageTiming.csv\n") % (included, filecount)
gpl-3.0
6,145,991,294,068,977,000
46
181
0.548034
false
4.561765
false
false
false
AmauryOrtega/Python-6.00.1x
Week 3/File5.py
1
4677
# -*- coding: utf-8 -*- """ Created on 25/09/2016 @author: Amaury Ortega <amauryocortega@gmail.com> """ ''' Things that can be done to strings, range, list or tuple being seq the name of any of the above variable -----OPERATIONS----- seq[i] len(seq) seq1 + seq2 (not range) n*seq (not range) seq[start:end] e in seq e not in seq for e in seq -----PROPERTIES----- ----- type mutable str char not mutable tuple any not mutable range int not mutable list any yes mutable ''' # Dictionaries ''' -----PROPERTIES----- Value any type (mutable and not mutable) Key unique not mutable type (int, float, string, tuple, bool) really it needs and hashable type, all immutable types are hashable ''' grades = {'Ana': 'B', 'John': 'A+', 'Denise': 'A', 'Katy': 'A'} grades['John'] grades['Sylvan'] = 'A' 'John' in grades del (grades['Ana']) grades.keys() grades.values() d = {4: {1: 0}, (1, 3): "twelve", 'const': [3.14, 2.7, 8.44]} # Analyze song lyrics def lyrics_to_frecuencies(lyrics): myDict = {} for word in lyrics: if word in myDict: myDict[word] += 1 else: myDict[word] = 1 return myDict she_loves_you = ['she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'you', 'think', "you've", 'lost', 'your', 'love', 'well', 'i', 'saw', 'her', 'yesterday-yi-yay', "it's", 'you', "she's", 'thinking', 'of', 'and', 'she', 'told', 'me', 'what', 'to', 'say-yi-yay', 'she', 'says', 'she', 'loves', 'you', 'and', 'you', 'know', 'that', "can't", 'be', 'bad', 'yes', 'she', 'loves', 'you', 'and', 'you', 'know', 'you', 'should', 'be', 'glad', 'she', 'said', 'you', 'hurt', 'her', 'so', 'she', 'almost', 'lost', 'her', 'mind', 'and', 'now', 'she', 'says', 'she', 'knows', "you're", 'not', 'the', 'hurting', 'kind', 'she', 'says', 'she', 'loves', 'you', 'and', 'you', 'know', 'that', "can't", 'be', 'bad', 'yes', 'she', 'loves', 'you', 'and', 'you', 'know', 'you', 'should', 'be', 'glad', 'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'with', 'a', 'love', 'like', 'that', 'you', 'know', 'you', 'should', 'be', 'glad', 'you', 'know', "it's", 'up', 'to', 'you', 'i', 'think', "it's", 'only', 'fair', 'pride', 'can', 'hurt', 'you', 'too', 'pologize', 'to', 'her', 'Because', 'she', 'loves', 'you', 'and', 'you', 'know', 'that', "can't", 'be', 'bad', 'Yes', 'she', 'loves', 'you', 'and', 'you', 'know', 'you', 'should', 'be', 'glad', 'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah', 'with', 'a', 'love', 'like', 'that', 'you', 'know', 'you', 'should', 'be', 'glad', 'with', 'a', 'love', 'like', 'that', 'you', 'know', 'you', 'should', 'be', 'glad', 'with', 'a', 'love', 'like', 'that', 'you', 'know', 'you', 'should', 'be', 'glad', 'yeah', 'yeah', 'yeah', 'yeah', 'yeah', 'yeah', 'yeah' ] beatles = lyrics_to_frecuencies(she_loves_you) def most_common_words(freqs): values = freqs.values() best = max(values) words = [] for k in freqs: if freqs[k] == best: words.append(k) return (words, best) (w, b) = most_common_words(beatles) def words_often(freqs, minTimes): result = [] done = False while not done: temp = most_common_words(freqs) if temp[1] >= minTimes: result.append(temp) for w in temp[0]: del (freqs[w]) else: done = True return result print(words_often(beatles, 5)) # Fibonnaci with dictionaries def fib(x): if x == 1: return 1 elif x == 2: return 2 else: return fib(x - 1) + fib(x - 2) def fib_efficient(n, d): if n in d: return d[n] else: ans = fib_efficient(n - 1, d) + 
fib_efficient(n - 2, d) d[n] = ans return ans d = {1: 1, 2: 2} print(fib_efficient(6, d))
gpl-3.0
-5,361,887,684,765,019,000
27.345455
75
0.446226
false
2.930451
false
false
false
singlasahil14/char-rnn
utils.py
1
2427
import numpy as np import os from six.moves import cPickle class TextLoader(): def __init__(self, data_dir='nietzsche', batch_size=128, seq_length=8): self.data_dir = "data/" + data_dir self.batch_size = batch_size self.seq_length = seq_length self.input_file = os.path.join(self.data_dir, "input.txt") self.vocab_map_file = os.path.join(self.data_dir, "vocab-map.pkl") self.tensor_file = os.path.join(self.data_dir, "tensor.npy") if not(os.path.exists(self.vocab_map_file) and os.path.exists(self.tensor_file)): self.preprocess() else: self.load_preprocessed() def preprocess(self): input_file = self.input_file vocab_map_file = self.vocab_map_file tensor_file = self.tensor_file text = open(input_file).read() chars = list(set(text)) chars.insert(0, "\0") self.chars = sorted(chars) self.vocab_size = len(chars) self.char2indices = dict((c, i) for i, c in enumerate(chars)) self.indices2char = dict((i, c) for i, c in enumerate(chars)) with open(vocab_map_file, 'wb') as f: cPickle.dump(self.char2indices, f) self.tensor = np.array(list(map(self.char2indices.get, text))) np.save(tensor_file, self.tensor) def load_preprocessed(self): with open(self.vocab_map_file, 'rb') as f: self.char2indices = cPickle.load(f) self.chars = sorted(self.char2indices.keys()) self.vocab_size = len(self.char2indices) self.tensor = np.load(self.tensor_file) self.indices2char = {v: k for k, v in self.char2indices.iteritems()} def data_iterator(self): tensor = self.tensor batch_size = self.batch_size seq_length = self.seq_length data_len = len(tensor) batch_len = batch_size * seq_length data_len = data_len - (data_len%batch_len) - batch_len size_per_batch = data_len//batch_size epoch_size = data_len//batch_len data = np.zeros([batch_size, size_per_batch + 1], dtype=np.int32) for i in range(batch_size): data[i] = tensor[size_per_batch * i: size_per_batch * (i + 1) + 1] for i in range(epoch_size): x = data[:, i * seq_length:(i + 1) * seq_length] y = data[:, i * seq_length + 1:(i + 1) * seq_length + 1] yield(x, y)
mit
-1,414,082,991,156,815,600
36.921875
89
0.588381
false
3.262097
false
false
false
j00bar/django-widgy
widgy/contrib/page_builder/forms/__init__.py
2
5872
import os from django import forms from django.utils.safestring import mark_safe from django.contrib.staticfiles.storage import staticfiles_storage from django.template.loader import render_to_string from django.conf import settings import bleach from django_pyscss import DjangoScssCompiler PAGEDOWN_EDITOR_TEMPLATE = u''' <div class="pagedown-buttonbar"></div> {textarea} <div class="pagedown-preview"></div> ''' def scss_compile(scss_filename): scss = DjangoScssCompiler() css_content = scss.compile(scss_filename) return css_content class MarkdownWidget(forms.Textarea): class Media: css = { 'all': ('widgy/js/components/markdown/lib/pagedown.css',), } def render(self, *args, **kwargs): textarea = super(MarkdownWidget, self).render(*args, **kwargs) return mark_safe(PAGEDOWN_EDITOR_TEMPLATE.format(textarea=textarea)) class MarkdownField(forms.CharField): widget = MarkdownWidget class CKEditorWidget(forms.Textarea): CONFIG = { 'toolbar': [ {'name': 'clipboard', 'groups': ['clipboard', 'undo'], 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']}, {'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']}, {'name': 'insert', 'items': ['HorizontalRule', 'SpecialChar']}, {'name': 'justify', 'groups': ['justify'], 'items': ['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock']}, {'name': 'document', 'groups': ['mode', 'document', 'doctools'], 'items': ['Source']}, {'name': 'tools', 'items': ['Maximize']}, '/', {'name': 'basicstyles', 'groups': ['basicstyles', 'cleanup'], 'items': ['Bold', 'Italic', 'Strike', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat']}, {'name': 'paragraph', 'groups': ['list', 'indent', 'blocks', 'align'], 'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote']}, {'name': 'editing', 'groups': ['find', 'selection', 'spellchecker'], 'items': ['Scayt']}, {'name': 'styles', 'items': ['Styles', 'Format']}, ], 'stylesSet': [ {'name': 'Big', 'element': 'big'}, {'name': 'Small', 'element': 'small'}, {'name': 'Typewriter', 'element': 'tt'}, {'name': 'Computer Code', 'element': 'code'}, {'name': 'Keyboard Phrase', 'element': 'kbd'}, {'name': 'Sample Text', 'element': 'samp'}, {'name': 'Variable', 'element': 'var'}, {'name': 'Deleted Text', 'element': 'del'}, {'name': 'Inserted Text', 'element': 'ins'}, {'name': 'Cited Work', 'element': 'cite'}, {'name': 'Inline Quotation', 'element': 'q'}, {'name': 'Language: RTL', 'element': 'span', 'attributes': {'dir': 'rtl'}}, {'name': 'Language: LTR', 'element': 'span', 'attributes': {'dir': 'ltr'}}, ], 'allowedContent': True, 'removeButtons': '', 'extraPlugins': 'justify', 'justifyClasses': ['align-left', 'align-center', 'align-right', 'align-justify'], 'indentClasses': ['text-indent-%d' % i for i in range(1,6)], 'contentsCss': scss_compile('/widgy/page_builder/html.scss'), } def __init__(self, *args, **kwargs): super(CKEditorWidget, self).__init__(*args, **kwargs) self.attrs['class'] = 'widgy_ckeditor' def render(self, name, value, attrs=None): textarea = super(CKEditorWidget, self).render(name, value, attrs) return render_to_string('page_builder/ckeditor_widget.html', { 'html_id': attrs['id'], 'textarea': textarea, 'ckeditor_path': staticfiles_storage.url('widgy/js/lib/ckeditor/'), 'config': self.CONFIG, }) class CKEditorField(forms.CharField): widget = CKEditorWidget ALLOWED_ATTRIBUTES = { '*': ['class', 'dir', 'title'], 'a': ['href', 'target', 'rel', 'name'], 'time': ['datetime', 'pubdate'], 'img': ['src'], 'table': ['border'], 'colgroup': ['span'], 'col': 
['span'], 'td': ['colspan', 'rowspan', 'headers'], 'th': ['colspan', 'rowspan', 'headers', 'scope'], } ALLOWED_TAGS = [ 'a', 'abbr', 'acronym', 'address', 'b', 'big', 'br', 'cite', 'code', 'del', 'dfn', 'div', 'em', 'hr', 'i', 'ins', 'kbd', 'mark', 'p', 'pre', 'q', 'samp', 'small', 'span', 'strong', 'sub', 'sup', 'time', 'u', 'var', 'wbr', 's', 'tt', 'ul', 'ol', 'li', 'dl', 'dt', 'dd', 'blockquote', 'details', 'summary', 'hgroup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'figure', 'figcaption', 'img', 'caption', 'col', 'colgroup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr', ] def clean(self, value): value = super(CKEditorField, self).clean(value) return bleach.clean(value, tags=self.ALLOWED_TAGS, attributes=self.ALLOWED_ATTRIBUTES) class MiniCKEditorWidget(CKEditorWidget): CONFIG = { 'toolbar': [ {'name': 'basicstyles', 'groups': ['basicstyles', 'cleanup'], 'items': ['Bold', 'Italic', 'Strike', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat']}, {'name': 'undo', 'groups': ['undo'], 'items': ['Undo', 'Redo']}, {'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']}, {'name': 'mode', 'groups': ['mode'], 'items': ['Source']}, {'name': 'editing', 'groups': ['find', 'selection', 'spellchecker'], 'items': ['Scayt']}, ], 'contentsCss': scss_compile('/widgy/page_builder/html.scss') } class MiniCKEditorField(forms.CharField): widget = MiniCKEditorWidget
apache-2.0
-7,728,175,427,997,485,000
37.12987
180
0.533719
false
3.495238
false
false
false
sio2project/filetracker
filetracker/scripts/migrate_test.py
1
2281
"""Tests for migrate script.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from multiprocessing import Process import os import shutil import tempfile import time import unittest from filetracker.client import Client, FiletrackerError from filetracker.scripts import migrate from filetracker.servers.run import main as server_main _TEST_PORT_NUMBER = 45785 class MigrateScriptTest(unittest.TestCase): def setUp(self): self.temp_dir = tempfile.mkdtemp() os.makedirs(os.path.join(self.temp_dir, 'old_root', 'foo', 'bar')) os.makedirs(os.path.join(self.temp_dir, 'new_root')) self.server_process = Process( target=_start_server, args=(os.path.join(self.temp_dir, 'new_root'),) ) self.server_process.start() time.sleep(2) self.server_url = 'http://127.0.0.1:{}'.format(_TEST_PORT_NUMBER) self.client = Client(local_store=None, remote_url=self.server_url) def tearDown(self): self.server_process.terminate() shutil.rmtree(self.temp_dir) def test_should_upload_files_with_correct_relative_root(self): _touch(os.path.join(self.temp_dir, 'old_root', 'foo', 'a.txt')) _touch(os.path.join(self.temp_dir, 'old_root', 'foo', 'bar', 'b.txt')) _touch(os.path.join(self.temp_dir, 'old_root', 'c.txt')) _touch(os.path.join(self.temp_dir, 'old_root', 'd.txt')) migrate.main( [ os.path.join(self.temp_dir, 'old_root', 'foo'), self.server_url, '--root', os.path.join(self.temp_dir, 'old_root'), '-s', ] ) self.assertEqual(self.client.get_stream('/foo/a.txt')[0].read(), b'') self.assertEqual(self.client.get_stream('/foo/bar/b.txt')[0].read(), b'') with self.assertRaises(FiletrackerError): self.client.get_stream('/c.txt') with self.assertRaises(FiletrackerError): self.client.get_stream('/d.txt') def _start_server(server_dir): server_main( ['-p', str(_TEST_PORT_NUMBER), '-d', server_dir, '-D', '--workers', '4'] ) def _touch(path): with open(path, 'w') as f: pass
gpl-3.0
8,214,431,620,466,431,000
29.824324
81
0.604559
false
3.379259
true
false
false
aio-libs/aiokafka
tests/record/test_util.py
1
3681
import struct import pytest from aiokafka.record import util varint_data = [ (b"\x00", 0), (b"\x01", -1), (b"\x02", 1), (b"\x7E", 63), (b"\x7F", -64), (b"\x80\x01", 64), (b"\x81\x01", -65), (b"\xFE\x7F", 8191), (b"\xFF\x7F", -8192), (b"\x80\x80\x01", 8192), (b"\x81\x80\x01", -8193), (b"\xFE\xFF\x7F", 1048575), (b"\xFF\xFF\x7F", -1048576), (b"\x80\x80\x80\x01", 1048576), (b"\x81\x80\x80\x01", -1048577), (b"\xFE\xFF\xFF\x7F", 134217727), (b"\xFF\xFF\xFF\x7F", -134217728), (b"\x80\x80\x80\x80\x01", 134217728), (b"\x81\x80\x80\x80\x01", -134217729), (b"\xFE\xFF\xFF\xFF\x7F", 17179869183), (b"\xFF\xFF\xFF\xFF\x7F", -17179869184), (b"\x80\x80\x80\x80\x80\x01", 17179869184), (b"\x81\x80\x80\x80\x80\x01", -17179869185), (b"\xFE\xFF\xFF\xFF\xFF\x7F", 2199023255551), (b"\xFF\xFF\xFF\xFF\xFF\x7F", -2199023255552), (b"\x80\x80\x80\x80\x80\x80\x01", 2199023255552), (b"\x81\x80\x80\x80\x80\x80\x01", -2199023255553), (b"\xFE\xFF\xFF\xFF\xFF\xFF\x7F", 281474976710655), (b"\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -281474976710656), (b"\x80\x80\x80\x80\x80\x80\x80\x01", 281474976710656), (b"\x81\x80\x80\x80\x80\x80\x80\x01", -281474976710657), (b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 36028797018963967), (b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -36028797018963968), (b"\x80\x80\x80\x80\x80\x80\x80\x80\x01", 36028797018963968), (b"\x81\x80\x80\x80\x80\x80\x80\x80\x01", -36028797018963969), (b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 4611686018427387903), (b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -4611686018427387904), (b"\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01", 4611686018427387904), (b"\x81\x80\x80\x80\x80\x80\x80\x80\x80\x01", -4611686018427387905), ] @pytest.mark.parametrize("encoded, decoded", varint_data) def test_encode_varint(encoded, decoded): res = bytearray() util.encode_varint(decoded, res.append) assert res == encoded @pytest.mark.parametrize("encoded, decoded", varint_data) def test_decode_varint(encoded, decoded): # We add a bit of bytes around just to check position is calculated # correctly value, pos = util.decode_varint( bytearray(b"\x01\xf0" + encoded + b"\xff\x01"), 2) assert value == decoded assert pos - 2 == len(encoded) @pytest.mark.parametrize("encoded, decoded", varint_data) def test_size_of_varint(encoded, decoded): assert util.size_of_varint(decoded) == len(encoded) def test_crc32c(): def make_crc(data): crc = util.calc_crc32c(data) return struct.pack(">I", crc) assert make_crc(b"") == b"\x00\x00\x00\x00" assert make_crc(b"a") == b"\xc1\xd0\x43\x30" # Took from librdkafka testcase long_text = b"""\ This software is provided 'as-is', without any express or implied warranty. In no event will the author be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution.""" assert make_crc(long_text) == b"\x7d\xcd\xe1\x13"
apache-2.0
-1,897,615,598,977,457,200
37.747368
79
0.65906
false
2.493902
true
false
false
i2c2-caj/Utilities
Python/file_test.py
1
1763
import os, time def make_version(mtime): # t = ['Text Day', 'Month', 'Num day', 'time', 'year'] t = time.ctime(mtime).split() year = t[4] month = int(time.strptime(t[1], '%b').tm_mon) day = t[2] if (month < 10): new_version = 'y{0}m0{1}d{2}'.format(year, month, day) else: new_version = 'y{0}m{1}d{2}'.format(year, month, day) return new_version def update_version(mtime, old_version, lines): new_version = make_version(mtime) if (len(old_version) != 0 and old_version[1] == 'Version:'): if (old_version[2] != new_version): lines.append('# Version: {0}\n'.format(new_version)) else: lines.append('# Version: {0}\n'.format(new_version)) ''' print '---FOUND VERSION' print '---old: ', old_version print '---new: ', new_version ''' def main(): file_name = 'version_me.txt' mtime = os.path.getmtime(file_name) lines = [] file_object = open(file_name, 'r') updated = False for line in file_object: # Check for version tag until it is found if (updated == False): check_line = line.strip().split() if (len(check_line)): # Found version tag, update it if (check_line[1] == 'Version:'): update_version(mtime, check_line[2], lines) updated = True else: lines.append(line) else: lines.append('\n') else: lines.append(line) # No version tag found, insert one if (updated == False): lines = ['# Version: {0}\n'.format(make_version(mtime))] + lines w = open('text.txt', 'w') w.writelines(lines) main()
gpl-2.0
-6,161,450,614,470,024,000
26.123077
72
0.519569
false
3.470472
false
false
false
LabAdvComp/tukey_middleware
setup.py
1
1983
# Copyright 2013 Open Cloud Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #from distutils.core import setup from setuptools import setup import subprocess name='tukey_middleware' sub_packages = ['modules', 'api', 'auth', 'cloud_driver', # tests 'tests', 'tests.services'] sub_modules = ['ids', 'instance_metadata', 'metadata'] modules = ['modules.%s' % s for s in sub_modules] setup( name=name, version='0.4.2.1', packages=[name] + ['%s.%s' % (name, s) for s in sub_packages + modules], license='Apache License 2.0"', dependency_links=[ 'https://github.com/LabAdvComp/novacluster/tarball/master#egg=novacluster'], install_requires=[ 'novacluster', 'flask', 'python-glanceclient', 'python-cinderclient', 'python-magic', 'python-memcached', 'dnspython', 'prettytable', 'apache-libcloud==0.14.0-beta3', 'xmldict', 'SQLAlchemy', 'psycopg2', 'couchdb', 'fuse-python', 'requests', 'python-novaclient', 'python-swiftclient', 'psutil', 'python-gnupg', 'M2Crypto', ], long_description=open('README.rst').read(), scripts=['bin/osdcfs', 'bin/osdc-upload-metadata', 'bin/osdc-upload-file', 'bin/osdc-register-file'], ) #VIRTUAL_ENV #%s/lib/python2.7/site-packages/tukey_middleware/local_settings.py #subprocess.Popen(", shell=True)
apache-2.0
-2,085,793,852,312,047,400
29.045455
84
0.638931
false
3.553763
false
false
false
arvinsahni/ml4
learn/forall.py
1
2547
import pandas as pd from sklearn.preprocessing import label_binarize from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.metrics import r2_score, roc_auc_score from learn import utils class Regression(): def __init__(self, time_to_compute=None): self.time_to_compute = time_to_compute def fit(self, X, y): model = RandomForestRegressor(n_estimators=100, oob_score=True) model.fit(X, y) self.model = model self.oob_predictions = model.oob_prediction_ self.score_type = "R2" self.score = r2_score(y, self.oob_predictions) return self def predict(self, X): predictions = self.model.predict(X) return predictions class Classification(): def __init__(self, time_to_compute=None): """ """ self.time_to_compute = time_to_compute def fit(self, X, y): """ Currently y must be numeric. Wrap LabelVectorizer as TODO. """ y = pd.Series(y) self.n_classes = len(y.unique()) model = RandomForestClassifier(n_estimators=100, oob_score=True) model.fit(X, y) self.model = model # Evaluation metrics if self.n_classes == 2: self.oob_predictions = model.oob_decision_function_[:, 1] self.score_type = "AUC" self.score = roc_auc_score(y, self.oob_predictions) else: self.oob_predictions = model.oob_decision_function_ self.score_type = "AUC" y_bin = label_binarize(y, sorted(pd.Series(y).unique())) self.score = roc_auc_score(y_bin, self.oob_predictions) return self def predict(self, X): predictions = self.model.predict(X) return predictions class All(): def __init__(self, time_to_compute=None): self.time_to_compute = time_to_compute def fit(self, X, y): self.classification = utils.is_classification_problem(y) if self.classification: model = Classification() else: model = Regression() model.fit(X, y) self.model = model self.score = model.score self.score_type = model.score_type return self def predict(self, X): predictions = self.model.predict(X) return predictions
mit
-7,940,005,392,520,095,000
30.45679
74
0.556733
false
4.049285
false
false
false
chromium/chromium
third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/vcs.py
13
1926
import subprocess from functools import partial from typing import Callable from mozlog import get_default_logger from wptserve.utils import isomorphic_decode logger = None def vcs(bin_name: str) -> Callable[..., None]: def inner(command, *args, **kwargs): global logger if logger is None: logger = get_default_logger("vcs") repo = kwargs.pop("repo", None) log_error = kwargs.pop("log_error", True) stdout = kwargs.pop("stdout", None) stdin = kwargs.pop("stdin", None) if kwargs: raise TypeError(kwargs) args = list(args) proc_kwargs = {} if repo is not None: # Make sure `cwd` is str type to work in different sub-versions of Python 3. # Before 3.8, bytes were not accepted on Windows for `cwd`. proc_kwargs["cwd"] = isomorphic_decode(repo) if stdout is not None: proc_kwargs["stdout"] = stdout if stdin is not None: proc_kwargs["stdin"] = stdin command_line = [bin_name, command] + args logger.debug(" ".join(command_line)) try: func = subprocess.check_output if not stdout else subprocess.check_call return func(command_line, stderr=subprocess.STDOUT, **proc_kwargs) except OSError as e: if log_error: logger.error(e) raise except subprocess.CalledProcessError as e: if log_error: logger.error(e.output) raise return inner git = vcs("git") hg = vcs("hg") def bind_to_repo(vcs_func, repo, log_error=True): return partial(vcs_func, repo=repo, log_error=log_error) def is_git_root(path, log_error=True): try: rv = git("rev-parse", "--show-cdup", repo=path, log_error=log_error) except subprocess.CalledProcessError: return False return rv == b"\n"
bsd-3-clause
6,284,325,737,113,984,000
28.630769
88
0.593458
false
3.930612
false
false
false
hidext/oemedical
oemedical_his/models/oemedical_hospital_unit.py
1
1618
# -*- coding: utf-8 -*- ############################################################################## # # Tech-Receptives Solutions Pvt. Ltd. # Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>) # Special Credit and Thanks to Thymbra Latinoamericana S.A. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm class OeMedicalHospitalUnit(orm.Model): _name = 'oemedical.hospital.unit' _columns = { 'code': fields.char(size=8, string='Code'), 'institution': fields.many2one( 'res.partner', string='Institution', help='Medical Center'), 'name': fields.char( string='Name', size=256, required=True, help='Name of the unit, eg Neonatal, Intensive Care, ...'), 'extra_info': fields.text(string='Extra Info'), }
agpl-3.0
3,963,752,721,680,169,000
39.45
78
0.592089
false
4.024876
false
false
false
justinmeister/The-Stolen-Crown-RPG
data/components/attack.py
1
1379
""" Sprites for attacks. """ import sys import pygame as pg from .. import setup, tools #Python 2/3 compatibility. if sys.version_info[0] == 2: range = xrange class Fire(pg.sprite.Sprite): """ Fire animation for attacks. """ def __init__(self, x, y): super(Fire, self).__init__() self.spritesheet = setup.GFX['explosion'] self.get_image = tools.get_image self.image_list = self.make_image_list() self.index = 0 self.image = self.image_list[self.index] self.rect = self.image.get_rect(left=x, top=y) self.timer = 0.0 def make_image_list(self): """ Make a list of images to cycle through for the animation. """ image_list = [] for row in range(8): for column in range(8): posx = column * 128 posy = row * 128 new_image = self.get_image(posx, posy, 128, 128, self.spritesheet) image_list.append(new_image) return image_list def update(self): """ Update fire explosion. """ if self.index < (len(self.image_list) - 1): self.index += 1 self.image = self.image_list[self.index] elif self.index == (len(self.image_list) - 1): self.kill()
mit
3,362,993,657,341,950,500
26.039216
64
0.518492
false
3.757493
false
false
false
alangenfeld/cloud-nfs
pyCloud/recovery.py
1
1086
#!/usr/bin/env python

import boto
import os
import tempfile
import pickle
import cloudnfs

#bucketName = cloudnfs
bucketName = "cs699wisc_samanas"

#########################################################################
# Recovery
#########################################################################
# "Load your developer keys from the .boto config file."
config = boto.config

#"Create a URI, but don't specify a bucket or object because you are listing buckets."
uri = boto.storage_uri("", "gs")

#"Get your buckets."
buckets = uri.get_all_buckets()
l = list();

for bucket in buckets:
    "Create a URI for a bucket."
    uri = boto.storage_uri(bucket.name, "gs")

    "Get the objects that are in the bucket."
    objs = uri.get_bucket()
    for obj in objs :
        if (obj.name == 'table.pkl') :
            cloudnfs.download(obj.name, obj.name)
        else :
            cloudnfs.download(obj.name, "/" + obj.name)

#if 'table.pkl' in l :
#    download('table.pkl', 'temp_table.pkl')
#    table = open(temp_table.pkl)
#    table_dict = pickle.load(table)
lgpl-3.0
4,373,986,769,432,987,600
23.681818
86
0.55709
false
3.668919
false
false
false