Dataset columns (name, type, value range or number of classes):

column            type     range / classes
repo_name         string   lengths 5 to 92
path              string   lengths 4 to 221
copies            string   19 classes
size              string   lengths 4 to 6
content           string   lengths 766 to 896k
license           string   15 classes
hash              int64    -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64  6.51 to 99.9
line_max          int64    32 to 997
alpha_frac        float64  0.25 to 0.96
autogenerated     bool     1 class
ratio             float64  1.5 to 13.6
config_test       bool     2 classes
has_no_keywords   bool     2 classes
few_assignments   bool     1 class
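The boolean and numeric columns read like standard quality filters for a source-code corpus. As a minimal sketch of filtering such a table with pandas, assuming the rows are available as a DataFrame (the parquet file name and the thresholds below are illustrative, not part of the dataset):

import pandas as pd

# Hypothetical file name; the actual storage format of this dataset is not stated.
df = pd.read_parquet("python_code_corpus.parquet")

# Keep hand-written, non-test files with moderate line lengths and a healthy
# fraction of alphabetic characters, mirroring the boolean/numeric columns above.
mask = (
    ~df["autogenerated"]
    & ~df["config_test"]
    & (df["line_max"] <= 200)
    & (df["alpha_frac"] >= 0.4)
)
print(df.loc[mask, ["repo_name", "path", "license", "size"]].head())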
repo_name: timothycrosley/thedom | path: thedom/document.py | copies: 1 | size: 6794
'''
    Document.py

    Provides elements that define the html document being served to the client-side

    Copyright (C) 2015  Timothy Edmund Crosley

    This program is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
    PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with
    this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''

from . import Base, Factory
from .MethodUtils import CallBack
from .MultiplePythonSupport import *
from .Resources import ResourceFile

Factory = Factory.Factory("Document")

DOCTYPE_XHTML_TRANSITIONAL = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
                              '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
DOCTYPE_XHTML_STRICT = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
                        '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
DOCTYPE_XHTML_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
                          '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML4_TRANSITIONAL = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" '
                              '"http://www.w3.org/TR/REC-html40/loose.dtd">')
DOCTYPE_HTML4_STRICT = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
                        '"http://www.w3.org/TR/html4/strict.dtd">')
DOCTYPE_HTML4_FRAMESET = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
                          '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">')
DOCTYPE_HTML5 = "<!DOCTYPE html>"


class MetaData(Base.Node):
    """ A webelement implementation of the meta tag """
    __slots__ = ()
    tagName = "meta"
    displayable = False

    properties = Base.Node.properties.copy()
    properties['value'] = {'action': 'setValue'}
    properties['name'] = {'action': 'setName'}
    properties['http-equiv'] = {'action': 'attribute'}

    def _create(self, id=None, name=None, parent=None, **kwargs):
        Base.Node._create(self)

    def value(self):
        """ Returns the meta tag's value """
        return self.attributes.get('content')

    def setValue(self, value):
        """ Sets the meta tag's value """
        self.attributes['content'] = value

    def getName(self):
        """ Returns the name of the meta tag """
        return self.name

    def setName(self, name):
        """ Sets the name of the meta tag """
        self.name = name

    def shown(self):
        """ Meta tags are never visible """
        return False

Factory.addProduct(MetaData)


class HTTPHeader(MetaData):
    """ A webelement that represents an http header meta tag """
    __slots__ = ()

    def getName(self):
        """ Returns the header's name """
        return self.attributes.get('http-equiv')

    def setName(self, name):
        """ Sets the header's name """
        self.attributes['http-equiv'] = name

Factory.addProduct(HTTPHeader)


class Document(Base.Node):
    """ A Node representation of the overall document that fills a single page """
    __slots__ = ('head', 'body', 'title', 'contentType')
    doctype = DOCTYPE_HTML5
    tagName = "html"

    properties = Base.Node.properties.copy()
    properties['doctype'] = {'action': 'classAttribute'}
    properties['title'] = {'action': 'title.setText'}
    properties['contentType'] = {'action': 'contentType.setValue'}
    properties['xmlns'] = {'action': 'attribute'}

    class Head(Base.Node):
        """ Document's Head """
        tagName = "head"

    class Body(Base.Node):
        """ Document's Body """
        tagName = "body"

    class Title(Base.Node):
        """ Document's Title """
        tagName = "title"

        def _create(self, id=None, name=None, parent=None, **kwargs):
            Base.Node._create(self, id=id, name=name, parent=parent)
            self._textNode = self.add(Base.TextNode())

        def setText(self, text):
            """ Sets the document title """
            self._textNode.setText(text)

        def text(self):
            """ Returns the document title """
            return self._textNode.text()

    def _create(self, id=None, name=None, parent=None, **kwargs):
        Base.Node._create(self)
        self.head = self.add(self.Head())
        self.body = self.add(self.Body())
        self.title = self.head.add(self.Title())
        self.contentType = self.addHeader('Content-Type', 'text/html; charset=UTF-8')

    def addMetaData(self, name=None, value="", **kwargs):
        """ Will add a meta tag based on a name + value pair """
        metaTag = self.head.add(MetaData(**kwargs))
        metaTag.setName(name)
        metaTag.setValue(value)
        return metaTag

    def addHeader(self, name, value):
        """ Will add an HTTP header based on a name + value pair """
        header = self.head.add(HTTPHeader())
        header.setName(name)
        header.setValue(value)
        return header

    def toHTML(self, formatted=False, *args, **kwargs):
        """ Overrides toHTML to include the doctype definition before the open tag. """
        return self.doctype + "\n" + Base.Node.toHTML(self, formatted, *args, **kwargs)

    def add(self, childElement, ensureUnique=True):
        """ Overrides add to place header elements and resources in the head and all others in the body. """
        if type(childElement) in [self.Head, self.Body]:
            return Base.Node.add(self, childElement, ensureUnique)
        elif type(childElement) == ResourceFile or \
                childElement._tagName in ['title', 'base', 'link', 'meta', 'script', 'style']:
            return self.head.add(childElement, ensureUnique)
        else:
            return self.body.add(childElement, ensureUnique)

Head = Document.Head
Body = Document.Body
Title = Document.Title
Factory.addProduct(Document)
license: gpl-2.0 | hash: 7,022,688,719,562,299,000 | line_mean: 31.507177 | line_max: 104 | alpha_frac: 0.584781 | autogenerated: false | ratio: 4.063397 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: jiyfeng/RSTParser | path: model.py | copies: 1 | size: 3945
## model.py
## Author: Yangfeng Ji
## Date: 09-09-2014
## Time-stamp: <yangfeng 11/05/2014 20:44:25>
## Last changed: umashanthi 11/19/2014

""" As a parsing model, it includes the following functions
1. Mini-batch training on the data generated by the Data class
2. Shift-reduce RST parsing for a given text sequence
3. Save/load parsing model
"""

from sklearn.svm import LinearSVC
from cPickle import load, dump
from parser import SRParser
from feature import FeatureGenerator
from tree import RSTTree
from util import *
from datastructure import ActionError
import gzip, sys
import numpy as np


class ParsingModel(object):
    def __init__(self, vocab=None, idxlabelmap=None, clf=None):
        """ Initialization

        :type vocab: dict
        :param vocab: mapping from feature templates to feature indices

        :type idxlabelmap: dict
        :param idxlabelmap: mapping from parsing action indices to parsing actions

        :type clf: LinearSVC
        :param clf: a multiclass classifier from sklearn
        """
        self.vocab = vocab
        # print labelmap
        self.labelmap = idxlabelmap
        if clf is None:
            clf = LinearSVC()
        self.clf = clf

    def train(self, trnM, trnL):
        """ Perform batch learning on the parsing model """
        self.clf.fit(trnM, trnL)

    def predict(self, features):
        """ Predict parsing actions for a given set of features

        :type features: list
        :param features: feature list generated by FeatureGenerator
        """
        vec = vectorize(features, self.vocab)
        predicted_output = self.clf.decision_function(vec)
        idxs = np.argsort(predicted_output[0])[::-1]
        possible_labels = []
        for index in idxs:
            possible_labels.append(self.labelmap[index])
        return possible_labels

    def savemodel(self, fname):
        """ Save model and vocab """
        if not fname.endswith('.gz'):
            fname += '.gz'
        D = {'clf': self.clf, 'vocab': self.vocab, 'idxlabelmap': self.labelmap}
        with gzip.open(fname, 'w') as fout:
            dump(D, fout)
        print 'Save model into file: {}'.format(fname)

    def loadmodel(self, fname):
        """ Load model """
        with gzip.open(fname, 'r') as fin:
            D = load(fin)
        self.clf = D['clf']
        self.vocab = D['vocab']
        self.labelmap = D['idxlabelmap']
        print 'Load model from file: {}'.format(fname)

    def sr_parse(self, texts):
        """ Shift-reduce RST parsing based on model prediction

        :type texts: list of string
        :param texts: list of EDUs for parsing
        """
        # Initialize parser
        srparser = SRParser([], [])
        srparser.init(texts)
        # Parsing
        while not srparser.endparsing():
            # Generate features; make sure to call the generator with the
            # same arguments as in the data generation part
            stack, queue = srparser.getstatus()
            fg = FeatureGenerator(stack, queue)
            features = fg.features()
            labels = self.predict(features)
            # Enumerate all possible actions, ranked by prediction score
            for i, label in enumerate(labels):
                action = label2action(label)
                try:
                    srparser.operate(action)
                    break  # legal action found, end the loop
                except ActionError:
                    if i < len(labels) - 1:
                        # not a legal action, try the next candidate
                        continue
                    else:
                        print "Parsing action error with {}".format(action)
                        sys.exit()
        tree = srparser.getparsetree()
        rst = RSTTree(tree=tree)
        return rst
license: mit | hash: -4,449,401,833,782,390,000 | line_mean: 30.56 | line_max: 93 | alpha_frac: 0.570089 | autogenerated: false | ratio: 4.388209 | config_test: false | has_no_keywords: false | few_assignments: false
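A minimal sketch of how this model is driven, assuming pre-vectorized training data and the label mapping produced elsewhere in the project (the toy matrices and the mappings below are hypothetical placeholders, not values from the repository):

import numpy as np
from model import ParsingModel

# Hypothetical toy inputs: a 4x3 feature matrix and the corresponding action labels.
trnM = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1]])
trnL = np.array([0, 1, 0, 1])
vocab = {('feat-a',): 0, ('feat-b',): 1, ('feat-c',): 2}   # feature template -> index
idxlabelmap = {0: 'Shift', 1: 'Reduce'}                    # action index -> label

pm = ParsingModel(vocab=vocab, idxlabelmap=idxlabelmap)
pm.train(trnM, trnL)
pm.savemodel('toy-parsing-model')   # written as toy-parsing-model.gz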
repo_name: sherpaman/MolToolPy | path: bin/hbond_stat.py | copies: 1 | size: 1064
#!/usr/bin/env python
from sys import argv, stderr

# Takes as input the name of a file that contains the residue-pair data for every frame.
# Each line has the following format:
# frame atom1_id res1_name res1_id atom1_name atom2_id res2_name res2_id atom2_name ...
# 0 8661 T 273 N3 8577 T 271 O2P 0.287049 4.688220
# The output is a dictionary:
# diz[(res1, res2)] = frequency

def group_values(filename):
    hbond = {}
    local = {}
    resname = {}
    prev_frame = -1
    tot_frame = 0
    f = open(filename)
    for line in f:
        flags = line.split()
        frame = int(flags[0])
        res1 = int(flags[3])
        res2 = int(flags[7])
        resname[res1] = flags[2]
        resname[res2] = flags[6]
        if frame != prev_frame:
            # New frame: count it and fold the pairs seen in the previous frame
            # into the running totals.
            prev_frame = frame
            tot_frame += 1
            for k in local.keys():
                try:
                    hbond[k] += 1
                except KeyError:
                    hbond[k] = 1
            local = {}
            stderr.write("\rframe %d " % (frame))
        # Normalize pair ordering so (res1, res2) and (res2, res1) count as one pair.
        if res1 <= res2:
            local[res1, res2] = 1
        else:
            local[res2, res1] = 1
    # Fold in the pairs recorded for the final frame.
    for k in local.keys():
        try:
            hbond[k] += 1
        except KeyError:
            hbond[k] = 1
    f.close()
    stderr.write("\n")
    return hbond
license: gpl-2.0 | hash: -7,142,410,232,880,668,000 | line_mean: 23.159091 | line_max: 102 | alpha_frac: 0.575729 | autogenerated: false | ratio: 2.812169 | config_test: false | has_no_keywords: false | few_assignments: false
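A short driver sketch for the function above, assuming the bond-list file is passed as the first command-line argument (the top-10 report format is invented for illustration):

if __name__ == '__main__':
    # Report the ten residue pairs that form hydrogen bonds in the most frames.
    hbond = group_values(argv[1])
    ranked = sorted(hbond.items(), key=lambda kv: kv[1], reverse=True)
    for (res1, res2), count in ranked[:10]:
        print("%4d %4d : %d frames" % (res1, res2, count))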
repo_name: sebp/scikit-survival | path: sksurv/preprocessing.py | copies: 1 | size: 3945
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils.validation import check_is_fitted from .column import encode_categorical __all__ = ['OneHotEncoder'] def check_columns_exist(actual, expected): missing_features = expected.difference(actual) if len(missing_features) != 0: raise ValueError("%d features are missing from data: %s" % ( len(missing_features), missing_features.tolist() )) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical columns with `M` categories into `M-1` columns according to the one-hot scheme. The order of non-categorical columns is preserved, encoded columns are inserted inplace of the original column. Parameters ---------- allow_drop : boolean, optional, default: True Whether to allow dropping categorical columns that only consist of a single category. Attributes ---------- feature_names_ : pandas.Index List of encoded columns. categories_ : dict Categories of encoded columns. encoded_columns_ : list Name of columns after encoding. Includes names of non-categorical columns. """ def __init__(self, allow_drop=True): self.allow_drop = allow_drop def fit(self, X, y=None): # pylint: disable=unused-argument """Retrieve categorical columns. Parameters ---------- X : pandas.DataFrame Data to encode. y : Ignored. For compatibility with Pipeline. Returns ------- self : object Returns self """ self.fit_transform(X) return self def _encode(self, X, columns_to_encode): return encode_categorical(X, columns=columns_to_encode, allow_drop=self.allow_drop) def fit_transform(self, X, y=None, **fit_params): # pylint: disable=unused-argument """Convert categorical columns to numeric values. Parameters ---------- X : pandas.DataFrame Data to encode. y : Ignored. For compatibility with TransformerMixin. fit_params : Ignored. For compatibility with TransformerMixin. Returns ------- Xt : pandas.DataFrame Encoded data. """ columns_to_encode = X.select_dtypes(include=["object", "category"]).columns x_dummy = self._encode(X, columns_to_encode) self.feature_names_ = columns_to_encode self.categories_ = {k: X[k].cat.categories for k in columns_to_encode} self.encoded_columns_ = x_dummy.columns return x_dummy def transform(self, X): """Convert categorical columns to numeric values. Parameters ---------- X : pandas.DataFrame Data to encode. Returns ------- Xt : pandas.DataFrame Encoded data. """ check_is_fitted(self, "encoded_columns_") check_columns_exist(X.columns, self.feature_names_) Xt = X.copy() for col, cat in self.categories_.items(): Xt[col].cat.set_categories(cat, inplace=True) new_data = self._encode(Xt, self.feature_names_) return new_data.loc[:, self.encoded_columns_]
license: gpl-3.0 | hash: -6,087,449,575,147,389,000 | line_mean: 31.073171 | line_max: 91 | alpha_frac: 0.628897 | autogenerated: false | ratio: 4.417693 | config_test: false | has_no_keywords: false | few_assignments: false
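A small usage sketch for the encoder above (the toy DataFrame is made up; the import path follows the file's location in sksurv/preprocessing.py):

import pandas as pd
from sksurv.preprocessing import OneHotEncoder

# Toy frame with one categorical and one numeric column.
df = pd.DataFrame({
    "treatment": pd.Categorical(["drug", "placebo", "drug", "placebo"]),
    "age": [61, 52, 70, 48],
})

enc = OneHotEncoder()
encoded = enc.fit_transform(df)
# A categorical column with M categories becomes M-1 numeric columns;
# the numeric column is passed through unchanged.
print(encoded.columns.tolist())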
repo_name: mikesname/ehri-collections | path: ehriportal/portal/migrations/0012_auto__add_field_authority_languages__add_field_authority_scripts.py | copies: 1 | size: 17152
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Authority.languages' db.add_column('portal_authority', 'languages', self.gf('jsonfield.fields.JSONField')(default='[]'), keep_default=False) # Adding field 'Authority.scripts' db.add_column('portal_authority', 'scripts', self.gf('jsonfield.fields.JSONField')(default='[]'), keep_default=False) def backwards(self, orm): # Deleting field 'Authority.languages' db.delete_column('portal_authority', 'languages') # Deleting field 'Authority.scripts' db.delete_column('portal_authority', 'scripts') models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'portal.authority': { 'Meta': {'object_name': 'Authority', '_ormbases': ['portal.Resource']}, 'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}), 'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}), 'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'type_of_entity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'portal.collection': { 'Meta': {'object_name': 'Collection', '_ormbases': ['portal.Resource']}, 'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'accruals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'acquisition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'alternate_title': 
('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'appraisal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'archival_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'arrangement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Authority']", 'null': 'True', 'blank': 'True'}), 'edition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'extent_and_medium': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'languages_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'location_of_copies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'location_of_originals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'lod': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'physical_characteristics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'related_units_of_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}), 'reproduction_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}), 'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'scope_and_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'scripts_of_description': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}), 'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'portal.contact': { 'Meta': {'object_name': 'Contact'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'contact_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'country_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created_on': ('django.db.models.fields.DateTimeField', [], {}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}), 'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'telephone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'portal.fuzzydate': { 'Meta': {'object_name': 'FuzzyDate'}, 'circa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'date_set'", 'to': "orm['portal.Collection']"}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'precision': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'start_date': ('django.db.models.fields.DateField', [], {}), 'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}) }, 'portal.othername': { 'Meta': {'object_name': 'OtherName'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'portal.place': { 'Meta': {'object_name': 'Place'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}) }, 'portal.property': { 'Meta': {'object_name': 'Property'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'portal.relation': { 'Meta': {'object_name': 'Relation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}), 'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'portal.repository': { 'Meta': {'object_name': 'Repository', '_ormbases': ['portal.Resource']}, 'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'buildings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'collecting_policies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 
'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'disabled_access': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'geocultural_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'holdings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'languages': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'logo': ('portal.thumbs.ImageWithThumbsField', [], {'name': "'logo'", 'sizes': '((100, 100), (300, 300))', 'max_length': '100', 'blank': 'True', 'null': 'True'}), 'maintenance_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'opening_times': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'reproduction_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'research_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}), 'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'scripts': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}), 'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'type_of_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'portal.resource': { 'Meta': {'object_name': 'Resource'}, 'created_on': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'portal.resourceimage': { 'Meta': {'object_name': 'ResourceImage'}, 'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('portal.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'name': "'image'", 'sizes': '((100, 100), (300, 300))'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}) }, 'taggit.tag': { 
'Meta': {'object_name': 'Tag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}) }, 'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"}) } } complete_apps = ['portal']
license: mit | hash: 2,309,109,972,575,588,000 | line_mean: 80.67619 | line_max: 174 | alpha_frac: 0.543027 | autogenerated: false | ratio: 3.636209 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: jtbattle/wangemu | path: wvdutil/wvdHandler_base.py | copies: 1 | size: 4196
# Purpose: template class for file handler for the wvdutil.py program # Author: Jim Battle # # Version: 1.0, 2018/09/15, JTB # massive restructuring of the old wvdutil code base # Version: 1.1, 2021/06/19, JTB # get rid of bilingualism (aka python2 support); # convert to inline type hints instead of type hint pragma comments # Version: 1.2, 2021/06/20, JTB # declare and use type aliases Sector and SectorList for clarity from typing import List, Dict, Any, Tuple # pylint: disable=unused-import from wvdTypes import Sector, SectorList, Options class WvdHandler_base(object): # pylint: disable=useless-object-inheritance def __init__(self): self._errors: List[str] = [] self._warnings: List[str] = [] self._firsterr: int = 0 # which was the first sector with an error self._firstwarn: int = 0 # which was the first sector with a warning @staticmethod def name() -> str: return 'short description' @staticmethod def nameLong() -> str: # optional: override with longer description if useful return WvdHandler_base.name() # return either "P "(rogram) or "D "(ata) @staticmethod def fileType() -> str: return 'D ' # pylint: disable=unused-argument, no-self-use def checkBlocks(self, blocks: SectorList, opts: Options) -> Dict[str, Any]: # the opts dictionary can contain these keys: # 'sector' = <number> -- the absolute address of the first sector # 'used' = <number> -- the "used" field from the catalog, if it is known # 'warnlimit' = <number> -- stop when the number of warnings is exceeded # the return dict contains these keys: # 'failed' = bool -- True if any errors or warnings # 'errors' = [str] -- list of error messages # 'warnings' = [str] -- list of warning messages # 'lastsec' = <number> -- last valid sector before giving up return { 'errors':0, 'warnings':0, 'lastsec':0 } # the bool is True if this is a terminating block # pylint: disable=unused-argument, no-self-use def listOneBlock(self, blk: Sector, opts: Options) -> Tuple[bool, List[str]]: # the opts dictionary can contain these keys: # 'sector' = <number> -- the absolute address of the first sector # 'used' = <number> -- the "used" field from the catalog, if it is known # 'warnlimit' = <number> -- stop when the number of warnings is exceeded return (True, []) # if the file type doesn't have context which crosses sectors, then # the default method will just repeated use listOneBlock def listBlocks(self, blocks: SectorList, opts: Options) -> List[str]: # same opts as listOneBlock listing = [] opt = dict(opts) for offset, blk in enumerate(blocks): opt['secnum'] = opts['sector'] + offset done, morelines = self.listOneBlock(blk, opt) listing.extend(morelines) if done: break return listing # utilities to be used by derived classes def clearErrors(self) -> None: self._errors = [] self._warnings = [] self._firsterr = 0 self._firstwarn = 0 def error(self, secnum: int, text: str) -> None: if (not self._errors) or (secnum < self._firsterr): self._firsterr = secnum self._errors.append(text) def warning(self, secnum: int, text: str) -> None: if (not self._warnings) or (secnum < self._firstwarn): self._firstwarn = secnum self._warnings.append(text) def status(self, sec: int, opts: Options) -> Dict[str, Any]: failed = (len(self._errors) > 0) or (len(self._warnings) > opts['warnlimit']) if self._errors: last_good_sector = self._firsterr-1 elif self._warnings: last_good_sector = self._firstwarn-1 else: last_good_sector = sec return { 'failed': failed, 'errors': self._errors, 'warnings': self._warnings, 'lastsec': last_good_sector }
license: mit | hash: -7,556,717,784,087,207,000 | line_mean: 39.346154 | line_max: 87 | alpha_frac: 0.602717 | autogenerated: false | ratio: 3.793852 | config_test: false | has_no_keywords: false | few_assignments: false
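Since the file describes itself as a template class, a minimal sketch of a derived handler might look like the following (the subclass name and its hex-dump output format are invented, and it assumes a Sector behaves like a byte sequence):

from typing import List, Tuple

from wvdHandler_base import WvdHandler_base
from wvdTypes import Sector, Options


class WvdHandler_hexdump(WvdHandler_base):
    """Hypothetical handler: one hex-summary line per sector."""

    @staticmethod
    def name() -> str:
        return 'raw hex dump'

    @staticmethod
    def fileType() -> str:
        return 'D '

    def listOneBlock(self, blk: Sector, opts: Options) -> Tuple[bool, List[str]]:
        # 'secnum' is filled in by the base class's listBlocks() loop.
        line = '%05d: %s' % (opts['secnum'], bytes(blk[:16]).hex())
        return (False, [line])  # never a terminating block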
repo_name: endlessm/chromium-browser | path: build/win/reorder-imports.py | copies: 4 | size: 4054
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob
import optparse
import os
import shutil
import subprocess
import sys

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..',
                                'third_party', 'pefile'))
import pefile


def reorder_imports(input_dir, output_dir, architecture):
  """Swap chrome_elf.dll to be the first import of chrome.exe.

  Also copy over any related files that might be needed (pdbs, manifests etc.).
  """
  # TODO(thakis): See if there is a reliable way to write the correct
  # executable in the first place, so that this script only needs to verify
  # that and not write a whole new exe.

  input_image = os.path.join(input_dir, 'chrome.exe')
  output_image = os.path.join(output_dir, 'chrome.exe')

  # pefile mmap()s the whole executable, and then parses parts of it into
  # python data structures for ease of processing.  To write the file again,
  # only the mmap'd data is written back, so modifying the parsed python
  # objects generally has no effect.  However, parsed raw data ends up in
  # pe.Structure instances, and these all get serialized back when the file
  # gets written.  So things that are in a Structure must have their data set
  # through the Structure, while other data must be set through the
  # set_bytes_*() methods.
  pe = pefile.PE(input_image, fast_load=True)
  if architecture == 'x64' or architecture == 'arm64':
    assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS
  else:
    assert pe.PE_TYPE == pefile.OPTIONAL_HEADER_MAGIC_PE

  pe.parse_data_directories(directories=[
      pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])

  found_elf = False
  for i, peimport in enumerate(pe.DIRECTORY_ENTRY_IMPORT):
    if peimport.dll.lower() == 'chrome_elf.dll':
      assert not found_elf, 'only one chrome_elf.dll import expected'
      found_elf = True
      if i > 0:
        swap = pe.DIRECTORY_ENTRY_IMPORT[0]

        # Morally we want to swap peimport.struct and swap.struct here, but
        # the pe module doesn't expose a public method on Structure to get
        # all data of a Structure without explicitly listing all field names.
        # NB: OriginalFirstThunk and Characteristics are a union, both at
        # offset 0; handling just one of them is enough.
        peimport.struct.OriginalFirstThunk, swap.struct.OriginalFirstThunk = \
            swap.struct.OriginalFirstThunk, peimport.struct.OriginalFirstThunk
        peimport.struct.TimeDateStamp, swap.struct.TimeDateStamp = \
            swap.struct.TimeDateStamp, peimport.struct.TimeDateStamp
        peimport.struct.ForwarderChain, swap.struct.ForwarderChain = \
            swap.struct.ForwarderChain, peimport.struct.ForwarderChain
        peimport.struct.Name, swap.struct.Name = \
            swap.struct.Name, peimport.struct.Name
        peimport.struct.FirstThunk, swap.struct.FirstThunk = \
            swap.struct.FirstThunk, peimport.struct.FirstThunk

  assert found_elf, 'chrome_elf.dll import not found'

  pe.write(filename=output_image)

  for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
    shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
  return 0


def main(argv):
  usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
                    metavar='DIR')
  parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
                    metavar='DIR')
  parser.add_option('-a', '--arch', help='architecture of build (optional)',
                    default='ia32')
  opts, args = parser.parse_args()

  if not opts.input or not opts.output:
    parser.error('Please provide an input and an output directory')
  return reorder_imports(opts.input, opts.output, opts.arch)


if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
license: bsd-3-clause | hash: 5,867,402,285,721,175,000 | line_mean: 41.229167 | line_max: 78 | alpha_frac: 0.694869 | autogenerated: false | ratio: 3.695533 | config_test: false | has_no_keywords: false | few_assignments: false
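A quick way to check the result of the swap, using the same pefile calls as the script above (the path to the rewritten chrome.exe is a placeholder):

import pefile

# Placeholder path to the rewritten executable.
pe = pefile.PE("out/Release/chrome.exe", fast_load=True)
pe.parse_data_directories(directories=[
    pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])

# After reordering, chrome_elf.dll should be the first import listed.
for entry in pe.DIRECTORY_ENTRY_IMPORT:
    print(entry.dll)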
repo_name: qedsoftware/commcare-hq | path: corehq/apps/callcenter/fixturegenerators.py | copies: 1 | size: 4228
from xml.etree import ElementTree from datetime import datetime import pytz from corehq.apps.callcenter.app_parser import get_call_center_config_from_app from casexml.apps.phone.models import OTARestoreUser from corehq.util.soft_assert import soft_assert from corehq.util.timezones.conversions import ServerTime from dimagi.utils.logging import notify_exception utc = pytz.utc def should_sync(domain, last_sync, utcnow=None): # definitely sync if we haven't synced before if not last_sync or not last_sync.date: return True # utcnow only used in tests to mock other times utcnow = utcnow or datetime.utcnow() try: timezone = domain.get_default_timezone() except pytz.UnknownTimeZoneError: timezone = utc last_sync_utc = last_sync.date # check if user has already synced today (in local timezone). # Indicators only change daily. last_sync_local = ServerTime(last_sync_utc).user_time(timezone).done() current_date_local = ServerTime(utcnow).user_time(timezone).done() if current_date_local.date() != last_sync_local.date(): return True return False class IndicatorsFixturesProvider(object): id = 'indicators' def __call__(self, restore_user, version, last_sync=None, app=None): assert isinstance(restore_user, OTARestoreUser) domain = restore_user.project fixtures = [] if self._should_return_no_fixtures(domain, last_sync): return fixtures config = None if app: try: config = get_call_center_config_from_app(app) except: notify_exception(None, "Error getting call center config from app", details={ 'domain': app.domain, 'app_id': app.get_id }) if config: _assert = soft_assert(['skelly_at_dimagi_dot_com'.replace('_at_', '@').replace('_dot_', '.')]) _assert(not config.includes_legacy(), 'Domain still using legacy call center indicators', { 'domain': domain.name, 'config': config.to_json() }) try: fixtures.append(gen_fixture(restore_user, restore_user.get_call_center_indicators(config))) except Exception: # blanket exception catching intended notify_exception(None, 'problem generating callcenter fixture', details={ 'user_id': restore_user.user_id, 'domain': restore_user.domain }) return fixtures @staticmethod def _should_return_no_fixtures(domain, last_sync): config = domain.call_center_config return ( not domain or not (config.fixtures_are_active() and config.config_is_valid()) or not should_sync(domain, last_sync) ) indicators_fixture_generator = IndicatorsFixturesProvider() def gen_fixture(restore_user, indicator_set): """ Generate the fixture from the indicator data. :param user: The user. :param indicator_set: A subclass of SqlIndicatorSet """ """ Example output: indicator_set.name = 'demo' indicator_set.get_data() = {'user_case1': {'indicator_a': 1, 'indicator_b': 2}} <fixture id="indicators:demo" user_id="..."> <indicators> <case id="user_case1"> <indicator_a>1</indicator_a> <indicator_b>2</indicator_2> </case> </indicators> </fixture> """ if indicator_set is None: return [] name = indicator_set.name data = indicator_set.get_data() fixture = ElementTree.Element('fixture', attrib={ 'id': ':'.join((IndicatorsFixturesProvider.id, name)), 'user_id': restore_user.user_id, 'date': indicator_set.reference_date.isoformat() }) indicators_node = ElementTree.SubElement(fixture, 'indicators') for case_id, indicators in data.iteritems(): group = ElementTree.SubElement(indicators_node, 'case', attrib={'id': case_id}) for name, value in indicators.items(): indicator = ElementTree.SubElement(group, name) indicator.text = str(value) return fixture
license: bsd-3-clause | hash: 1,972,465,104,134,683,000 | line_mean: 31.274809 | line_max: 106 | alpha_frac: 0.62228 | autogenerated: false | ratio: 3.996219 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: mikemhenry/arcade | path: examples/sprite_tiled_map.py | copies: 1 | size: 6561
""" Load a map stored in csv format, as exported by the program 'Tiled.' Artwork from http://kenney.nl """ import arcade SPRITE_SCALING = 0.5 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 # How many pixels to keep as a minimum margin between the character # and the edge of the screen. VIEWPORT_MARGIN = 40 RIGHT_MARGIN = 150 # Physics MOVEMENT_SPEED = 5 JUMP_SPEED = 14 GRAVITY = 0.5 def get_map(): map_file = open("map.csv") map_array = [] for line in map_file: line = line.strip() map_row = line.split(",") for index, item in enumerate(map_row): map_row[index] = int(item) map_array.append(map_row) return map_array class MyApplication(arcade.Window): """ Main application class. """ def __init__(self, width, height): """ Initializer :param width: :param height: """ super().__init__(width, height) # Sprite lists self.all_sprites_list = None self.coin_list = None # Set up the player self.score = 0 self.player_sprite = None self.wall_list = None self.physics_engine = None self.view_left = 0 self.view_bottom = 0 self.game_over = False def setup(self): """ Set up the game and initialize the variables. """ # Sprite lists self.all_sprites_list = arcade.SpriteList() self.wall_list = arcade.SpriteList() # Set up the player self.score = 0 self.player_sprite = arcade.Sprite("images/character.png", SPRITE_SCALING) self.player_sprite.center_x = 64 self.player_sprite.center_y = 270 self.all_sprites_list.append(self.player_sprite) map_array = get_map() for row_index, row in enumerate(map_array): for column_index, item in enumerate(row): if item == -1: continue elif item == 0: wall = arcade.Sprite("images/boxCrate_double.png", SPRITE_SCALING) elif item == 1: wall = arcade.Sprite("images/grassLeft.png", SPRITE_SCALING) elif item == 2: wall = arcade.Sprite("images/grassMid.png", SPRITE_SCALING) elif item == 3: wall = arcade.Sprite("images/grassRight.png", SPRITE_SCALING) wall.right = column_index * 64 wall.top = (7 - row_index) * 64 self.all_sprites_list.append(wall) self.wall_list.append(wall) self.physics_engine = \ arcade.PhysicsEnginePlatformer(self.player_sprite, self.wall_list, gravity_constant=GRAVITY) # Set the background color arcade.set_background_color(arcade.color.AMAZON) # Set the viewport boundaries # These numbers set where we have 'scrolled' to. self.view_left = 0 self.view_bottom = 0 self.game_over = False def on_draw(self): """ Render the screen. """ # This command has to happen before we start drawing arcade.start_render() # Draw all the sprites. self.all_sprites_list.draw() # Put the text on the screen. # Adjust the text position based on the viewport so that we don't # scroll the text too. distance = self.view_left + self.player_sprite.right output = "Distance: {}".format(distance) arcade.draw_text(output, self.view_left + 10, self.view_bottom + 20, arcade.color.WHITE, 14) if self.game_over: output = "Game Over" arcade.draw_text(output, self.view_left + 200, self.view_bottom + 200, arcade.color.WHITE, 30) def on_key_press(self, key, modifiers): """ Called whenever the mouse moves. """ if key == arcade.key.UP: if self.physics_engine.can_jump(): self.player_sprite.change_y = JUMP_SPEED elif key == arcade.key.LEFT: self.player_sprite.change_x = -MOVEMENT_SPEED elif key == arcade.key.RIGHT: self.player_sprite.change_x = MOVEMENT_SPEED def on_key_release(self, key, modifiers): """ Called when the user presses a mouse button. 
""" if key == arcade.key.LEFT or key == arcade.key.RIGHT: self.player_sprite.change_x = 0 def animate(self, delta_time): """ Movement and game logic """ if self.view_left + self.player_sprite.right >= 5630: self.game_over = True # Call update on all sprites (The sprites don't do much in this # example though.) if not self.game_over: self.physics_engine.update() # --- Manage Scrolling --- # Track if we need to change the viewport changed = False # Scroll left left_bndry = self.view_left + VIEWPORT_MARGIN if self.player_sprite.left < left_bndry: self.view_left -= left_bndry - self.player_sprite.left changed = True # Scroll right right_bndry = self.view_left + SCREEN_WIDTH - RIGHT_MARGIN if self.player_sprite.right > right_bndry: self.view_left += self.player_sprite.right - right_bndry changed = True # Scroll up top_bndry = self.view_bottom + SCREEN_HEIGHT - VIEWPORT_MARGIN if self.player_sprite.top > top_bndry: self.view_bottom += self.player_sprite.top - top_bndry changed = True # Scroll down bottom_bndry = self.view_bottom + VIEWPORT_MARGIN if self.player_sprite.bottom < bottom_bndry: self.view_bottom -= bottom_bndry - self.player_sprite.bottom changed = True # If we need to scroll, go ahead and do it. if changed: arcade.set_viewport(self.view_left, SCREEN_WIDTH + self.view_left, self.view_bottom, SCREEN_HEIGHT + self.view_bottom) window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT) window.setup() arcade.run()
license: mit | hash: -5,013,089,162,620,566,000 | line_mean: 30.242857 | line_max: 76 | alpha_frac: 0.53757 | autogenerated: false | ratio: 3.954792 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: anderspitman/scikit-bio | path: skbio/sequence/distance.py | copies: 1 | size: 5233
""" Sequence distance metrics (:mod:`skbio.sequence.distance`) ========================================================== .. currentmodule:: skbio.sequence.distance This module contains functions for computing distances between scikit-bio ``Sequence`` objects. These functions can be used directly or supplied to other parts of the scikit-bio API that accept a sequence distance metric as input, such as :meth:`skbio.sequence.Sequence.distance` and :meth:`skbio.stats.distance.DistanceMatrix.from_iterable`. Functions --------- .. autosummary:: :toctree: generated/ hamming kmer_distance """ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import numpy as np import scipy.spatial.distance import skbio from skbio.util._decorator import experimental @experimental(as_of='0.4.2') def hamming(seq1, seq2): """Compute Hamming distance between two sequences. The Hamming distance between two equal-length sequences is the proportion of differing characters. Parameters ---------- seq1, seq2 : Sequence Sequences to compute Hamming distance between. Returns ------- float Hamming distance between `seq1` and `seq2`. Raises ------ TypeError If `seq1` and `seq2` are not ``Sequence`` instances. TypeError If `seq1` and `seq2` are not the same type. ValueError If `seq1` and `seq2` are not the same length. See Also -------- scipy.spatial.distance.hamming Notes ----- ``np.nan`` will be returned if the sequences do not contain any characters. This function does not make assumptions about the sequence alphabet in use. Each sequence object's underlying sequence of characters are used to compute Hamming distance. Characters that may be considered equivalent in certain contexts (e.g., `-` and `.` as gap characters) are treated as distinct characters when computing Hamming distance. Examples -------- >>> from skbio import Sequence >>> from skbio.sequence.distance import hamming >>> seq1 = Sequence('AGGGTA') >>> seq2 = Sequence('CGTTTA') >>> hamming(seq1, seq2) 0.5 """ _check_seqs(seq1, seq2) # Hamming requires equal length sequences. We are checking this here # because the error you would get otherwise is cryptic. if len(seq1) != len(seq2): raise ValueError( "Hamming distance can only be computed between sequences of equal " "length (%d != %d)" % (len(seq1), len(seq2))) # scipy throws a RuntimeWarning when computing Hamming distance on length 0 # input. if not seq1: distance = np.nan else: distance = scipy.spatial.distance.hamming(seq1.values, seq2.values) return float(distance) @experimental(as_of='0.4.2-dev') def kmer_distance(seq1, seq2, k, overlap=True): """Compute the kmer distance between a pair of sequences The kmer distance between two sequences is the fraction of kmers that are unique to either sequence. Parameters ---------- seq1, seq2 : Sequence Sequences to compute kmer distance between. k : int The kmer length. overlap : bool, optional Defines whether the kmers should be overlapping or not. Returns ------- float kmer distance between `seq1` and `seq2`. Raises ------ ValueError If `k` is less than 1. TypeError If `seq1` and `seq2` are not ``Sequence`` instances. TypeError If `seq1` and `seq2` are not the same type. Notes ----- kmer counts are not incorporated in this distance metric. 
``np.nan`` will be returned if there are no kmers defined for the sequences. Examples -------- >>> from skbio import Sequence >>> seq1 = Sequence('ATCGGCGAT') >>> seq2 = Sequence('GCAGATGTG') >>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS 0.9230769230... """ _check_seqs(seq1, seq2) seq1_kmers = set(map(str, seq1.iter_kmers(k, overlap=overlap))) seq2_kmers = set(map(str, seq2.iter_kmers(k, overlap=overlap))) all_kmers = seq1_kmers | seq2_kmers if not all_kmers: return np.nan shared_kmers = seq1_kmers & seq2_kmers number_unique = len(all_kmers) - len(shared_kmers) fraction_unique = number_unique / len(all_kmers) return fraction_unique def _check_seqs(seq1, seq2): # Asserts both sequences are skbio.sequence objects for seq in seq1, seq2: if not isinstance(seq, skbio.Sequence): raise TypeError( "`seq1` and `seq2` must be Sequence instances, not %r" % type(seq).__name__) # Asserts sequences have the same type if type(seq1) is not type(seq2): raise TypeError( "Sequences must have matching type. Type %r does not match type %r" % (type(seq1).__name__, type(seq2).__name__))
license: bsd-3-clause | hash: -1,355,334,489,428,729,900 | line_mean: 28.398876 | line_max: 79 | alpha_frac: 0.623543 | autogenerated: false | ratio: 3.991609 | config_test: false | has_no_keywords: false | few_assignments: false
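The module docstring above notes that these metrics can be supplied to DistanceMatrix.from_iterable; a small sketch of that combination (the three sequences and the key names are arbitrary examples):

from functools import partial

from skbio import Sequence
from skbio.sequence.distance import kmer_distance
from skbio.stats.distance import DistanceMatrix

seqs = [Sequence('ATCGGCGAT'), Sequence('GCAGATGTG'), Sequence('ATCGGCGTA')]
# Build a pairwise distance matrix using 3-mer distance as the metric.
dm = DistanceMatrix.from_iterable(
    seqs, metric=partial(kmer_distance, k=3), keys=['s1', 's2', 's3'])
print(dm)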
repo_name: kennedyshead/home-assistant | path: homeassistant/components/media_player/__init__.py | copies: 1 | size: 39109
"""Component to interface with various media players.""" from __future__ import annotations import asyncio import base64 import collections from contextlib import suppress import datetime as dt import functools as ft import hashlib import logging import secrets from typing import final from urllib.parse import urlparse from aiohttp import web from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE from aiohttp.typedefs import LooseHeaders import async_timeout import voluptuous as vol from yarl import URL from homeassistant.components import websocket_api from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView from homeassistant.components.websocket_api.const import ( ERR_NOT_FOUND, ERR_NOT_SUPPORTED, ERR_UNKNOWN_ERROR, ) from homeassistant.const import ( HTTP_INTERNAL_SERVER_ERROR, HTTP_NOT_FOUND, HTTP_OK, HTTP_UNAUTHORIZED, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK, SERVICE_MEDIA_STOP, SERVICE_REPEAT_SET, SERVICE_SHUFFLE_SET, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE, SERVICE_VOLUME_SET, SERVICE_VOLUME_UP, STATE_IDLE, STATE_OFF, STATE_PLAYING, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import ( # noqa: F401 PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE, datetime, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.network import get_url from homeassistant.loader import bind_hass from .const import ( ATTR_APP_ID, ATTR_APP_NAME, ATTR_GROUP_MEMBERS, ATTR_INPUT_SOURCE, ATTR_INPUT_SOURCE_LIST, ATTR_MEDIA_ALBUM_ARTIST, ATTR_MEDIA_ALBUM_NAME, ATTR_MEDIA_ARTIST, ATTR_MEDIA_CHANNEL, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_DURATION, ATTR_MEDIA_ENQUEUE, ATTR_MEDIA_EPISODE, ATTR_MEDIA_EXTRA, ATTR_MEDIA_PLAYLIST, ATTR_MEDIA_POSITION, ATTR_MEDIA_POSITION_UPDATED_AT, ATTR_MEDIA_REPEAT, ATTR_MEDIA_SEASON, ATTR_MEDIA_SEEK_POSITION, ATTR_MEDIA_SERIES_TITLE, ATTR_MEDIA_SHUFFLE, ATTR_MEDIA_TITLE, ATTR_MEDIA_TRACK, ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED, ATTR_SOUND_MODE, ATTR_SOUND_MODE_LIST, DOMAIN, MEDIA_CLASS_DIRECTORY, REPEAT_MODES, SERVICE_CLEAR_PLAYLIST, SERVICE_JOIN, SERVICE_PLAY_MEDIA, SERVICE_SELECT_SOUND_MODE, SERVICE_SELECT_SOURCE, SERVICE_UNJOIN, SUPPORT_BROWSE_MEDIA, SUPPORT_CLEAR_PLAYLIST, SUPPORT_GROUPING, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_REPEAT_SET, SUPPORT_SEEK, SUPPORT_SELECT_SOUND_MODE, SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, ) from .errors import BrowseError # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) ENTITY_ID_FORMAT = DOMAIN + ".{}" CACHE_IMAGES = "images" CACHE_MAXSIZE = "maxsize" CACHE_LOCK = "lock" CACHE_URL = "url" CACHE_CONTENT = "content" ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16} SCAN_INTERVAL = dt.timedelta(seconds=10) DEVICE_CLASS_TV = "tv" DEVICE_CLASS_SPEAKER = "speaker" DEVICE_CLASS_RECEIVER = "receiver" DEVICE_CLASSES = [DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER, DEVICE_CLASS_RECEIVER] DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES)) MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = { 
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string, vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string, vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean, vol.Optional(ATTR_MEDIA_EXTRA, default={}): dict, } ATTR_TO_PROPERTY = [ ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_DURATION, ATTR_MEDIA_POSITION, ATTR_MEDIA_POSITION_UPDATED_AT, ATTR_MEDIA_TITLE, ATTR_MEDIA_ARTIST, ATTR_MEDIA_ALBUM_NAME, ATTR_MEDIA_ALBUM_ARTIST, ATTR_MEDIA_TRACK, ATTR_MEDIA_SERIES_TITLE, ATTR_MEDIA_SEASON, ATTR_MEDIA_EPISODE, ATTR_MEDIA_CHANNEL, ATTR_MEDIA_PLAYLIST, ATTR_APP_ID, ATTR_APP_NAME, ATTR_INPUT_SOURCE, ATTR_SOUND_MODE, ATTR_MEDIA_SHUFFLE, ATTR_MEDIA_REPEAT, ] @bind_hass def is_on(hass, entity_id=None): """ Return true if specified media player entity_id is on. Check all media player if no entity_id specified. """ entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN) return any( not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids ) def _rename_keys(**keys): """Create validator that renames keys. Necessary because the service schema names do not match the command parameters. Async friendly. """ def rename(value): for to_key, from_key in keys.items(): if from_key in value: value[to_key] = value.pop(from_key) return value return rename async def async_setup(hass, config): """Track states and offer events for media_players.""" component = hass.data[DOMAIN] = EntityComponent( logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL ) hass.components.websocket_api.async_register_command(websocket_handle_thumbnail) hass.components.websocket_api.async_register_command(websocket_browse_media) hass.http.register_view(MediaPlayerImageView(component)) await component.async_setup(config) component.async_register_entity_service( SERVICE_TURN_ON, {}, "async_turn_on", [SUPPORT_TURN_ON] ) component.async_register_entity_service( SERVICE_TURN_OFF, {}, "async_turn_off", [SUPPORT_TURN_OFF] ) component.async_register_entity_service( SERVICE_TOGGLE, {}, "async_toggle", [SUPPORT_TURN_OFF | SUPPORT_TURN_ON] ) component.async_register_entity_service( SERVICE_VOLUME_UP, {}, "async_volume_up", [SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP], ) component.async_register_entity_service( SERVICE_VOLUME_DOWN, {}, "async_volume_down", [SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP], ) component.async_register_entity_service( SERVICE_MEDIA_PLAY_PAUSE, {}, "async_media_play_pause", [SUPPORT_PLAY | SUPPORT_PAUSE], ) component.async_register_entity_service( SERVICE_MEDIA_PLAY, {}, "async_media_play", [SUPPORT_PLAY] ) component.async_register_entity_service( SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [SUPPORT_PAUSE] ) component.async_register_entity_service( SERVICE_MEDIA_STOP, {}, "async_media_stop", [SUPPORT_STOP] ) component.async_register_entity_service( SERVICE_MEDIA_NEXT_TRACK, {}, "async_media_next_track", [SUPPORT_NEXT_TRACK] ) component.async_register_entity_service( SERVICE_MEDIA_PREVIOUS_TRACK, {}, "async_media_previous_track", [SUPPORT_PREVIOUS_TRACK], ) component.async_register_entity_service( SERVICE_CLEAR_PLAYLIST, {}, "async_clear_playlist", [SUPPORT_CLEAR_PLAYLIST] ) component.async_register_entity_service( SERVICE_VOLUME_SET, vol.All( cv.make_entity_service_schema( {vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float} ), _rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL), ), "async_set_volume_level", [SUPPORT_VOLUME_SET], ) component.async_register_entity_service( SERVICE_VOLUME_MUTE, vol.All( cv.make_entity_service_schema( 
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean} ), _rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED), ), "async_mute_volume", [SUPPORT_VOLUME_MUTE], ) component.async_register_entity_service( SERVICE_MEDIA_SEEK, vol.All( cv.make_entity_service_schema( {vol.Required(ATTR_MEDIA_SEEK_POSITION): cv.positive_float} ), _rename_keys(position=ATTR_MEDIA_SEEK_POSITION), ), "async_media_seek", [SUPPORT_SEEK], ) component.async_register_entity_service( SERVICE_JOIN, {vol.Required(ATTR_GROUP_MEMBERS): vol.All(cv.ensure_list, [cv.entity_id])}, "async_join_players", [SUPPORT_GROUPING], ) component.async_register_entity_service( SERVICE_SELECT_SOURCE, {vol.Required(ATTR_INPUT_SOURCE): cv.string}, "async_select_source", [SUPPORT_SELECT_SOURCE], ) component.async_register_entity_service( SERVICE_SELECT_SOUND_MODE, {vol.Required(ATTR_SOUND_MODE): cv.string}, "async_select_sound_mode", [SUPPORT_SELECT_SOUND_MODE], ) component.async_register_entity_service( SERVICE_PLAY_MEDIA, vol.All( cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA), _rename_keys( media_type=ATTR_MEDIA_CONTENT_TYPE, media_id=ATTR_MEDIA_CONTENT_ID, enqueue=ATTR_MEDIA_ENQUEUE, ), ), "async_play_media", [SUPPORT_PLAY_MEDIA], ) component.async_register_entity_service( SERVICE_SHUFFLE_SET, {vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean}, "async_set_shuffle", [SUPPORT_SHUFFLE_SET], ) component.async_register_entity_service( SERVICE_UNJOIN, {}, "async_unjoin_player", [SUPPORT_GROUPING] ) component.async_register_entity_service( SERVICE_REPEAT_SET, {vol.Required(ATTR_MEDIA_REPEAT): vol.In(REPEAT_MODES)}, "async_set_repeat", [SUPPORT_REPEAT_SET], ) return True async def async_setup_entry(hass, entry): """Set up a config entry.""" return await hass.data[DOMAIN].async_setup_entry(entry) async def async_unload_entry(hass, entry): """Unload a config entry.""" return await hass.data[DOMAIN].async_unload_entry(entry) class MediaPlayerEntity(Entity): """ABC for media player entities.""" _access_token: str | None = None _attr_app_id: str | None = None _attr_app_name: str | None = None _attr_group_members: list[str] | None = None _attr_is_volume_muted: bool | None = None _attr_media_album_artist: str | None = None _attr_media_album_name: str | None = None _attr_media_artist: str | None = None _attr_media_channel: str | None = None _attr_media_content_id: str | None = None _attr_media_content_type: str | None = None _attr_media_duration: int | None = None _attr_media_episode: str | None = None _attr_media_image_hash: str | None _attr_media_image_remotely_accessible: bool = False _attr_media_image_url: str | None = None _attr_media_playlist: str | None = None _attr_media_position_updated_at: dt.datetime | None = None _attr_media_position: int | None = None _attr_media_season: str | None = None _attr_media_series_title: str | None = None _attr_media_title: str | None = None _attr_media_track: int | None = None _attr_repeat: str | None = None _attr_shuffle: bool | None = None _attr_sound_mode_list: list[str] | None = None _attr_sound_mode: str | None = None _attr_source_list: list[str] | None = None _attr_source: str | None = None _attr_state: str | None = None _attr_supported_features: int = 0 _attr_volume_level: float | None = None # Implement these for your media player @property def state(self) -> str | None: """State of the player.""" return self._attr_state @property def access_token(self) -> str: """Access token for this media player.""" if self._access_token is None: self._access_token = secrets.token_hex(32) return self._access_token @property 
def volume_level(self) -> float | None: """Volume level of the media player (0..1).""" return self._attr_volume_level @property def is_volume_muted(self) -> bool | None: """Boolean if volume is currently muted.""" return self._attr_is_volume_muted @property def media_content_id(self) -> str | None: """Content ID of current playing media.""" return self._attr_media_content_id @property def media_content_type(self) -> str | None: """Content type of current playing media.""" return self._attr_media_content_type @property def media_duration(self) -> int | None: """Duration of current playing media in seconds.""" return self._attr_media_duration @property def media_position(self) -> int | None: """Position of current playing media in seconds.""" return self._attr_media_position @property def media_position_updated_at(self) -> dt.datetime | None: """When was the position of the current playing media valid. Returns value from homeassistant.util.dt.utcnow(). """ return self._attr_media_position_updated_at @property def media_image_url(self) -> str | None: """Image url of current playing media.""" return self._attr_media_image_url @property def media_image_remotely_accessible(self) -> bool: """If the image url is remotely accessible.""" return self._attr_media_image_remotely_accessible @property def media_image_hash(self) -> str | None: """Hash value for media image.""" if hasattr(self, "_attr_media_image_hash"): return self._attr_media_image_hash url = self.media_image_url if url is not None: return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16] return None async def async_get_media_image(self): """Fetch media image of current playing image.""" url = self.media_image_url if url is None: return None, None return await self._async_fetch_image_from_cache(url) async def async_get_browse_image( self, media_content_type: str, media_content_id: str, media_image_id: str | None = None, ) -> tuple[str | None, str | None]: """ Optionally fetch internally accessible image for media browser. Must be implemented by integration. 
""" return None, None @property def media_title(self) -> str | None: """Title of current playing media.""" return self._attr_media_title @property def media_artist(self) -> str | None: """Artist of current playing media, music track only.""" return self._attr_media_artist @property def media_album_name(self) -> str | None: """Album name of current playing media, music track only.""" return self._attr_media_album_name @property def media_album_artist(self) -> str | None: """Album artist of current playing media, music track only.""" return self._attr_media_album_artist @property def media_track(self) -> int | None: """Track number of current playing media, music track only.""" return self._attr_media_track @property def media_series_title(self) -> str | None: """Title of series of current playing media, TV show only.""" return self._attr_media_series_title @property def media_season(self) -> str | None: """Season of current playing media, TV show only.""" return self._attr_media_season @property def media_episode(self) -> str | None: """Episode of current playing media, TV show only.""" return self._attr_media_episode @property def media_channel(self) -> str | None: """Channel currently playing.""" return self._attr_media_channel @property def media_playlist(self) -> str | None: """Title of Playlist currently playing.""" return self._attr_media_playlist @property def app_id(self) -> str | None: """ID of the current running app.""" return self._attr_app_id @property def app_name(self) -> str | None: """Name of the current running app.""" return self._attr_app_name @property def source(self) -> str | None: """Name of the current input source.""" return self._attr_source @property def source_list(self) -> list[str] | None: """List of available input sources.""" return self._attr_source_list @property def sound_mode(self) -> str | None: """Name of the current sound mode.""" return self._attr_sound_mode @property def sound_mode_list(self) -> list[str] | None: """List of available sound modes.""" return self._attr_sound_mode_list @property def shuffle(self) -> bool | None: """Boolean if shuffle is enabled.""" return self._attr_shuffle @property def repeat(self) -> str | None: """Return current repeat mode.""" return self._attr_repeat @property def group_members(self) -> list[str] | None: """List of members which are currently grouped together.""" return self._attr_group_members @property def supported_features(self) -> int: """Flag media player features that are supported.""" return self._attr_supported_features def turn_on(self): """Turn the media player on.""" raise NotImplementedError() async def async_turn_on(self): """Turn the media player on.""" await self.hass.async_add_executor_job(self.turn_on) def turn_off(self): """Turn the media player off.""" raise NotImplementedError() async def async_turn_off(self): """Turn the media player off.""" await self.hass.async_add_executor_job(self.turn_off) def mute_volume(self, mute): """Mute the volume.""" raise NotImplementedError() async def async_mute_volume(self, mute): """Mute the volume.""" await self.hass.async_add_executor_job(self.mute_volume, mute) def set_volume_level(self, volume): """Set volume level, range 0..1.""" raise NotImplementedError() async def async_set_volume_level(self, volume): """Set volume level, range 0..1.""" await self.hass.async_add_executor_job(self.set_volume_level, volume) def media_play(self): """Send play command.""" raise NotImplementedError() async def async_media_play(self): """Send play command.""" await 
self.hass.async_add_executor_job(self.media_play) def media_pause(self): """Send pause command.""" raise NotImplementedError() async def async_media_pause(self): """Send pause command.""" await self.hass.async_add_executor_job(self.media_pause) def media_stop(self): """Send stop command.""" raise NotImplementedError() async def async_media_stop(self): """Send stop command.""" await self.hass.async_add_executor_job(self.media_stop) def media_previous_track(self): """Send previous track command.""" raise NotImplementedError() async def async_media_previous_track(self): """Send previous track command.""" await self.hass.async_add_executor_job(self.media_previous_track) def media_next_track(self): """Send next track command.""" raise NotImplementedError() async def async_media_next_track(self): """Send next track command.""" await self.hass.async_add_executor_job(self.media_next_track) def media_seek(self, position): """Send seek command.""" raise NotImplementedError() async def async_media_seek(self, position): """Send seek command.""" await self.hass.async_add_executor_job(self.media_seek, position) def play_media(self, media_type, media_id, **kwargs): """Play a piece of media.""" raise NotImplementedError() async def async_play_media(self, media_type, media_id, **kwargs): """Play a piece of media.""" await self.hass.async_add_executor_job( ft.partial(self.play_media, media_type, media_id, **kwargs) ) def select_source(self, source): """Select input source.""" raise NotImplementedError() async def async_select_source(self, source): """Select input source.""" await self.hass.async_add_executor_job(self.select_source, source) def select_sound_mode(self, sound_mode): """Select sound mode.""" raise NotImplementedError() async def async_select_sound_mode(self, sound_mode): """Select sound mode.""" await self.hass.async_add_executor_job(self.select_sound_mode, sound_mode) def clear_playlist(self): """Clear players playlist.""" raise NotImplementedError() async def async_clear_playlist(self): """Clear players playlist.""" await self.hass.async_add_executor_job(self.clear_playlist) def set_shuffle(self, shuffle): """Enable/disable shuffle mode.""" raise NotImplementedError() async def async_set_shuffle(self, shuffle): """Enable/disable shuffle mode.""" await self.hass.async_add_executor_job(self.set_shuffle, shuffle) def set_repeat(self, repeat): """Set repeat mode.""" raise NotImplementedError() async def async_set_repeat(self, repeat): """Set repeat mode.""" await self.hass.async_add_executor_job(self.set_repeat, repeat) # No need to overwrite these. 
@property def support_play(self): """Boolean if play is supported.""" return bool(self.supported_features & SUPPORT_PLAY) @property def support_pause(self): """Boolean if pause is supported.""" return bool(self.supported_features & SUPPORT_PAUSE) @property def support_stop(self): """Boolean if stop is supported.""" return bool(self.supported_features & SUPPORT_STOP) @property def support_seek(self): """Boolean if seek is supported.""" return bool(self.supported_features & SUPPORT_SEEK) @property def support_volume_set(self): """Boolean if setting volume is supported.""" return bool(self.supported_features & SUPPORT_VOLUME_SET) @property def support_volume_mute(self): """Boolean if muting volume is supported.""" return bool(self.supported_features & SUPPORT_VOLUME_MUTE) @property def support_previous_track(self): """Boolean if previous track command supported.""" return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK) @property def support_next_track(self): """Boolean if next track command supported.""" return bool(self.supported_features & SUPPORT_NEXT_TRACK) @property def support_play_media(self): """Boolean if play media command supported.""" return bool(self.supported_features & SUPPORT_PLAY_MEDIA) @property def support_select_source(self): """Boolean if select source command supported.""" return bool(self.supported_features & SUPPORT_SELECT_SOURCE) @property def support_select_sound_mode(self): """Boolean if select sound mode command supported.""" return bool(self.supported_features & SUPPORT_SELECT_SOUND_MODE) @property def support_clear_playlist(self): """Boolean if clear playlist command supported.""" return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST) @property def support_shuffle_set(self): """Boolean if shuffle is supported.""" return bool(self.supported_features & SUPPORT_SHUFFLE_SET) @property def support_grouping(self): """Boolean if player grouping is supported.""" return bool(self.supported_features & SUPPORT_GROUPING) async def async_toggle(self): """Toggle the power on the media player.""" if hasattr(self, "toggle"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.toggle) return if self.state in [STATE_OFF, STATE_IDLE]: await self.async_turn_on() else: await self.async_turn_off() async def async_volume_up(self): """Turn volume up for media player. This method is a coroutine. """ if hasattr(self, "volume_up"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.volume_up) return if self.volume_level < 1 and self.supported_features & SUPPORT_VOLUME_SET: await self.async_set_volume_level(min(1, self.volume_level + 0.1)) async def async_volume_down(self): """Turn volume down for media player. This method is a coroutine. 
""" if hasattr(self, "volume_down"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.volume_down) return if self.volume_level > 0 and self.supported_features & SUPPORT_VOLUME_SET: await self.async_set_volume_level(max(0, self.volume_level - 0.1)) async def async_media_play_pause(self): """Play or pause the media player.""" if hasattr(self, "media_play_pause"): # pylint: disable=no-member await self.hass.async_add_executor_job(self.media_play_pause) return if self.state == STATE_PLAYING: await self.async_media_pause() else: await self.async_media_play() @property def entity_picture(self): """Return image of the media playing.""" if self.state == STATE_OFF: return None if self.media_image_remotely_accessible: return self.media_image_url return self.media_image_local @property def media_image_local(self): """Return local url to media image.""" image_hash = self.media_image_hash if image_hash is None: return None return ( f"/api/media_player_proxy/{self.entity_id}?" f"token={self.access_token}&cache={image_hash}" ) @property def capability_attributes(self): """Return capability attributes.""" supported_features = self.supported_features or 0 data = {} if supported_features & SUPPORT_SELECT_SOURCE: source_list = self.source_list if source_list: data[ATTR_INPUT_SOURCE_LIST] = source_list if supported_features & SUPPORT_SELECT_SOUND_MODE: sound_mode_list = self.sound_mode_list if sound_mode_list: data[ATTR_SOUND_MODE_LIST] = sound_mode_list return data @final @property def state_attributes(self): """Return the state attributes.""" if self.state == STATE_OFF: return None state_attr = {} for attr in ATTR_TO_PROPERTY: value = getattr(self, attr) if value is not None: state_attr[attr] = value if self.media_image_remotely_accessible: state_attr["entity_picture_local"] = self.media_image_local if self.support_grouping: state_attr[ATTR_GROUP_MEMBERS] = self.group_members return state_attr async def async_browse_media( self, media_content_type: str | None = None, media_content_id: str | None = None, ) -> BrowseMedia: """Return a BrowseMedia instance. The BrowseMedia instance will be used by the "media_player/browse_media" websocket command. """ raise NotImplementedError() def join_players(self, group_members): """Join `group_members` as a player group with the current player.""" raise NotImplementedError() async def async_join_players(self, group_members): """Join `group_members` as a player group with the current player.""" await self.hass.async_add_executor_job(self.join_players, group_members) def unjoin_player(self): """Remove this player from any group.""" raise NotImplementedError() async def async_unjoin_player(self): """Remove this player from any group.""" await self.hass.async_add_executor_job(self.unjoin_player) async def _async_fetch_image_from_cache(self, url): """Fetch image. Images are cached in memory (the images are typically 10-100kB in size). 
""" cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES] cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE] if urlparse(url).hostname is None: url = f"{get_url(self.hass)}{url}" if url not in cache_images: cache_images[url] = {CACHE_LOCK: asyncio.Lock()} async with cache_images[url][CACHE_LOCK]: if CACHE_CONTENT in cache_images[url]: return cache_images[url][CACHE_CONTENT] (content, content_type) = await self._async_fetch_image(url) async with cache_images[url][CACHE_LOCK]: cache_images[url][CACHE_CONTENT] = content, content_type while len(cache_images) > cache_maxsize: cache_images.popitem(last=False) return content, content_type async def _async_fetch_image(self, url): """Retrieve an image.""" content, content_type = (None, None) websession = async_get_clientsession(self.hass) with suppress(asyncio.TimeoutError), async_timeout.timeout(10): response = await websession.get(url) if response.status == HTTP_OK: content = await response.read() content_type = response.headers.get(CONTENT_TYPE) if content_type: content_type = content_type.split(";")[0] if content is None: _LOGGER.warning("Error retrieving proxied image from %s", url) return content, content_type def get_browse_image_url( self, media_content_type: str, media_content_id: str, media_image_id: str | None = None, ) -> str: """Generate an url for a media browser image.""" url_path = ( f"/api/media_player_proxy/{self.entity_id}/browse_media" f"/{media_content_type}/{media_content_id}" ) url_query = {"token": self.access_token} if media_image_id: url_query["media_image_id"] = media_image_id return str(URL(url_path).with_query(url_query)) class MediaPlayerImageView(HomeAssistantView): """Media player view to serve an image.""" requires_auth = False url = "/api/media_player_proxy/{entity_id}" name = "api:media_player:image" extra_urls = [ url + "/browse_media/{media_content_type}/{media_content_id}", ] def __init__(self, component): """Initialize a media player view.""" self.component = component async def get( self, request: web.Request, entity_id: str, media_content_type: str | None = None, media_content_id: str | None = None, ) -> web.Response: """Start a get request.""" player = self.component.get_entity(entity_id) if player is None: status = HTTP_NOT_FOUND if request[KEY_AUTHENTICATED] else HTTP_UNAUTHORIZED return web.Response(status=status) authenticated = ( request[KEY_AUTHENTICATED] or request.query.get("token") == player.access_token ) if not authenticated: return web.Response(status=HTTP_UNAUTHORIZED) if media_content_type and media_content_id: media_image_id = request.query.get("media_image_id") data, content_type = await player.async_get_browse_image( media_content_type, media_content_id, media_image_id ) else: data, content_type = await player.async_get_media_image() if data is None: return web.Response(status=HTTP_INTERNAL_SERVER_ERROR) headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"} return web.Response(body=data, content_type=content_type, headers=headers) @websocket_api.websocket_command( { vol.Required("type"): "media_player_thumbnail", vol.Required("entity_id"): cv.entity_id, } ) @websocket_api.async_response async def websocket_handle_thumbnail(hass, connection, msg): """Handle get media player cover command. Async friendly. """ component = hass.data[DOMAIN] player = component.get_entity(msg["entity_id"]) if player is None: connection.send_message( websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found") ) return _LOGGER.warning( "The websocket command media_player_thumbnail is deprecated. 
Use /api/media_player_proxy instead" ) data, content_type = await player.async_get_media_image() if data is None: connection.send_message( websocket_api.error_message( msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail" ) ) return await connection.send_big_result( msg["id"], { "content_type": content_type, "content": base64.b64encode(data).decode("utf-8"), }, ) @websocket_api.websocket_command( { vol.Required("type"): "media_player/browse_media", vol.Required("entity_id"): cv.entity_id, vol.Inclusive( ATTR_MEDIA_CONTENT_TYPE, "media_ids", "media_content_type and media_content_id must be provided together", ): str, vol.Inclusive( ATTR_MEDIA_CONTENT_ID, "media_ids", "media_content_type and media_content_id must be provided together", ): str, } ) @websocket_api.async_response async def websocket_browse_media(hass, connection, msg): """ Browse media available to the media_player entity. To use, media_player integrations can implement MediaPlayerEntity.async_browse_media() """ component = hass.data[DOMAIN] player: MediaPlayerDevice | None = component.get_entity(msg["entity_id"]) if player is None: connection.send_error(msg["id"], "entity_not_found", "Entity not found") return if not player.supported_features & SUPPORT_BROWSE_MEDIA: connection.send_message( websocket_api.error_message( msg["id"], ERR_NOT_SUPPORTED, "Player does not support browsing media" ) ) return media_content_type = msg.get(ATTR_MEDIA_CONTENT_TYPE) media_content_id = msg.get(ATTR_MEDIA_CONTENT_ID) try: payload = await player.async_browse_media(media_content_type, media_content_id) except NotImplementedError: _LOGGER.error( "%s allows media browsing but its integration (%s) does not", player.entity_id, player.platform.platform_name, ) connection.send_message( websocket_api.error_message( msg["id"], ERR_NOT_SUPPORTED, "Integration does not support browsing media", ) ) return except BrowseError as err: connection.send_message( websocket_api.error_message(msg["id"], ERR_UNKNOWN_ERROR, str(err)) ) return # For backwards compat if isinstance(payload, BrowseMedia): payload = payload.as_dict() else: _LOGGER.warning("Browse Media should use new BrowseMedia class") connection.send_result(msg["id"], payload) class MediaPlayerDevice(MediaPlayerEntity): """ABC for media player devices (for backwards compatibility).""" def __init_subclass__(cls, **kwargs): """Print deprecation warning.""" super().__init_subclass__(**kwargs) _LOGGER.warning( "MediaPlayerDevice is deprecated, modify %s to extend MediaPlayerEntity", cls.__name__, ) class BrowseMedia: """Represent a browsable media file.""" def __init__( self, *, media_class: str, media_content_id: str, media_content_type: str, title: str, can_play: bool, can_expand: bool, children: list[BrowseMedia] | None = None, children_media_class: str | None = None, thumbnail: str | None = None, ) -> None: """Initialize browse media item.""" self.media_class = media_class self.media_content_id = media_content_id self.media_content_type = media_content_type self.title = title self.can_play = can_play self.can_expand = can_expand self.children = children self.children_media_class = children_media_class self.thumbnail = thumbnail def as_dict(self, *, parent: bool = True) -> dict: """Convert Media class to browse media dictionary.""" if self.children_media_class is None: self.calculate_children_class() response = { "title": self.title, "media_class": self.media_class, "media_content_type": self.media_content_type, "media_content_id": self.media_content_id, "can_play": self.can_play, 
"can_expand": self.can_expand, "children_media_class": self.children_media_class, "thumbnail": self.thumbnail, } if not parent: return response if self.children: response["children"] = [ child.as_dict(parent=False) for child in self.children ] else: response["children"] = [] return response def calculate_children_class(self) -> None: """Count the children media classes and calculate the correct class.""" if self.children is None or len(self.children) == 0: return self.children_media_class = MEDIA_CLASS_DIRECTORY proposed_class = self.children[0].media_class if all(child.media_class == proposed_class for child in self.children): self.children_media_class = proposed_class
apache-2.0
7,419,448,609,892,268,000
30.31225
105
0.620727
false
3.920702
false
false
false
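The `_rename_keys` helper in the media_player record above exists because service schemas carry attribute names (such as `ATTR_MEDIA_VOLUME_LEVEL`) while the entity methods take plain parameter names (such as `volume`). A small standalone sketch of that mapping, using a hypothetical payload:

def _rename_keys(**keys):
    """Validator factory copied from the record above: renames schema keys."""
    def rename(value):
        for to_key, from_key in keys.items():
            if from_key in value:
                value[to_key] = value.pop(from_key)
        return value
    return rename


# Hypothetical service payload: the schema field name is rewritten to the
# parameter name the entity method expects before the service call is made.
validator = _rename_keys(volume="media_volume_level")
print(validator({"entity_id": "media_player.demo", "media_volume_level": 0.4}))
# -> {'entity_id': 'media_player.demo', 'volume': 0.4}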
Bdanilko/EdPy
src/lib/program.py
1
21793
#!/usr/bin/env python2 # * **************************************************************** ** # File: program.py # Requires: Python 2.7+ (but not Python 3.0+) # Note: For history, changes and dates for this file, consult git. # Author: Brian Danilko, Likeable Software (brian@likeablesoftware.com) # Copyright 2015-2017 Microbric Pty Ltd. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License (in the doc/licenses directory) # for more details. # # * **************************************************************** */ """ Module contains Objects that represent the Ed.Py program """ from __future__ import print_function from __future__ import absolute_import class EdPyError(Exception): def __init__(self): pass class ParseError(EdPyError): def __init__(self, rawmsg=""): self.rawmsg = rawmsg class OptError(EdPyError): def __init__(self, rawmsg=""): self.rawmsg = rawmsg class CompileError(EdPyError): def __init__(self, rawmsg=""): self.rawmsg = rawmsg class AssemblerError(EdPyError): def __init__(self, rawmsg=""): self.rawmsg = rawmsg class UnclassifiedError(Exception): def __init__(self, rawmsg): self.rawmsg = rawmsg class Marker(object): """Mark each source line (but not worrying about column number)""" def __init__(self, line, col=None): self.kind = "Marker" self.line = line self.col = col def GetValues(self): return [] def GetTarget(self): return None def __repr__(self): return "<program.Marker source line:{0}>".format(self.line) class ControlMarker(object): """Marks start/else/end of If structures, While loops, For loops and Boolean Checks (for short-circuit evaluation). This marks a series of locations that tests can jump to.""" def __init__(self, markerNumber, name, end="start"): self.kind = "ControlMarker" self.num = markerNumber self.name = name # string - type of loop: "If", "While", "For", "Or", "And" self.end = end # a string - one of "start", "else", "end" self.CheckData() def GetNumber(self): return self.num def CheckData(self): if (self.name not in ("If", "While", "For", "Or", "And")): raise UnclassifiedError("Invalid program.ControlMarker() name.") if (self.end not in ("start", "else", "end")): raise UnclassifiedError("Invalid program.ControlMarker() end.") def GetValues(self): return [] def GetTarget(self): return None def __repr__(self): msg = "<program.ControlMarker marker:{0} {1} {2}>".format(self.num, self.name, self.end) return msg class LoopControl(object): """Used at the top of If and While loops (where a test needs to be evaluated). The markerNumber is the same as used in ControlMarkers, so jumps to locations marked by the corresponding ControlMarker will be done.""" def __init__(self, markerNumber, name=None, test=None): self.kind = "LoopControl" self.num = markerNumber self.name = name # a string "If", "While" self.test = test # a Value object. if evaluates to 0 then False, else True def GetValues(self): return [self.test] def GetTarget(self): return None def __repr__(self): msg = "<program.LoopControl {0}, name:{1}, test:{2}>".format( self.num, self.name, self.test) return msg class LoopModifier(object): """Mark, inside ControlMarkers, Breaks and Continues. 
As the markerNumber is the same as the corresponding ControlMarker markerNumber, jumps to the "start" or "end" is easy.""" def __init__(self, markerNumber, name=None): self.kind = "LoopModifier" self.num = markerNumber self.name = name # a string "Pass", "Break", "Continue" def GetValues(self): return [] def GetTarget(self): return None def __repr__(self): msg = "<program.LoopModifier {0}, name:{1}>".format( self.num, self.name) return msg class ForControl(object): """In a for loop, this will check that arrayValue is still inside the array. If not a jump to the "end" of the corresponding ControlMarker will be made.""" def __init__(self, markerNumber, arrayValue=None, constantLimit=None, currentValue=None): self.kind = "ForControl" self.num = markerNumber self.arrayValue = arrayValue # a value with name and iVariable self.constantLimit = constantLimit # a value self.currentValue = currentValue # a value if ((self.arrayValue is None and self.constantLimit is None) or (self.arrayValue is not None and self.constantLimit is not None) or (self.currentValue is None and self.constantLimit is not None) or (self.currentValue is not None and self.constantLimit is None)): raise UnclassifiedError("Invalid program.ForControl() condition.") def IsRange(self): return self.constantLimit is not None def IsArray(self): return self.arrayValue is not None def GetValues(self): if (self.IsArray()): return [self.arrayValue] else: return [self.constantLimit, self.currentValue] def GetTarget(self): return None def __repr__(self): msg = "<program.ForControl {0}, ".format(self.num) if (self.IsArray()): msg += "arrayValue:{0}>".format(self.arrayValue) else: msg += "constantLimit:{0}, currentValue:{1}>".format(self.constantLimit, self.currentValue) return msg class BoolCheck(object): """In a BoolOp, there is a need to short-curcuit evaluation on pass (or) or failure (and). This object is used in each location where a value is checked, and possible short-curcuit eval. may require a jump to the "end" of the corresponding ControlMarker""" def __init__(self, markerNumber, op=None, value=None, target=None): """An binary operation on constants or variables, assigned to a variable""" self.kind = "BoolCheck" self.num = markerNumber self.op = op # a string - the boolean op ("Or", "And", "Done") # Done signifies to put the non-shortcircuit value in target self.value = value # a Value object which has the left result of the op self.target = target # a Value object which gets the result on short-circuit def GetValues(self): return [self.value] def GetTarget(self): return self.target def __repr__(self): return "<program.BoolCheck {0} {1} check:{2}, target{3}>".format( self.num, self.op, self.value, self.target) class Value(object): """Stores an integer variable or constant or string constant, and depending on where it is used in the other objects, can represent a STORE or a LOAD. 
Note that for a STORE, this object can not represent a constant""" def __init__(self, constant=None, name=None, iConstant=None, iVariable=None, strConst=None, listConst=None, tsRef=None, listRef=None, objectRef=None): self.kind = "Value" self.name = name # The name of the variable self.indexConstant = iConstant # if not None, then the value is a slice at this index self.indexVariable = iVariable self.constant = constant # if not None, then this is the value (integer) self.strConst = strConst # if not None, then a string self.listConst = listConst # if not None, then a list self.tsRef = tsRef # if not None, then a reference to a tunestring variable self.listRef = listRef # if not None, then a reference to a list variable self.objectRef = objectRef # if not None, then a reference to an object variable self.loopTempStart = 9999 # All temps above this number are loop control temps # check that the object has been created consistently if (((self.IsIntConst()) and ((self.name is not None) or self.IsSlice() or self.IsStrConst() or self.IsListConst() or self.IsRef())) or ((self.IsStrConst()) and ((self.name is not None) or self.IsSlice() or self.IsRef() or self.IsListConst() or self.IsIntConst())) or ((self.IsListConst()) and ((self.name is not None) or self.IsSlice() or self.IsRef() or self.IsStrConst() or self.IsIntConst())) or (self.IsRef() and ((self.name is not None) or self.IsSlice() or self.IsStrConst() or self.IsListConst() or self.IsIntConst())) or ((self.indexConstant is not None) and (self.indexVariable is not None)) or ((self.indexConstant is not None) and (self.name is None)) or ((self.indexVariable is not None) and (self.name is None)) or ((self.tsRef is not None) and ((self.listRef is not None) or (self.objectRef is not None))) or ((self.listRef is not None) and ((self.tsRef is not None) or (self.objectRef is not None))) or ((self.objectRef is not None) and ((self.listRef is not None) or (self.tsRef is not None)))): raise UnclassifiedError("Invalid program.Value() constructor arguments") def IsIntConst(self): return self.constant is not None def IsStrConst(self): return (self.strConst is not None) def IsListConst(self): return (self.listConst is not None) def IsTSRef(self): return self.tsRef is not None def IsListRef(self): return self.listRef is not None def IsObjRef(self): return self.objectRef is not None def IsRef(self): return self.IsTSRef() or self.IsListRef() or self.IsObjRef() def IsConstant(self): return self.IsIntConst() or self.IsStrConst() or self.IsListConst() def IsSimpleVar(self): return (not (self.IsConstant() or self.IsSlice() or self.IsRef())) def IsSlice(self): return self.indexConstant is not None or self.indexVariable is not None def IsDotted(self): if (not self.IsTemp()): left, sep, right = self.name.partition(self.name) if (right != ""): return True return False def IsTemp(self): if self.IsSimpleVar(): if type(self.name) is int: return True return False def IsSimpleTemp(self): return self.IsTemp() and (self.name < self.loopTempStart) def IsSliceWithSimpleTempIndex(self): return (self.IsSlice() and self.indexVariable is not None and type(self.indexVariable) is int and (self.indexVariable < self.loopTempStart)) def IsSliceWithVarIndex(self): return self.IsSlice() and self.indexVariable is not None and type(self.indexVariable) is not int def IsAssignable(self): return not (self.IsRef() or self.IsConstant()) def UsesValue(self, otherValue): if (otherValue.IsSimpleVar()): if ((self.IsSimpleVar() and self.name == otherValue.name) or (self.IsSlice() and 
self.indexVariable == otherValue.name)): return True elif (otherValue.IsSlice()): return self == otherValue return False def Name(self): if self.IsConstant(): return "????" elif not self.IsSlice(): if type(self.name) is int: return "TEMP-" + str(self.name) else: return self.name elif self.indexConstant is not None: return self.name + "[" + str(self.indexConstant) + "]" elif type(self.indexVariable) is int: return self.name + "[TEMP-" + str(self.indexVariable) + "]" else: return self.name + "[" + self.indexVariable + "]" def __eq__(self, rhs): return ((self.kind == rhs.kind) and (self.name == rhs.name) and (self.indexConstant == rhs.indexConstant) and (self.indexVariable == rhs.indexVariable) and (self.constant == rhs.constant) and (self.strConst == rhs.strConst) and (self.listConst == rhs.listConst) and (self.tsRef == rhs.tsRef) and (self.listRef == rhs.listRef) and (self.objectRef == rhs.objectRef)) def GetValues(self): return [self] def GetTarget(self): return None def __repr__(self): if self.constant is not None: return "<program.Value const:{0}>".format(self.constant) elif self.IsStrConst(): return "<program.Value const:\"{0}\">".format(self.strConst) elif self.IsListConst(): return "<program.Value const:{0}>".format(self.listConst) elif self.IsTSRef(): return "<program.Value T_Ref:{0}>".format(self.tsRef) elif self.IsListRef(): return "<program.Value L_Ref:{0}>".format(self.listRef) elif self.IsObjRef(): return "<program.Value O_Ref:{0}>".format(self.objectRef) else: return "<program.Value name:{0}>".format(self.Name()) class UAssign(object): """Represent an Unary Op with assignment to a variable (target)""" def __init__(self, target=None, op=None, operand=None): """A unary operation on constants or variables, assigned to a variable""" self.kind = "UAssign" self.target = target # a value object self.operation = op # a unary operation (could be UAdd for identity self.operand = operand # (used for binary op or unary op) if used then a Value object def GetValues(self): if (self.operand is None): return [] else: return [self.operand] def GetTarget(self): return self.target def __repr__(self): msg = "<program.UAssign {0} = ".format(self.target) msg += "{0} {1}>".format(self.operation, self.operand) return msg class BAssign(object): """Represent a Binary Op (including logical tests) with assignment to a variable (target)""" def __init__(self, target=None, left=None, op=None, right=None): """An binary operation on constants or variables, assigned to a variable""" self.kind = "BAssign" self.target = target # a value object self.left = left # a Value object self.operation = op # binary operation self.right = right # a Value object def GetValues(self): return [self.left, self.right] def GetTarget(self): return self.target def __repr__(self): msg = "<program.BAssign {0} = ".format(self.target) msg += "{0} {1} {2}>".format(self.left, self.operation, self.right) return msg class Call(object): """Calling a function, optionally assigning the result to a variable (if self.target is not None).""" def __init__(self, target=None, funcName=None, args=[]): self.kind = "Call" self.target = target # a Value object OR CAN BE NONE! 
self.funcName = funcName # a String self.args = args # each arg is a Value object def GetValues(self): return self.args def GetTarget(self): if (self.target is None): return None else: return self.target def __repr__(self): msg = "<program.Call " if (self.target is not None): msg += "{0} = ".format(self.target) msg += "name:{0} with args:{1}>".format(self.funcName, self.args) return msg class Return(object): """Return an explicit value (an int) or nothing from the function""" def __init__(self, returnValue=None): self.kind = "Return" self.returnValue = returnValue def IsVoidReturn(self): return self.returnValue is None def GetValues(self): if self.returnValue is None: return [] else: return [self.returnValue] def GetTarget(self): return None def __repr__(self): return "<program.Return {0}>".format(self.returnValue) # ######## Top level objects ############################## class Function(object): def __init__(self, name, internalFunc = False): self.kind = "Function" self.name = name self.docString = "" self.internalFunction = internalFunc self.globalAccess = [] # Global variable names can write too self.localVar = {} # local variable types (including temps) self.args = [] self.callsTo = [] # functions called from this function self.maxSimpleTemps = 0 # Number of integer temps needed, # they will be from 0 - (maxSimpleTemps - 1). self.body = [] # contains objects of type 'Op', 'Call' self.returnsValue = False # explicit return with a value self.returnsNone = False # explicit return but with no value def __repr__(self): msg = "<program.Function name:{0}, doc:|{1}|, ".format( self.name, self.docString) msg += "args:{0}, lclVars:{1}, glbWriteVars:{2}, maxSimpleTemps:{3}, internal:{4}".format( self.args, self.localVar, self.globalAccess, self.maxSimpleTemps, self.internalFunction) return msg + "returnsValue:{0}, calls:{1}, body:{2}>".format( self.returnsValue, self.callsTo, self.body) def IsInternalFunction(self): return self.internalFunction class Class(object): def __init__(self, name): self.kind = "Class" self.name = name self.docString = "" self.funcNames = [] def __repr__(self): return "<program.Class name:{}, doc:|{}|, funcNames:{}>".format( self.name, self.docString, self.funcNames) class Program(object): def __init__(self): self.kind = "Program" self.EdVariables = {} self.Import = [] mainFunction = Function("__main__") self.Function = {"__main__": mainFunction} self.FunctionSigDict = {} self.EventHandlers = {} self.globalVar = {} self.GlobalTypeDict = {} self.Class = {} self.indent = 0 def __repr__(self): return "<program.Program Import:{}, Global:{}, Function:{}, Class:{}>".format( self.Import, self.globalVar, self.Function, self.Class) def Print(self, prefix="", *vars): if (prefix == "" and len(vars) == 0): print() else: if (prefix.startswith('\n')): print() prefix = prefix[1:] indentSpaces = " " * (self.indent) if (prefix): print(indentSpaces, prefix, sep='', end='') else: print(indentSpaces, end='') for v in vars: print(' ', v, sep='', end='') print() def Dump(self, filterOutInternals=True): """Dump the full program""" self.Print("Program") self.Print("\Edison variables:", self.EdVariables) self.Print("\nImports:", self.Import) self.Print("\nGlobals:", self.globalVar) self.Print("\nClasses:", self.Class) self.Print("\nFunctions:", self.Function.keys()) self.Print("\nFunction Sigs:", self.FunctionSigDict) self.Print("\nEvent Handlers:", self.EventHandlers) self.Print("\nFunction Details:") self.indent += 2 sigsPrinted = [] for i in self.Function: if (filterOutInternals and 
self.Function[i].IsInternalFunction()): continue self.Print() f = self.Function[i] if (f.IsInternalFunction()): name = "{}-internal".format(i) else: name = i self.Print("", name) self.indent += 2 self.Print("Args:", f.args) if (i in self.FunctionSigDict): sigsPrinted.append(i) self.Print("Signature:", self.FunctionSigDict[i]) self.Print("Globals can write:", f.globalAccess) self.Print("Local vars:", f.localVar) self.Print("Max simple temps:", f.maxSimpleTemps) self.Print("Functions called:", f.callsTo) self.indent += 2 for l in f.body: if (l.kind == "Marker"): self.Print() self.Print("", l) self.indent -= 4 self.indent -= 2 # header = "\nExternal functions:" # for i in self.FunctionSigDict: # if (i not in sigsPrinted): # if header: # self.Print(header) # header = None # self.Print("External function:", i) # self.indent += 2 # self.Print("Signature:", self.FunctionSigDict[i]) # self.indent -= 2
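The docstrings in program.py above describe how `ControlMarker`, `LoopControl` and `LoopModifier` share a marker number so that a `Break` or `Continue` can jump to the matching "start" or "end" location. A rough sketch of the object sequence a `while` loop containing a `break` might lower to, assuming `program.py` is importable as `program`; the exact sequence emitted by the real compiler is not shown in this record.

from program import ControlMarker, LoopControl, LoopModifier, Value  # src/lib on sys.path

# Hypothetical lowering of:  while x: ... break ...
# The shared marker number (7 here) is what lets Break find the matching "end".
test = Value(name="x")
body = [
    ControlMarker(7, "While", end="start"),
    LoopControl(7, name="While", test=test),
    LoopModifier(7, name="Break"),
    ControlMarker(7, "While", end="end"),
]
for obj in body:
    print(obj)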
gpl-2.0
-59,243,137,877,478,696
33.757576
104
0.581976
false
4.037984
false
false
false
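The `Value` class in the program.py record above distinguishes integer constants, simple variables, slices and temporaries through its `Is*` predicates, and `BAssign` ties two of them to a target. A short sketch of those constructors, again assuming the module is importable as `program`; the `"Add"` operation string is illustrative only.

from program import BAssign, Value  # src/lib on sys.path

const_one = Value(constant=1)              # integer constant
counter = Value(name="counter")            # simple named variable
slot = Value(name="data", iConstant=2)     # data[2]: a slice with a constant index
temp = Value(name=3)                       # integer names denote temporaries

print(const_one.IsIntConst(), counter.IsSimpleVar(), slot.IsSlice(), temp.IsTemp())
# -> True True True True

# counter = counter + 1; the "Add" operation string is an assumption.
incr = BAssign(target=counter, left=counter, op="Add", right=const_one)
print(incr)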
kevinkellyspacey/standalone-dell-recovery
Dell/recovery_xml.py
1
5532
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # «recovery_xml» - Helper Class for parsing and using a bto.xml # # Copyright (C) 2010-2011, Dell Inc. # # Author: # - Mario Limonciello <Mario_Limonciello@Dell.com> # # This is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this application; if not, write to the Free Software Foundation, Inc., 51 # Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ################################################################################## import xml.dom.minidom import codecs import os import sys if sys.version >= '3': text_type = str binary_type = bytes else: text_type = unicode binary_type = str def utf8str(old): if isinstance(old, text_type): return old else: return text_type(binary_type(old), 'utf-8', errors='ignore') class BTOxml: def __init__(self): self.dom = None self.new = False self.load_bto_xml() def set_base(self, name, md5=''): """Sets the base image""" self.replace_node_contents('base', name) if md5: self.dom.getElementsByTagName('base')[0].setAttribute('md5', md5) def append_fish(self, fish_type, name, md5='', srv=''): """Appends a fish package""" elements = self.dom.getElementsByTagName('fish') new_element = self.dom.createElement(fish_type) if md5: new_element.setAttribute('md5', md5) if srv: new_element.setAttribute('srv', srv) new_node = self.dom.createTextNode(name) new_element.appendChild(new_node) elements[0].appendChild(new_element) def fetch_node_contents(self, tag): """Fetches all children of a tag""" elements = self.dom.getElementsByTagName(tag) values = text_type('') if len(elements) > 1: values = [] if elements: for element in elements: child = element.firstChild if child: if len(elements) > 1: values.append(child.nodeValue.strip()) else: values = child.nodeValue.strip() return values def replace_node_contents(self, tag, new): """Replaces a node contents (that we assume exists)""" elements = self.dom.getElementsByTagName(tag) if not elements: print("Missing elements for tag") return if elements[0].hasChildNodes(): for node in elements[0].childNodes: elements[0].removeChild(node) noob = self.dom.createTextNode(utf8str(new)) elements[0].appendChild(noob) def load_bto_xml(self, fname=None): """Initialize an XML file into memory""" def create_top_level(dom): """Initializes a top level document""" element = dom.createElement('bto') dom.appendChild(element) return element def create_tag(dom, tag, append_to): """Create a subtag as necessary""" element = dom.getElementsByTagName(tag) if element: element = element[0] else: element = dom.createElement(tag) append_to.appendChild(element) return element if fname: self.new = False try: if os.path.exists(fname): with open(fname, 'rb') as f: fname = f.read() self.dom = xml.dom.minidom.parseString(utf8str(fname)) except xml.parsers.expat.ExpatError: print("Damaged XML file, regenerating") if not (fname and self.dom): self.new = True self.dom = xml.dom.minidom.Document() #test for top level bto object if self.dom.firstChild and self.dom.firstChild.localName != 'bto': 
self.dom.removeChild(self.dom.firstChild) if not self.dom.firstChild: bto = create_top_level(self.dom) else: bto = self.dom.getElementsByTagName('bto')[0] #create all our second and third level tags that are supported for tag in ['date', 'versions', 'base', 'fid', 'fish', 'logs']: element = create_tag(self.dom, tag, bto) subtags = [] if tag == 'versions': subtags = ['os', 'iso', 'generator', 'bootstrap', 'ubiquity'] elif tag == 'fid': subtags = ['git_tag', 'deb_archive'] elif tag == 'logs': subtags = ['syslog', 'debug'] for subtag in subtags: create_tag(self.dom, subtag, element) def write_xml(self, fname): """Writes out a BTO XML file based on the current data""" with codecs.open(fname, 'w', 'utf-8') as wfd: if self.new: self.dom.writexml(wfd, "", " ", "\n", encoding='utf-8') else: self.dom.writexml(wfd, encoding='utf-8')
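`BTOxml` above builds or parses a bto.xml document and exposes tag-level helpers (`set_base`, `append_fish`, `replace_node_contents`, `fetch_node_contents`, `write_xml`). A typical in-memory round trip might look like the following sketch; the file name, md5 and package values are placeholders.

from Dell.recovery_xml import BTOxml  # module path as given in the record header

bto = BTOxml()                                   # fresh in-memory bto.xml
bto.set_base('base.iso', md5='d41d8cd98f00b204e9800998ecf8427e')  # placeholder md5
bto.append_fish('driver', 'fish-package.deb', srv='A00')
bto.replace_node_contents('iso', '1.0')
print(bto.fetch_node_contents('base'))           # -> 'base.iso'
bto.write_xml('/tmp/bto.xml')                    # pretty-printed because the doc is new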
gpl-2.0
-9,137,183,234,520,056,000
34.677419
82
0.565099
false
4.081181
false
false
false
BarrelfishOS/barrelfish
tools/harness/machines/gem5.py
1
5282
########################################################################## # Copyright (c) 2012-2016 ETH Zurich. # All rights reserved. # # This file is distributed under the terms in the attached LICENSE file. # If you do not find this file, copies can be found by writing to: # ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group. ########################################################################## # Quirks: # * this is only running in single-core mode, since bootarm=0 is # used in above mentioned menu.lst import os, signal, tempfile, subprocess, shutil, time import debug, machines from machines import ARMSimulatorBase, MachineFactory, ARMSimulatorOperations GEM5_PATH = '/home/netos/tools/gem5/gem5-stable-1804' # gem5 takes quite a while to come up. If we return right away, # telnet will be opened too early and fails to connect # # SG, 2016-10-07: If this is too high, however, and we have an # early-boot bug gem5 will exit before telnet connects, and we do # not get the gem5 output at all GEM5_START_TIMEOUT = 1 # in seconds class Gem5MachineBase(ARMSimulatorBase): imagename = "armv7_a15ve_gem5_image" def __init__(self, options, operations, **kwargs): super(Gem5MachineBase, self).__init__(options, operations, **kwargs) def get_buildall_target(self): return "VExpressEMM-A15" def get_boot_timeout(self): # we set this to 10 mins since gem5 is very slow return 600 def get_test_timeout(self): # give gem5 tests enough time to complete: skb initialization takes # about 10 minutes, so set timeout to 25 minutes. # RH, 2018-08-08 newer version of gem5 is even slower ... # increased to 50 mins return 50 * 60 class Gem5MachineBaseOperations(ARMSimulatorOperations): def __init__(self, machine): super(Gem5MachineBaseOperations, self).__init__(machine) self.simulator_start_timeout = GEM5_START_TIMEOUT # menu.lst template for gem5 is special # XXX: current template does not work because gem5 coreboot NYI self.menulst_template = "menu.lst.armv7_a15ve_gem5" def get_tftp_dir(self): if self.tftp_dir is None: debug.verbose('creating temporary directory for Gem5 files') self.tftp_dir = tempfile.mkdtemp(prefix='harness_gem5_') debug.verbose('Gem5 install directory is %s' % self.tftp_dir) return self.tftp_dir def reboot(self): self._kill_child() cmd = self._get_cmdline() self.telnet_port = 3456 debug.verbose('starting "%s" in gem5.py:reboot' % ' '.join(cmd)) devnull = open('/dev/null', 'w') # remove ubuntu chroot from environment to make sure gem5 finds the # right shared libraries env = dict(os.environ) if 'LD_LIBRARY_PATH' in env: del env['LD_LIBRARY_PATH'] self.child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull, env=env) time.sleep(GEM5_START_TIMEOUT) class Gem5MachineARM(Gem5MachineBase): def __init__(self, options, operations, **kwargs): super(Gem5MachineARM, self).__init__(options, operations, **kwargs) def get_bootarch(self): return 'armv7' def get_platform(self): return 'a15ve' class Gem5MachineARMOperations(Gem5MachineBaseOperations): def set_bootmodules(self, modules): # write menu.lst in build directory debug.verbose("writing menu.lst in build directory") menulst_fullpath = os.path.join(self._machine.options.builds[0].build_dir, "platforms", "arm", self.menulst_template) debug.verbose("writing menu.lst in build directory: %s" % menulst_fullpath) self._machine._write_menu_lst(modules.get_menu_data("/"), menulst_fullpath) debug.verbose("building proper gem5 image") debug.checkcmd(["make", self._machine.imagename], cwd=self._machine.options.builds[0].build_dir) 
# SK: did not test this yet, but should work # @machines.add_machine # class Gem5MachineARMSingleCore(Gem5MachineARM): # name = 'gem5_arm_1' # def get_ncores(self): # return 1 # def _get_cmdline(self): # script_path = os.path.join(self.options.sourcedir, 'tools/arm_gem5', 'gem5script.py') # return (['gem5.fast', script_path, '--kernel=%s'%self.kernel_img, '--n=%s'%self.get_ncores()] # + GEM5_CACHES_ENABLE) class Gem5MachineARMSingleCore(Gem5MachineARM): name = 'armv7_gem5' def __init__(self, options, **kwargs): super(Gem5MachineARMSingleCore, self).__init__(options, Gem5MachineARMSingleCoreOperations(self), **kwargs) class Gem5MachineARMSingleCoreOperations(Gem5MachineARMOperations): def _get_cmdline(self): self.get_free_port() script_path = \ os.path.join(self._machine.options.sourcedir, 'tools/arm_gem5', 'boot_gem5.sh') return ([script_path, 'VExpress_EMM', self._machine.kernel_img, GEM5_PATH, str(self.telnet_port)]) MachineFactory.addMachine(Gem5MachineARMSingleCore.name, Gem5MachineARMSingleCore, bootarch="armv7", platform="a15ve")
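New gem5 machines in this harness follow one pattern: a machine class paired with an operations class, then a `MachineFactory.addMachine` registration. A hypothetical second single-core variant, reusing the operations class defined above (class and machine names are illustrative only):

# Hypothetical second machine; only the registration pattern is taken from the
# module above (Gem5MachineARM, Gem5MachineARMSingleCoreOperations, MachineFactory
# are assumed to be in scope, as they are in gem5.py itself).
class Gem5MachineARMSingleCoreFast(Gem5MachineARM):
    name = 'armv7_gem5_fast'

    def __init__(self, options, **kwargs):
        super(Gem5MachineARMSingleCoreFast, self).__init__(
            options, Gem5MachineARMSingleCoreOperations(self), **kwargs)


MachineFactory.addMachine(Gem5MachineARMSingleCoreFast.name,
                          Gem5MachineARMSingleCoreFast,
                          bootarch="armv7", platform="a15ve")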
mit
70,575,520,908,050,710
37.554745
115
0.639152
false
3.475
false
false
false
sbc100/yapf
yapf/yapflib/format_decision_state.py
1
38486
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements a format decision state object that manages whitespace decisions. Each token is processed one at a time, at which point its whitespace formatting decisions are made. A graph of potential whitespace formattings is created, where each node in the graph is a format decision state object. The heuristic tries formatting the token with and without a newline before it to determine which one has the least penalty. Therefore, the format decision state object for each decision needs to be its own unique copy. Once the heuristic determines the best formatting, it makes a non-dry run pass through the code to commit the whitespace formatting. FormatDecisionState: main class exported by this module. """ from yapf.yapflib import format_token from yapf.yapflib import object_state from yapf.yapflib import split_penalty from yapf.yapflib import style from yapf.yapflib import unwrapped_line class FormatDecisionState(object): """The current state when indenting an unwrapped line. The FormatDecisionState object is meant to be copied instead of referenced. Attributes: first_indent: The indent of the first token. column: The number of used columns in the current line. next_token: The next token to be formatted. paren_level: The level of nesting inside (), [], and {}. lowest_level_on_line: The lowest paren_level on the current line. newline: Indicates if a newline is added along the edge to this format decision state node. previous: The previous format decision state in the decision tree. stack: A stack (of _ParenState) keeping track of properties applying to parenthesis levels. comp_stack: A stack (of ComprehensionState) keeping track of properties applying to comprehensions. ignore_stack_for_comparison: Ignore the stack of _ParenState for state comparison. """ def __init__(self, line, first_indent): """Initializer. Initializes to the state after placing the first token from 'line' at 'first_indent'. Arguments: line: (UnwrappedLine) The unwrapped line we're currently processing. first_indent: (int) The indent of the first token. 
""" self.next_token = line.first self.column = first_indent self.line = line self.paren_level = 0 self.lowest_level_on_line = 0 self.ignore_stack_for_comparison = False self.stack = [_ParenState(first_indent, first_indent)] self.comp_stack = [] self.first_indent = first_indent self.newline = False self.previous = None self.column_limit = style.Get('COLUMN_LIMIT') def Clone(self): """Clones a FormatDecisionState object.""" new = FormatDecisionState(self.line, self.first_indent) new.next_token = self.next_token new.column = self.column new.line = self.line new.paren_level = self.paren_level new.line.depth = self.line.depth new.lowest_level_on_line = self.lowest_level_on_line new.ignore_stack_for_comparison = self.ignore_stack_for_comparison new.first_indent = self.first_indent new.newline = self.newline new.previous = self.previous new.stack = [state.Clone() for state in self.stack] new.comp_stack = [state.Clone() for state in self.comp_stack] return new def __eq__(self, other): # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', # because it shouldn't have a bearing on this comparison. (I.e., it will # report equal if 'next_token' does.) return (self.next_token == other.next_token and self.column == other.column and self.paren_level == other.paren_level and self.line.depth == other.line.depth and self.lowest_level_on_line == other.lowest_level_on_line and (self.ignore_stack_for_comparison or other.ignore_stack_for_comparison or self.stack == other.stack and self.comp_stack == other.comp_stack)) def __ne__(self, other): return not self == other def __hash__(self): return hash((self.next_token, self.column, self.paren_level, self.line.depth, self.lowest_level_on_line)) def __repr__(self): return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % (self.column, repr(self.next_token), self.paren_level, '\n\t'.join(repr(s) for s in self.stack) + ']')) def CanSplit(self, must_split): """Determine if we can split before the next token. Arguments: must_split: (bool) A newline was required before this token. Returns: True if the line can be split before the next token. """ current = self.next_token previous = current.previous_token if current.is_pseudo_paren: return False if (not must_split and format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes and format_token.Subtype.DICTIONARY_KEY not in current.subtypes and not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')): # In some situations, a dictionary may be multiline, but pylint doesn't # like it. So don't allow it unless forced to. return False if (not must_split and format_token.Subtype.DICTIONARY_VALUE in current.subtypes and not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')): return False if previous and previous.value == '(' and current.value == ')': # Don't split an empty function call list if we aren't splitting before # dict values. token = previous.previous_token while token: prev = token.previous_token if not prev or prev.name not in {'NAME', 'DOT'}: break token = token.previous_token if token and format_token.Subtype.DICTIONARY_VALUE in token.subtypes: if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'): return False if previous and previous.value == '.' 
and current.value == '.': return False return current.can_break_before def MustSplit(self): """Returns True if the line must split before the next token.""" current = self.next_token previous = current.previous_token if current.is_pseudo_paren: return False if current.must_break_before: return True if not previous: return False if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',': return True if (self.stack[-1].split_before_closing_bracket and current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET')): # Split before the closing bracket if we can. return current.node_split_penalty != split_penalty.UNBREAKABLE if (current.value == ')' and previous.value == ',' and not _IsSingleElementTuple(current.matching_bracket)): return True # Prevent splitting before the first argument in compound statements # with the exception of function declarations. if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and _IsCompoundStatement(self.line.first) and not _IsFunctionDef(self.line.first)): return False ########################################################################### # List Splitting if (style.Get('DEDENT_CLOSING_BRACKETS') or style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): bracket = current if current.ClosesScope() else previous if format_token.Subtype.SUBSCRIPT_BRACKET not in bracket.subtypes: if bracket.OpensScope(): if style.Get('COALESCE_BRACKETS'): if current.OpensScope(): # Prefer to keep all opening brackets together. return False if (not _IsLastScopeInLine(bracket) or unwrapped_line.IsSurroundedByBrackets(bracket)): last_token = bracket.matching_bracket else: last_token = _LastTokenInLine(bracket.matching_bracket) if not self._FitsOnLine(bracket, last_token): # Split before the first element if the whole list can't fit on a # single line. self.stack[-1].split_before_closing_bracket = True return True elif style.Get('DEDENT_CLOSING_BRACKETS') and current.ClosesScope(): # Split before and dedent the closing bracket. return self.stack[-1].split_before_closing_bracket if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and current.is_name): # An expression that's surrounded by parens gets split after the opening # parenthesis. def SurroundedByParens(token): """Check if it's an expression surrounded by parentheses.""" while token: if token.value == ',': return False if token.value == ')': return not token.next_token if token.OpensScope(): token = token.matching_bracket.next_token else: token = token.next_token return False if (previous.value == '(' and not previous.is_pseudo_paren and not unwrapped_line.IsSurroundedByBrackets(previous)): pptoken = previous.previous_token if (pptoken and not pptoken.is_name and not pptoken.is_keyword and SurroundedByParens(current)): return True if (current.is_name or current.is_string) and previous.value == ',': # If the list has function calls in it and the full list itself cannot # fit on the line, then we want to split. Otherwise, we'll get something # like this: # # X = [ # Bar(xxx='some string', # yyy='another long string', # zzz='a third long string'), Bar( # xxx='some string', # yyy='another long string', # zzz='a third long string') # ] # # or when a string formatting syntax. 
func_call_or_string_format = False tok = current.next_token if current.is_name: while tok and (tok.is_name or tok.value == '.'): tok = tok.next_token func_call_or_string_format = tok and tok.value == '(' elif current.is_string: while tok and tok.is_string: tok = tok.next_token func_call_or_string_format = tok and tok.value == '%' if func_call_or_string_format: open_bracket = unwrapped_line.IsSurroundedByBrackets(current) if open_bracket: if open_bracket.value in '[{': if not self._FitsOnLine(open_bracket, open_bracket.matching_bracket): return True elif tok.value == '(': if not self._FitsOnLine(current, tok.matching_bracket): return True ########################################################################### # Dict/Set Splitting if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and format_token.Subtype.DICTIONARY_KEY in current.subtypes and not current.is_comment): # Place each dictionary entry onto its own line. if previous.value == '{' and previous.previous_token: opening = _GetOpeningBracket(previous.previous_token) if (opening and opening.value == '(' and opening.previous_token and opening.previous_token.is_name): # This is a dictionary that's an argument to a function. if (self._FitsOnLine(previous, previous.matching_bracket) and previous.matching_bracket.next_token and (not opening.matching_bracket.next_token or opening.matching_bracket.next_token.value != '.') and _ScopeHasNoCommas(previous)): # Don't split before the key if: # - The dictionary fits on a line, and # - The function call isn't part of a builder-style call and # - The dictionary has one entry and no trailing comma return False return True if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and format_token.Subtype.DICT_SET_GENERATOR in current.subtypes): # Split before a dict/set generator. return True if (format_token.Subtype.DICTIONARY_VALUE in current.subtypes or (previous.is_pseudo_paren and previous.value == '(' and not current.is_comment)): # Split before the dictionary value if we can't fit every dictionary # entry on its own line. if not current.OpensScope(): opening = _GetOpeningBracket(current) if not self._EachDictEntryFitsOnOneLine(opening): return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE') if previous.value == '{': # Split if the dict/set cannot fit on one line and ends in a comma. closing = previous.matching_bracket if (not self._FitsOnLine(previous, closing) and closing.previous_token.value == ','): self.stack[-1].split_before_closing_bracket = True return True ########################################################################### # Argument List Splitting if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes): if (previous.value not in {'=', ':', '*', '**'} and current.value not in ':=,)' and not _IsFunctionDefinition(previous)): # If we're going to split the lines because of named arguments, then we # want to split after the opening bracket as well. But not when this is # part of a function definition. if previous.value == '(': # Make sure we don't split after the opening bracket if the # continuation indent is greater than the opening bracket: # # a( # b=1, # c=2) if (self._FitsOnLine(previous, previous.matching_bracket) and unwrapped_line.IsSurroundedByBrackets(previous)): # An argument to a function is a function call with named # assigns. 
return False column = self.column - self.stack[-1].last_space return column > style.Get('CONTINUATION_INDENT_WIDTH') opening = _GetOpeningBracket(current) if opening: arglist_length = ( opening.matching_bracket.total_length - opening.total_length + self.stack[-1].indent) return arglist_length > self.column_limit if (current.value not in '{)' and previous.value == '(' and self._ArgumentListHasDictionaryEntry(current)): return True if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'): # Split before arguments in a function call or definition if the # arguments are terminated by a comma. opening = _GetOpeningBracket(current) if opening and opening.previous_token and opening.previous_token.is_name: if previous.value in '(,': if opening.matching_bracket.previous_token.value == ',': return True if ((current.is_name or current.value in {'*', '**'}) and previous.value == ','): # If we have a function call within an argument list and it won't fit on # the remaining line, but it will fit on a line by itself, then go ahead # and split before the call. opening = _GetOpeningBracket(current) if (opening and opening.value == '(' and opening.previous_token and (opening.previous_token.is_name or opening.previous_token.value in {'*', '**'})): is_func_call = False opening = current while opening: if opening.value == '(': is_func_call = True break if (not (opening.is_name or opening.value in {'*', '**'}) and opening.value != '.'): break opening = opening.next_token if is_func_call: if (not self._FitsOnLine(current, opening.matching_bracket) or (opening.matching_bracket.next_token and opening.matching_bracket.next_token.value != ',' and not opening.matching_bracket.next_token.ClosesScope())): return True pprevious = previous.previous_token if (current.is_name and pprevious and pprevious.is_name and previous.value == '('): if (not self._FitsOnLine(previous, previous.matching_bracket) and _IsFunctionCallWithArguments(current)): # There is a function call, with more than 1 argument, where the first # argument is itself a function call with arguments. In this specific # case, if we split after the first argument's opening '(', then the # formatting will look bad for the rest of the arguments. E.g.: # # outer_function_call(inner_function_call( # inner_arg1, inner_arg2), # outer_arg1, outer_arg2) # # Instead, enforce a split before that argument to keep things looking # good. return True if (previous.OpensScope() and not current.OpensScope() and not current.is_comment and format_token.Subtype.SUBSCRIPT_BRACKET not in previous.subtypes): if pprevious and not pprevious.is_keyword and not pprevious.is_name: # We want to split if there's a comment in the container. token = current while token != previous.matching_bracket: if token.is_comment: return True token = token.next_token if previous.value == '(': pptoken = previous.previous_token if not pptoken or not pptoken.is_name: # Split after the opening of a tuple if it doesn't fit on the current # line and it's not a function call. if self._FitsOnLine(previous, previous.matching_bracket): return False elif not self._FitsOnLine(previous, previous.matching_bracket): if len(previous.container_elements) == 1: return False elements = previous.container_elements + [previous.matching_bracket] i = 1 while i < len(elements): if (not elements[i - 1].OpensScope() and not self._FitsOnLine(elements[i - 1], elements[i])): return True i += 1 if (self.column_limit - self.column) / float(self.column_limit) < 0.3: # Try not to squish all of the arguments off to the right. 
return True else: # Split after the opening of a container if it doesn't fit on the # current line. if not self._FitsOnLine(previous, previous.matching_bracket): return True ########################################################################### # Original Formatting Splitting # These checks rely upon the original formatting. This is in order to # attempt to keep hand-written code in the same condition as it was before. # However, this may cause the formatter to fail to be idempotent. if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and previous.lineno < current.lineno): # Retain the split before a bitwise operator. return True if (current.is_comment and previous.lineno < current.lineno - current.value.count('\n')): # If a comment comes in the middle of an unwrapped line (like an if # conditional with comments interspersed), then we want to split if the # original comments were on a separate line. return True return False def AddTokenToState(self, newline, dry_run, must_split=False): """Add a token to the format decision state. Allow the heuristic to try out adding the token with and without a newline. Later on, the algorithm will determine which one has the lowest penalty. Arguments: newline: (bool) Add the token on a new line if True. dry_run: (bool) Don't commit whitespace changes to the FormatToken if True. must_split: (bool) A newline was required before this token. Returns: The penalty of splitting after the current token. """ penalty = 0 if newline: penalty = self._AddTokenOnNewline(dry_run, must_split) else: self._AddTokenOnCurrentLine(dry_run) penalty += self._CalculateComprehensionState(newline) return self.MoveStateToNextToken() + penalty def _AddTokenOnCurrentLine(self, dry_run): """Puts the token on the current line. Appends the next token to the state and updates information necessary for indentation. Arguments: dry_run: (bool) Commit whitespace changes to the FormatToken if True. """ current = self.next_token previous = current.previous_token spaces = current.spaces_required_before if not dry_run: current.AddWhitespacePrefix(newlines_before=0, spaces=spaces) if previous.OpensScope(): if not current.is_comment: # Align closing scopes that are on a newline with the opening scope: # # foo = [a, # b, # ] self.stack[-1].closing_scope_indent = self.column - 1 if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'): self.stack[-1].closing_scope_indent += 1 self.stack[-1].indent = self.column + spaces else: self.stack[-1].closing_scope_indent = ( self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) self.column += spaces def _AddTokenOnNewline(self, dry_run, must_split): """Adds a line break and necessary indentation. Appends the next token to the state and updates information necessary for indentation. Arguments: dry_run: (bool) Don't commit whitespace changes to the FormatToken if True. must_split: (bool) A newline was required before this token. Returns: The split penalty for splitting after the current state. 
""" current = self.next_token previous = current.previous_token self.column = self._GetNewlineColumn() if not dry_run: indent_level = self.line.depth spaces = self.column if spaces: spaces -= indent_level * style.Get('INDENT_WIDTH') current.AddWhitespacePrefix( newlines_before=1, spaces=spaces, indent_level=indent_level) if not current.is_comment: self.stack[-1].last_space = self.column self.lowest_level_on_line = self.paren_level if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): self.stack[-1].closing_scope_indent = max( 0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) self.stack[-1].split_before_closing_bracket = True # Calculate the split penalty. penalty = current.split_penalty if must_split: # Don't penalize for a must split. return penalty if previous.is_pseudo_paren and previous.value == '(': # Small penalty for splitting after a pseudo paren. penalty += 50 # Add a penalty for each increasing newline we add, but don't penalize for # splitting before an if-expression or list comprehension. if current.value not in {'if', 'for'}: last = self.stack[-1] last.num_line_splits += 1 penalty += ( style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits) if current.OpensScope() and previous.OpensScope(): # Prefer to keep opening brackets coalesced (unless it's at the beginning # of a function call). pprev = previous.previous_token if not pprev or not pprev.is_name: penalty += 10 return penalty + 10 def MoveStateToNextToken(self): """Calculate format decision state information and move onto the next token. Before moving onto the next token, we first calculate the format decision state given the current token and its formatting decisions. Then the format decision state is set up so that the next token can be added. Returns: The penalty for the number of characters over the column limit. """ current = self.next_token if not current.OpensScope() and not current.ClosesScope(): self.lowest_level_on_line = min(self.lowest_level_on_line, self.paren_level) # If we encounter an opening bracket, we add a level to our stack to prepare # for the subsequent tokens. if current.OpensScope(): last = self.stack[-1] new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space self.stack.append(_ParenState(new_indent, self.stack[-1].last_space)) self.paren_level += 1 # If we encounter a closing bracket, we can remove a level from our # parenthesis stack. if len(self.stack) > 1 and current.ClosesScope(): if format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes: self.stack[-2].last_space = self.stack[-2].indent else: self.stack[-2].last_space = self.stack[-1].last_space self.stack.pop() self.paren_level -= 1 is_multiline_string = current.is_string and '\n' in current.value if is_multiline_string: # This is a multiline string. Only look at the first line. self.column += len(current.value.split('\n')[0]) elif not current.is_pseudo_paren: self.column += len(current.value) self.next_token = self.next_token.next_token # Calculate the penalty for overflowing the column limit. penalty = 0 if (not current.is_pylint_comment and not current.is_pytype_comment and self.column > self.column_limit): excess_characters = self.column - self.column_limit penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters if is_multiline_string: # If this is a multiline string, the column is actually the # end of the last line in the string. 
self.column = len(current.value.split('\n')[-1]) return penalty def _CalculateComprehensionState(self, newline): """Makes required changes to comprehension state. Args: newline: Whether the current token is to be added on a newline. Returns: The penalty for the token-newline combination given the current comprehension state. """ current = self.next_token previous = current.previous_token top_of_stack = self.comp_stack[-1] if self.comp_stack else None penalty = 0 if top_of_stack is not None: # Check if the token terminates the current comprehension. if current == top_of_stack.closing_bracket: last = self.comp_stack.pop() # Lightly penalize comprehensions that are split across multiple lines. if last.has_interior_split: penalty += style.Get('SPLIT_PENALTY_COMPREHENSION') return penalty if newline: top_of_stack.has_interior_split = True if (format_token.Subtype.COMP_EXPR in current.subtypes and format_token.Subtype.COMP_EXPR not in previous.subtypes): self.comp_stack.append(object_state.ComprehensionState(current)) return penalty if (current.value == 'for' and format_token.Subtype.COMP_FOR in current.subtypes): if top_of_stack.for_token is not None: # Treat nested comprehensions like normal comp_if expressions. # Example: # my_comp = [ # a.qux + b.qux # for a in foo # --> for b in bar <-- # if a.zut + b.zut # ] if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())): penalty += split_penalty.UNBREAKABLE else: top_of_stack.for_token = current top_of_stack.has_split_at_for = newline # Try to keep trivial expressions on the same line as the comp_for. if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and top_of_stack.HasTrivialExpr()): penalty += split_penalty.CONNECTED if (format_token.Subtype.COMP_IF in current.subtypes and format_token.Subtype.COMP_IF not in previous.subtypes): # Penalize breaking at comp_if when it doesn't match the newline structure # in the rest of the comprehension. 
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())): penalty += split_penalty.UNBREAKABLE return penalty def _GetNewlineColumn(self): """Return the new column on the newline.""" current = self.next_token previous = current.previous_token top_of_stack = self.stack[-1] if current.spaces_required_before > 2 or self.line.disable: return current.spaces_required_before if current.OpensScope(): return top_of_stack.indent if self.paren_level else self.first_indent if current.ClosesScope(): if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): return max(0, top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH')) return top_of_stack.closing_scope_indent if (previous and previous.is_string and current.is_string and format_token.Subtype.DICTIONARY_VALUE in current.subtypes): return previous.column if style.Get('INDENT_DICTIONARY_VALUE'): if previous and (previous.value == ':' or previous.is_pseudo_paren): if format_token.Subtype.DICTIONARY_VALUE in current.subtypes: return top_of_stack.indent if (_IsCompoundStatement(self.line.first) and (not style.Get('DEDENT_CLOSING_BRACKETS') or style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))): token_indent = ( len(self.line.first.whitespace_prefix.split('\n')[-1]) + style.Get('INDENT_WIDTH')) if token_indent == top_of_stack.indent: return top_of_stack.indent + style.Get('CONTINUATION_INDENT_WIDTH') return top_of_stack.indent def _FitsOnLine(self, start, end): """Determines if line between start and end can fit on the current line.""" length = end.total_length - start.total_length if not start.is_pseudo_paren: length += len(start.value) return length + self.column <= self.column_limit def _EachDictEntryFitsOnOneLine(self, opening): """Determine if each dict elems can fit on one line.""" def PreviousNonCommentToken(tok): tok = tok.previous_token while tok.is_comment: tok = tok.previous_token return tok def ImplicitStringConcatenation(tok): num_strings = 0 if tok.is_pseudo_paren: tok = tok.next_token while tok.is_string: num_strings += 1 tok = tok.next_token return num_strings > 1 closing = opening.matching_bracket entry_start = opening.next_token current = opening.next_token.next_token while current and current != closing: if format_token.Subtype.DICTIONARY_KEY in current.subtypes: prev = PreviousNonCommentToken(current) length = prev.total_length - entry_start.total_length length += len(entry_start.value) if length + self.stack[-2].indent >= self.column_limit: return False entry_start = current if current.OpensScope(): if ((current.value == '{' or (current.is_pseudo_paren and current.next_token.value == '{') and format_token.Subtype.DICTIONARY_VALUE in current.subtypes) or ImplicitStringConcatenation(current)): # A dictionary entry that cannot fit on a single line shouldn't matter # to this calculation. If it can't fit on a single line, then the # opening should be on the same line as the key and the rest on # newlines after it. But the other entries should be on single lines # if possible. if current.matching_bracket: current = current.matching_bracket while current: if current == closing: return True if format_token.Subtype.DICTIONARY_KEY in current.subtypes: entry_start = current break current = current.next_token else: current = current.matching_bracket else: current = current.next_token # At this point, current is the closing bracket. 
Go back one to get the the # end of the dictionary entry. current = PreviousNonCommentToken(current) length = current.total_length - entry_start.total_length length += len(entry_start.value) return length + self.stack[-2].indent <= self.column_limit def _ArgumentListHasDictionaryEntry(self, token): """Check if the function argument list has a dictionary as an arg.""" if _IsArgumentToFunction(token): while token: if token.value == '{': length = token.matching_bracket.total_length - token.total_length return length + self.stack[-2].indent > self.column_limit if token.ClosesScope(): break if token.OpensScope(): token = token.matching_bracket token = token.next_token return False _COMPOUND_STMTS = frozenset( {'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'}) def _IsCompoundStatement(token): if token.value == 'async': token = token.next_token return token.value in _COMPOUND_STMTS def _IsFunctionDef(token): if token.value == 'async': token = token.next_token return token.value == 'def' def _IsFunctionCallWithArguments(token): while token: if token.value == '(': token = token.next_token return token and token.value != ')' elif token.name not in {'NAME', 'DOT', 'EQUAL'}: break token = token.next_token return False def _IsArgumentToFunction(token): bracket = unwrapped_line.IsSurroundedByBrackets(token) if not bracket or bracket.value != '(': return False previous = bracket.previous_token return previous and previous.is_name def _GetLengthOfSubtype(token, subtype, exclude=None): current = token while (current.next_token and subtype in current.subtypes and (exclude is None or exclude not in current.subtypes)): current = current.next_token return current.total_length - token.total_length + 1 def _GetOpeningBracket(current): """Get the opening bracket containing the current token.""" if current.matching_bracket and not current.is_pseudo_paren: return current.matching_bracket while current: if current.ClosesScope(): current = current.matching_bracket elif current.is_pseudo_paren: current = current.previous_token elif current.OpensScope(): return current current = current.previous_token return None def _LastTokenInLine(current): while not current.is_comment and current.next_token: current = current.next_token return current def _IsFunctionDefinition(current): prev = current.previous_token return (current.value == '(' and prev and format_token.Subtype.FUNC_DEF in prev.subtypes) def _IsLastScopeInLine(current): while current: current = current.next_token if current and current.OpensScope(): return False return True def _IsSingleElementTuple(token): """Check if it's a single-element tuple.""" close = token.matching_bracket token = token.next_token num_commas = 0 while token != close: if token.value == ',': num_commas += 1 if token.OpensScope(): token = token.matching_bracket else: token = token.next_token return num_commas == 1 def _ScopeHasNoCommas(token): """Check if the scope has no commas.""" close = token.matching_bracket token = token.next_token while token != close: if token.value == ',': return False if token.OpensScope(): token = token.matching_bracket else: token = token.next_token return True class _ParenState(object): """Maintains the state of the bracket enclosures. A stack of _ParenState objects are kept so that we know how to indent relative to the brackets. Attributes: indent: The column position to which a specified parenthesis level needs to be indented. last_space: The column position of the last space on each level. 
split_before_closing_bracket: Whether a newline needs to be inserted before the closing bracket. We only want to insert a newline before the closing bracket if there also was a newline after the beginning left bracket. num_line_splits: Number of line splits this _ParenState contains already. Each subsequent line split gets an increasing penalty. """ # TODO(morbo): This doesn't track "bin packing." def __init__(self, indent, last_space): self.indent = indent self.last_space = last_space self.closing_scope_indent = 0 self.split_before_closing_bracket = False self.num_line_splits = 0 def Clone(self): state = _ParenState(self.indent, self.last_space) state.closing_scope_indent = self.closing_scope_indent state.split_before_closing_bracket = self.split_before_closing_bracket state.num_line_splits = self.num_line_splits return state def __repr__(self): return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( self.indent, self.last_space, self.closing_scope_indent) def __eq__(self, other): return hash(self) == hash(other) def __ne__(self, other): return not self == other def __hash__(self, *args, **kwargs): return hash((self.indent, self.last_space, self.closing_scope_indent, self.split_before_closing_bracket, self.num_line_splits))
apache-2.0
1,545,245,249,763,236,600
37.06726
80
0.644364
false
4.030792
false
false
false
demisto/content
Packs/ThinkstCanary/Integrations/ThinkstCanary/ThinkstCanary_test.py
1
3171
import demistomock as demisto

MOCK_PARAMS = {
    'access-key': 'fake_access_key',
    'secret-key': 'fake_access_key',
    'server': 'http://123-fake-api.com/',
    'unsecure': True,
    'proxy': True
}


def test_fetch_incidents(mocker, requests_mock):
    """
    Given: An existing last run time.
    When: Running a fetch incidents command normally (not a first run).
    Then: The last run time object should increment by 1 second.
        2020-07-01-04:58:18 -> 2020-07-01-04:58:19
    """
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
    mocker.patch.object(demisto, 'getLastRun', return_value={'time': '2020-07-01-04:58:18'})
    mocker.patch.object(demisto, 'setLastRun')
    requests_mock.get('http://123-fake-api.com/api/v1/incidents/unacknowledged?newer_than=2020-07-01-04%3A58%3A18',
                      json={'incidents': [{'description': {'created': 1593579498}}]})
    from ThinkstCanary import fetch_incidents_command
    fetch_incidents_command()
    assert demisto.setLastRun.call_args[0][0]['time'] == '2020-07-01-04:58:19'


def test_check_whitelist_command_not_whitelisted(mocker):
    """
    Given: An IP to check
    When: Running check_whitelist_command.
    Then: The IP should not be ignored (not in the whitelist).
    """
    import ThinkstCanary
    ip_to_check = "1.2.3.4"
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
    mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_check})
    mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': False,
                                                                        'is_whitelist_enabled': True})
    ThinkstCanary.check_whitelist_command()
    assert demisto.results.call_args_list[0][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is not ' \
                                                                           'Whitelisted'


def test_check_whitelist_commands_whitelisted(mocker):
    """
    Given: An already whitelisted IP to check
    When: Inserting IP to whitelist (whitelist_ip_command) and checking if it is whitelisted
        (check_whitelist_command).
    Then: The IP should be ignored (in the whitelist), and an appropriate message to the user should be prompted.
    """
    import ThinkstCanary
    ip_to_whitelist = "1.2.3.4"
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
    mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_whitelist})
    mocker.patch.object(ThinkstCanary, 'whitelist_ip', return_value={'message': 'Whitelist added', 'result': 'success'})
    mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': True,
                                                                        'is_whitelist_enabled': True})
    ThinkstCanary.whitelist_ip_command()
    ThinkstCanary.check_whitelist_command()
    assert demisto.results.call_args_list[1][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is Whitelisted'
mit
-3,411,973,804,248,822,300
47.784615
120
0.62567
false
3.503867
false
false
false
rajarsheem/libsdae-autoencoder-tensorflow
deepautoencoder/stacked_autoencoder.py
1
6154
import numpy as np import deepautoencoder.utils as utils import tensorflow as tf allowed_activations = ['sigmoid', 'tanh', 'softmax', 'relu', 'linear'] allowed_noises = [None, 'gaussian', 'mask'] allowed_losses = ['rmse', 'cross-entropy'] class StackedAutoEncoder: """A deep autoencoder with denoising capability""" def assertions(self): global allowed_activations, allowed_noises, allowed_losses assert self.loss in allowed_losses, 'Incorrect loss given' assert 'list' in str( type(self.dims)), 'dims must be a list even if there is one layer.' assert len(self.epoch) == len( self.dims), "No. of epochs must equal to no. of hidden layers" assert len(self.activations) == len( self.dims), "No. of activations must equal to no. of hidden layers" assert all( True if x > 0 else False for x in self.epoch), "No. of epoch must be atleast 1" assert set(self.activations + allowed_activations) == set( allowed_activations), "Incorrect activation given." assert utils.noise_validator( self.noise, allowed_noises), "Incorrect noise given" def __init__(self, dims, activations, epoch=1000, noise=None, loss='rmse', lr=0.001, batch_size=100, print_step=50): self.print_step = print_step self.batch_size = batch_size self.lr = lr self.loss = loss self.activations = activations self.noise = noise self.epoch = epoch self.dims = dims self.assertions() self.depth = len(dims) self.weights, self.biases = [], [] def add_noise(self, x): if self.noise == 'gaussian': n = np.random.normal(0, 0.1, (len(x), len(x[0]))) return x + n if 'mask' in self.noise: frac = float(self.noise.split('-')[1]) temp = np.copy(x) for i in temp: n = np.random.choice(len(i), round( frac * len(i)), replace=False) i[n] = 0 return temp if self.noise == 'sp': pass def fit(self, x): for i in range(self.depth): print('Layer {0}'.format(i + 1)) if self.noise is None: x = self.run(data_x=x, activation=self.activations[i], data_x_=x, hidden_dim=self.dims[i], epoch=self.epoch[ i], loss=self.loss, batch_size=self.batch_size, lr=self.lr, print_step=self.print_step) else: temp = np.copy(x) x = self.run(data_x=self.add_noise(temp), activation=self.activations[i], data_x_=x, hidden_dim=self.dims[i], epoch=self.epoch[ i], loss=self.loss, batch_size=self.batch_size, lr=self.lr, print_step=self.print_step) def transform(self, data): tf.reset_default_graph() sess = tf.Session() x = tf.constant(data, dtype=tf.float32) for w, b, a in zip(self.weights, self.biases, self.activations): weight = tf.constant(w, dtype=tf.float32) bias = tf.constant(b, dtype=tf.float32) layer = tf.matmul(x, weight) + bias x = self.activate(layer, a) return x.eval(session=sess) def fit_transform(self, x): self.fit(x) return self.transform(x) def run(self, data_x, data_x_, hidden_dim, activation, loss, lr, print_step, epoch, batch_size=100): tf.reset_default_graph() input_dim = len(data_x[0]) sess = tf.Session() x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x') x_ = tf.placeholder(dtype=tf.float32, shape=[ None, input_dim], name='x_') encode = {'weights': tf.Variable(tf.truncated_normal( [input_dim, hidden_dim], dtype=tf.float32)), 'biases': tf.Variable(tf.truncated_normal([hidden_dim], dtype=tf.float32))} decode = {'biases': tf.Variable(tf.truncated_normal([input_dim], dtype=tf.float32)), 'weights': tf.transpose(encode['weights'])} encoded = self.activate( tf.matmul(x, encode['weights']) + encode['biases'], activation) decoded = tf.matmul(encoded, decode['weights']) + decode['biases'] # reconstruction loss if loss == 'rmse': loss = 
tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(x_, decoded)))) elif loss == 'cross-entropy': loss = -tf.reduce_mean(x_ * tf.log(decoded)) train_op = tf.train.AdamOptimizer(lr).minimize(loss) sess.run(tf.global_variables_initializer()) for i in range(epoch): b_x, b_x_ = utils.get_batch( data_x, data_x_, batch_size) sess.run(train_op, feed_dict={x: b_x, x_: b_x_}) if (i + 1) % print_step == 0: l = sess.run(loss, feed_dict={x: data_x, x_: data_x_}) print('epoch {0}: global loss = {1}'.format(i, l)) # self.loss_val = l # debug # print('Decoded', sess.run(decoded, feed_dict={x: self.data_x_})[0]) self.weights.append(sess.run(encode['weights'])) self.biases.append(sess.run(encode['biases'])) return sess.run(encoded, feed_dict={x: data_x_}) def activate(self, linear, name): if name == 'sigmoid': return tf.nn.sigmoid(linear, name='encoded') elif name == 'softmax': return tf.nn.softmax(linear, name='encoded') elif name == 'linear': return linear elif name == 'tanh': return tf.nn.tanh(linear, name='encoded') elif name == 'relu': return tf.nn.relu(linear, name='encoded')
mit
-4,848,992,055,917,937,000
41.441379
79
0.524862
false
3.834268
false
false
false
aamirmajeedkhan/P4-conference-central
conference.py
1
35026
#!/usr/bin/env python """ conference.py -- Udacity conference server-side Python App Engine API; uses Google Cloud Endpoints $Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $ created by wesc on 2014 apr 21 """ __author__ = 'wesc+api@google.com (Wesley Chun)' from datetime import datetime,time import endpoints from protorpc import messages from protorpc import message_types from protorpc import remote from google.appengine.ext import ndb from models import Profile from models import ProfileMiniForm from models import ProfileForm from models import TeeShirtSize from models import SessionType,Speaker,Session,SessionForm,SessionForms from models import SessionQueryForm,SessionQueryForms from utils import getUserId from settings import WEB_CLIENT_ID EMAIL_SCOPE = endpoints.EMAIL_SCOPE API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID from models import Conference from models import ConferenceForm DEFAULTS = { "city": "Default City", "maxAttendees": 0, "seatsAvailable": 0, "topics": [ "Default", "Topic" ], } OPERATORS = { 'EQ': '=', 'GT': '>', 'GTEQ': '>=', 'LT': '<', 'LTEQ': '<=', 'NE': '!=' } CONF_FIELDS = { 'CITY': 'city', 'TOPIC': 'topics', 'MONTH': 'month', 'MAX_ATTENDEES': 'maxAttendees', } SESSION_FIELDS = { 'NAME': 'name', 'DURATION': 'duration', 'TYPE_OF_SESSION': 'typeOfSession', 'Date': 'date', 'START_TIME':'startTime', 'SPEAKER':'speaker', } from models import ConferenceForms from models import ConferenceQueryForm from models import ConferenceQueryForms # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - from models import BooleanMessage from models import ConflictException CONF_GET_REQUEST = endpoints.ResourceContainer( message_types.VoidMessage, websafeConferenceKey=messages.StringField(1), ) CONF_POST_REQUEST = endpoints.ResourceContainer( ConferenceForm, websafeConferenceKey=messages.StringField(1), ) SESSION_SPEAKER_GET_REQUEST = endpoints.ResourceContainer( message_types.VoidMessage, speaker=messages.StringField(1, required=True), ) SESSION_POST_REQUEST = endpoints.ResourceContainer( SessionForm, websafeConferenceKey=messages.StringField(1, required=True) ) SESSION_TYPE_GET_REQUEST = endpoints.ResourceContainer( message_types.VoidMessage, websafeConferenceKey=messages.StringField(1, required=True), sessionType=messages.StringField(2, required=True) ) SESSION_WISHLIST_POST_REQUEST = endpoints.ResourceContainer( message_types.VoidMessage, websafeSessionKey=messages.StringField(1, required=True) ) SESSION_REQUIRED_FIELDS = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime') MEMCACHE_ANNOUNCEMENTS_KEY="LATEST_ANNOUNCEMENT" MEMCACHE_FEATURED_SPEAKER_KEY="FEATURED_SPEAKER" from google.appengine.api import memcache from models import StringMessage from google.appengine.api import taskqueue @endpoints.api( name='conference', version='v1', allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID], scopes=[EMAIL_SCOPE]) class ConferenceApi(remote.Service): """Conference API v0.1""" # - - - Profile objects - - - - - - - - - - - - - - - - - - - def _copyProfileToForm(self, prof): """Copy relevant fields from Profile to ProfileForm.""" # copy relevant fields from Profile to ProfileForm pf = ProfileForm() for field in pf.all_fields(): if hasattr(prof, field.name): # convert t-shirt string to Enum; just copy others if field.name == 'teeShirtSize': setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name))) else: setattr(pf, field.name, getattr(prof, field.name)) pf.check_initialized() return pf def _getProfileFromUser(self): 
"""Return user Profile from datastore, creating new one if non-existent.""" user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') # retrieve profile from datastore user_id=getUserId(user) p_key = ndb.Key(Profile,user_id) profile = p_key.get() # create profile if not exist if not profile: profile = Profile( key = p_key, # TODO 1 step 4. replace with the key from step 3 displayName = user.nickname(), mainEmail= user.email(), teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED), ) profile.put() return profile # return Profile def _doProfile(self, save_request=None): """Get user Profile and return to user, possibly updating it first.""" # get user Profile prof = self._getProfileFromUser() # if saveProfile(), process user-modifyable fields if save_request: for field in ('displayName', 'teeShirtSize'): if hasattr(save_request, field): val = getattr(save_request, field) if val: setattr(prof, field, str(val)) #if field == 'teeShirtSize': # setattr(prof, field, str(val).upper()) #else: # setattr(prof, field, val) prof.put() return self._copyProfileToForm(prof) @endpoints.method(message_types.VoidMessage, ProfileForm, path='profile', http_method='GET', name='getProfile') def getProfile(self, request): """Return user profile.""" return self._doProfile() @endpoints.method(ProfileMiniForm, ProfileForm, path='profile', http_method='POST', name='saveProfile') def saveProfile(self, request): """Update & return user profile.""" return self._doProfile(request) def _copyConferenceToForm(self, conf, displayName): """Copy relevant fields from Conference to ConferenceForm.""" cf = ConferenceForm() for field in cf.all_fields(): if hasattr(conf, field.name): # convert Date to date string; just copy others if field.name.endswith('Date'): setattr(cf, field.name, str(getattr(conf, field.name))) else: setattr(cf, field.name, getattr(conf, field.name)) elif field.name == "websafeKey": setattr(cf, field.name, conf.key.urlsafe()) if displayName: setattr(cf, 'organizerDisplayName', displayName) cf.check_initialized() return cf def _createConferenceObject(self, request): """Create or update Conference object, returning ConferenceForm/request.""" # preload necessary data items user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) if not request.name: raise endpoints.BadRequestException("Conference 'name' field required") # copy ConferenceForm/ProtoRPC Message into dict data = {field.name: getattr(request, field.name) for field in request.all_fields()} del data['websafeKey'] del data['organizerDisplayName'] # add default values for those missing (both data model & outbound Message) for df in DEFAULTS: if data[df] in (None, []): data[df] = DEFAULTS[df] setattr(request, df, DEFAULTS[df]) # convert dates from strings to Date objects; set month based on start_date if data['startDate']: data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date() data['month'] = data['startDate'].month else: data['month'] = 0 if data['endDate']: data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date() # set seatsAvailable to be same as maxAttendees on creation # both for data model & outbound Message if data["maxAttendees"] > 0: data["seatsAvailable"] = data["maxAttendees"] setattr(request, "seatsAvailable", data["maxAttendees"]) # make Profile Key from user ID p_key = ndb.Key(Profile, user_id) # allocate new Conference ID with Profile key as parent c_id = 
Conference.allocate_ids(size=1, parent=p_key)[0] # make Conference key from ID c_key = ndb.Key(Conference, c_id, parent=p_key) data['key'] = c_key data['organizerUserId'] = request.organizerUserId = user_id # create Conference & return (modified) ConferenceForm Conference(**data).put() taskqueue.add(params={'email': user.email(), 'conferenceInfo': repr(request)}, url='/tasks/send_confirmation_email' ) return request @ndb.transactional() def _updateConferenceObject(self, request): user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) # copy ConferenceForm/ProtoRPC Message into dict data = {field.name: getattr(request, field.name) for field in request.all_fields()} # update existing conference conf = ndb.Key(urlsafe=request.websafeConferenceKey).get() # check that conference exists if not conf: raise endpoints.NotFoundException( 'No conference found with key: %s' % request.websafeConferenceKey) # check that user is owner if user_id != conf.organizerUserId: raise endpoints.ForbiddenException( 'Only the owner can update the conference.') # Not getting all the fields, so don't create a new object; just # copy relevant fields from ConferenceForm to Conference object for field in request.all_fields(): data = getattr(request, field.name) # only copy fields where we get data if data not in (None, []): # special handling for dates (convert string to Date) if field.name in ('startDate', 'endDate'): data = datetime.strptime(data, "%Y-%m-%d").date() if field.name == 'startDate': conf.month = data.month # write to Conference object setattr(conf, field.name, data) conf.put() prof = ndb.Key(Profile, user_id).get() return self._copyConferenceToForm(conf, getattr(prof, 'displayName')) @endpoints.method(CONF_POST_REQUEST, ConferenceForm, path='conference/{websafeConferenceKey}', http_method='PUT', name='updateConference') def updateConference(self, request): """Update conference w/provided fields & return w/updated info.""" return self._updateConferenceObject(request) @endpoints.method(CONF_GET_REQUEST, ConferenceForm, path='conference/{websafeConferenceKey}', http_method='GET', name='getConference') def getConference(self, request): """Return requested conference (by websafeConferenceKey).""" # get Conference object from request; bail if not found conf = ndb.Key(urlsafe=request.websafeConferenceKey).get() if not conf: raise endpoints.NotFoundException( 'No conference found with key: %s' % request.websafeConferenceKey) prof = conf.key.parent().get() # return ConferenceForm return self._copyConferenceToForm(conf, getattr(prof, 'displayName')) @endpoints.method(ConferenceForm, ConferenceForm, path='conference', http_method='POST', name='createConference') def createConference(self, request): """Create new conference.""" return self._createConferenceObject(request) @endpoints.method(ConferenceQueryForms, ConferenceForms, path='queryConferences', http_method='POST', name='queryConferences') def queryConferences(self, request): """Query for conferences.""" conferences = self._getQuery(request) # need to fetch organiser displayName from profiles # get all keys and use get_multi for speed organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences] profiles = ndb.get_multi(organisers) # put display names in a dict for easier fetching names = {} for profile in profiles: names[profile.key.id()] = profile.displayName # return individual ConferenceForm object per Conference return ConferenceForms( 
items=[self._copyConferenceToForm(conf,names[conf.organizerUserId]) \ for conf in conferences] ) @endpoints.method(message_types.VoidMessage, ConferenceForms, path='getConferencesCreated', http_method='POST', name='getConferencesCreated') def getConferencesCreated(self, request): """Return conferences created by user.""" # make sure user is authed user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') # make profile key p_key = ndb.Key(Profile, getUserId(user)) # create ancestor query for this user conferences = Conference.query(ancestor=p_key) # get the user profile and display name prof = p_key.get() displayName = getattr(prof, 'displayName') # return set of ConferenceForm objects per Conference return ConferenceForms( items=[self._copyConferenceToForm(conf, displayName) for conf in conferences] ) @endpoints.method(message_types.VoidMessage, ConferenceForms, path='filterPlayground', http_method='GET', name='filterPlayground') def filterPlayground(self, request): q = Conference.query() # simple filter usage: # q = q.filter(Conference.city == "Paris") # advanced filter building and usage field = "city" operator = "=" value = "London" f = ndb.query.FilterNode(field, operator, value) q = q.filter(f) q=q.order(Conference.maxAttendees) # filter for month of june q=q.filter(Conference.maxAttendees > 6) # TODO # add 2 filters: # 1: city equals to London # 2: topic equals "Medical Innovations" return ConferenceForms( items=[self._copyConferenceToForm(conf, "") for conf in q] ) def _getQuery(self, request): """Return formatted query from the submitted filters.""" q = Conference.query() inequality_filter, filters = self._formatFilters(request.filters) # If exists, sort on inequality filter first if not inequality_filter: q = q.order(Conference.name) else: q = q.order(ndb.GenericProperty(inequality_filter)) q = q.order(Conference.name) for filtr in filters: if filtr["field"] in ["month", "maxAttendees"]: filtr["value"] = int(filtr["value"]) formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"]) q = q.filter(formatted_query) return q def _formatFilters(self, filters,type='Conference'): """Parse, check validity and format user supplied filters.""" formatted_filters = [] inequality_field = None for f in filters: filtr = {field.name: getattr(f, field.name) for field in f.all_fields()} try: if type == 'Conference': filtr["field"] = CONF_FIELDS[filtr["field"]] elif type == 'Session': filtr["field"] = SESSION_FIELDS[filtr["field"]] filtr["operator"] = OPERATORS[filtr["operator"]] except KeyError: raise endpoints.BadRequestException("Filter contains invalid field or operator.") # Every operation except "=" is an inequality if filtr["operator"] != "=": # check if inequality operation has been used in previous filters # disallow the filter if inequality was performed on a different field before # track the field on which the inequality operation is performed if inequality_field and inequality_field != filtr["field"]: raise endpoints.BadRequestException("Inequality filter is allowed on only one field.") else: inequality_field = filtr["field"] formatted_filters.append(filtr) return (inequality_field, formatted_filters) #@ndb.transactional(xg=True) def _conferenceRegistration(self, request, reg=True): """Register or unregister user for selected conference.""" retval = None prof = self._getProfileFromUser() # get user Profile # check if conf exists given websafeConfKey # get conference; check that it exists wsck = 
request.websafeConferenceKey conf = ndb.Key(urlsafe=wsck).get() if not conf: raise endpoints.NotFoundException( 'No conference found with key: %s' % wsck) # register if reg: # check if user already registered otherwise add if wsck in prof.conferenceKeysToAttend: raise ConflictException( "You have already registered for this conference") # check if seats avail if conf.seatsAvailable <= 0: raise ConflictException( "There are no seats available.") # register user, take away one seat prof.conferenceKeysToAttend.append(wsck) conf.seatsAvailable -= 1 retval = True # unregister else: # check if user already registered if wsck in prof.conferenceKeysToAttend: # unregister user, add back one seat prof.conferenceKeysToAttend.remove(wsck) conf.seatsAvailable += 1 retval = True else: retval = False # write things back to the datastore & return prof.put() conf.put() return BooleanMessage(data=retval) @endpoints.method(CONF_GET_REQUEST, BooleanMessage, path='conference/{websafeConferenceKey}', http_method='POST', name='registerForConference') def registerForConference(self, request): """Register user for selected conference.""" return self._conferenceRegistration(request) @endpoints.method(CONF_GET_REQUEST, BooleanMessage, path='conference/{websafeConferenceKey}', http_method='DELETE', name='unregisterFromConference') def unregisterFromConference(self, request): """Unregister user for selected conference.""" return self._conferenceRegistration(request, reg=False) @endpoints.method(message_types.VoidMessage, ConferenceForms, path='conferences/attending', http_method='GET', name='getConferencesToAttend') def getConferencesToAttend(self, request): """Get list of conferences that user has registered for.""" prof = self._getProfileFromUser() conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend] conferences = ndb.get_multi(conf_keys) # retrieve organizers organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences] profiles = ndb.get_multi(organisers) # put display names in a dict for easier fetching names = {} for profile in profiles: names[profile.key.id()] = profile.displayName # return set of ConferenceForm objects per Conference return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\ for conf in conferences] ) # - - - Announcements - - - - - - - - - - - - - - - - - - - - @staticmethod def _cacheAnnouncement(): """Create Announcement & assign to memcache; used by memcache cron job & putAnnouncement(). """ confs = Conference.query(ndb.AND( Conference.seatsAvailable <= 5, Conference.seatsAvailable > 0) ).fetch(projection=[Conference.name]) if confs: # If there are almost sold out conferences, # format announcement and set it in memcache announcement = '%s %s' % ( 'Last chance to attend! 
The following conferences ' 'are nearly sold out:', ', '.join(conf.name for conf in confs)) memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement) else: # If there are no sold out conferences, # delete the memcache announcements entry announcement = "" memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY) return announcement @endpoints.method(message_types.VoidMessage, StringMessage, path='conference/announcement/get', http_method='GET', name='getAnnouncement') def getAnnouncement(self, request): """Return Announcement from memcache.""" announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) if not announcement: announcement = "" return StringMessage(data=announcement) # - - - Conference Session - - - - - - - - - - - - - - - - - - - - @endpoints.method(CONF_GET_REQUEST, SessionForms, path='conference/{websafeConferenceKey}/sessions', http_method='GET', name='getConferenceSessions') def getConferenceSessions(self, request): """Given a conference, return all sessions""" # get Conference object from request conf = ndb.Key(urlsafe=request.websafeConferenceKey).get() if not conf: raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey) # Return set of SessionForm belong to Conference return SessionForms(items=[self._copySessionToForm(session) for session in conf.sessions]) @endpoints.method(SESSION_TYPE_GET_REQUEST, SessionForms, path='conference/{websafeConferenceKey}/sessions/type/{sessionType}', http_method='GET', name='getConferenceSessionsByType') def getConferenceSessionsByType(self, request): """Given a conference, return all sessions of a specified type (eg lecture, keynote, workshop)""" # get Conference object from request conf = ndb.Key(urlsafe=request.websafeConferenceKey).get() if not conf: raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey) # filter sessions by session type sessions = conf.sessions.filter(Session.typeOfSession == str(request.sessionType)) # Return a set of SessionForm objects per session return SessionForms(items=[self._copySessionToForm(session) for session in sessions]) @endpoints.method(SESSION_SPEAKER_GET_REQUEST, SessionForms, path='sessions/speaker/{speaker}', http_method='GET', name='getSessionsBySpeaker') def getSessionsBySpeaker(self, request): """Given a speaker, return all sessions given by this particular speaker, across all conferences""" #filter session by speaker sessions = Session.query(Session.speaker == Speaker(name=request.speaker)) # Return a set of SessionForm objects per session return SessionForms(items=[self._copySessionToForm(session) for session in sessions]) def _createSessionObject(self, sessionForm): """Create Session object, return SessionForm.""" # ensure user is authenticated user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') # get the conference conf = ndb.Key(urlsafe=sessionForm.websafeConferenceKey).get() if not conf: raise endpoints.NotFoundException('No conference found with key: %s' % sessionForm.conferenceKey) # ensure ownership if getUserId(user) != conf.organizerUserId: raise endpoints.ForbiddenException('Only organizer of conference : %s can add sessions.' 
% conf.name) # copy SessionForm/ProtoRPC Message into dict data = {field.name: getattr(sessionForm, field.name) for field in sessionForm.all_fields()} # convert typeOfsession to string if data['typeOfSession']: data['typeOfSession']=str(data['typeOfSession']) else: data['typeOfSession']=str(SessionType.NOT_SPECIFIED) del data['websafeKey'] del data['websafeConferenceKey'] # check required fields for key in SESSION_REQUIRED_FIELDS: if not data[key]: raise endpoints.BadRequestException("'%s' field is required to create a session." % key) # convert date string to a datetime object. try: data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date() except (TypeError, ValueError): raise endpoints.BadRequestException("Invalid date format. Please use 'YYYY-MM-DD'") # convert date string to a time object. HH:MM try: data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time() except (TypeError, ValueError): raise endpoints.BadRequestException("Invalid date format. Please use 'HH:MM'") if data['duration'] <= 0: raise endpoints.BadRequestException("Duration must be greater than zero") #session must be within conference start and end date only when dates #defined at the time of conference creation if conf.startDate and conf.endDate : if data['date'] < conf.startDate or data['date'] > conf.endDate: raise endpoints.BadRequestException("Session must be within range of conference start and end date") data['speaker'] = Speaker(name=data['speaker']) # Datastore to allocate an ID. s_id = Session.allocate_ids(size=1, parent=conf.key)[0] # Datastore returns an integer ID that we can use to create a session key data['key'] = ndb.Key(Session, s_id, parent=conf.key) # Add session to datastore session = Session(**data) session.put() # Add a task to check and update new featured speaker taskqueue.add( params={'websafeConferenceKey': conf.key.urlsafe(), 'speaker': session.speaker.name}, url='/tasks/set_featured_speaker' ) return self._copySessionToForm(session) @endpoints.method(SESSION_POST_REQUEST, SessionForm, path='conference/sessions/{websafeConferenceKey}', http_method='POST', name='createSession') def createSession(self, request): """Creates a session, open to the organizer of the conference""" return self._createSessionObject(request) def _copySessionToForm(self,session): """Copy fields from Session to SessionForm.""" sf = SessionForm() for field in sf.all_fields(): if hasattr(session, field.name): # convert Date to date string; just copy others if field.name.endswith('date'): setattr(sf, field.name, getattr(session, field.name).strftime('%Y-%m-%d')) elif field.name.endswith('startTime'): setattr(sf, field.name, getattr(session, field.name).strftime('%H:%M')) elif field.name.endswith('speaker'): setattr(sf, field.name, session.speaker.name) elif field.name.endswith('typeOfSession'): setattr(sf, field.name, getattr(SessionType, getattr(session, field.name))) else: setattr(sf, field.name, getattr(session, field.name)) elif field.name == "websafeKey": setattr(sf, field.name, session.key.urlsafe()) sf.check_initialized() return sf @endpoints.method(SESSION_WISHLIST_POST_REQUEST, BooleanMessage, path='profile/wishlist/{websafeSessionKey}', http_method='POST', name='addSessionToWishlist') @ndb.transactional(xg=True) def addSessionToWishlist(self, request): """adds the session to the user's list of sessions they are interested in attending""" # get user Profile prof = self._getProfileFromUser() # get session and check if it exists key = ndb.Key(urlsafe=request.websafeSessionKey) session = 
key.get() if not session: raise endpoints.BadRequestException("Session with key %s doesn't exist" % request.sessionKey) # ensure is not already in user's wishlist if key in prof.wishList: raise ConflictException("This session is already in user's wishlist") # add session to user's list prof.wishList.append(key) prof.put() return BooleanMessage(data=True) @endpoints.method(message_types.VoidMessage, SessionForms, path='profile/wishlist/all', http_method='GET', name='getSessionsInWishlist') def getSessionsInWishlist(self, request): """query for all the sessions in a conference that the user is interested in""" # get user Profile prof = self._getProfileFromUser() # get all sessions in user's wishlist sessions = ndb.get_multi(prof.wishList) # return a set of `SessionForm` objects return SessionForms(items=[self._copySessionToForm(session) for session in sessions]) @endpoints.method(SESSION_WISHLIST_POST_REQUEST, BooleanMessage, path='profile/wishlist/{websafeSessionKey}', http_method='DELETE', name='deleteSessionInWishlist') @ndb.transactional() def deleteSessionInWishlist(self, request): """removes the session from the user’s list of sessions they are interested in attending""" # get user Profile prof = self._getProfileFromUser() key = ndb.Key(urlsafe=request.websafeSessionKey) # get session the session key and check if it exists in user's wish list if key not in prof.wishList: raise endpoints.BadRequestException("Failed to find session in user's wishlist") # remove session from user's wishlist prof.wishList.remove(key) prof.put() return BooleanMessage(data=True) #additional query endpoint @endpoints.method(message_types.VoidMessage, SessionForms, path='conference/sessions/hour', http_method='GET', name='gethourSessions') def gethourSessions(self,request): """ Return all sessions that are of an hour or less """ sessions = Session.query(Session.duration <= 60) #here duration is specified in minutes return SessionForms(items=[self._copySessionToForm(session) for session in sessions]) def _getSessionQuery(self, request): """Return formatted query from the submitted filters.""" q = Session.query() inequality_filter, filters = self._formatFilters(request.filters,type='Session') # If exists, sort on inequality filter first if not inequality_filter: q = q.order(Session.name) else: q = q.order(ndb.GenericProperty(inequality_filter)) q = q.order(Session.name) for filtr in filters: if filtr["field"] in ["duration"]: filtr["value"] = int(filtr["value"]) formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"]) q = q.filter(formatted_query) return q @endpoints.method(SessionQueryForms, SessionForms, path='querySessions', http_method='POST', name='querySessions') def querySessions(self, request): """Query for sessions.""" # use `SESSION_FIELDS` to construct query. 
sessions = self._getSessionQuery(request) return SessionForms(items=[self._copySessionToForm(session) for session in sessions]) #special query problem @endpoints.method(SESSION_POST_REQUEST, SessionForms, path='conference/{websafeConferenceKey}/sessions/typewithtime', http_method='GET', name='getTypewithTime') def getTypewithTime(self,request): """Special query that handle couple of inequalities""" wck=request.websafeConferenceKey # get conference object confKey=ndb.Key(urlsafe=wck) if not confKey.get(): raise endpoints.NotFoundException('No conference found with key : %s' % wck) query=Session.query(ancestor=confKey) query=query.filter(Session.typeOfSession != str(SessionType.workshop)) query=query.order(Session.typeOfSession) query=query.order(Session.date) query=query.order(Session.startTime) results=[session for session in query if session.startTime < time(19)] return SessionForms(items=[self._copySessionToForm(session) for session in results]) @endpoints.method(message_types.VoidMessage, StringMessage, path='conference/featured_speakers/get', http_method='GET', name='getFeaturedSpeaker') def getFeaturedSpeaker(self, request): """Returns featured speaker along with their sessions from memcache""" return StringMessage(data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or "") # registers API api = endpoints.api_server([ConferenceApi])
apache-2.0
3,213,191,701,473,363,000
38.264574
116
0.613094
false
4.297423
false
false
false
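A note on the getTypewithTime endpoint in the conference/session code above: Cloud Datastore allows inequality filters on only one property per query, which is why the session type is filtered in the query while the startTime cutoff is applied in Python afterwards. Below is a minimal sketch of that pattern, assuming the App Engine ndb client is available; the reduced Session model and function name are illustrative stand-ins for the real code.

from datetime import time
from google.appengine.ext import ndb

class Session(ndb.Model):  # reduced stand-in for the real Session model above
    typeOfSession = ndb.StringProperty()
    startTime = ndb.TimeProperty()

def non_workshop_sessions_before(conference_key, cutoff=time(19, 0)):
    # Datastore accepts only one inequality property per query, so the
    # second inequality (startTime) has to be evaluated in Python.
    query = (Session.query(ancestor=conference_key)
             .filter(Session.typeOfSession != 'workshop')
             .order(Session.typeOfSession))
    return [s for s in query if s.startTime and s.startTime < cutoff]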
foursquare/fsqio
src/jvm/io/fsq/twofishes/scripts/match-flickr.py
1
2018
#!/usr/bin/python

import sys
import csv
import urllib
import urllib2
import json
import geojson

output = {}

files = sys.argv[1:]
for f in files:
  fdata = open(f).read()
  try:
    data = geojson.loads(fdata)
  except:
    print 'failed to parse: ' + fdata
    continue

  for feature in data['features']:
    woeid = str(feature['properties']['woe_id'])
    label = feature['properties']['label']
    woetype = int(feature['properties']['place_type_id'])
    bbox = feature['geometry']['bbox']
    url = u"http://localhost:8081/?query=%s&woeHint=%s" % (urllib.quote(label.encode('utf-8')), woetype)
    try:
      response = urllib2.urlopen(url)
      data = response.read()
    except:
      print url
      print "Unexpected error:", sys.exc_info()[0]
      continue

    jsonData = json.loads(data)
    geocodes = False
    match = False
    for interp in jsonData['interpretations']:
      if interp['what']:
        break
      fwoetype = interp['feature']['woeType']
      geocodes = True
      center = interp['feature']['geometry']['center']
      if (
        center['lat'] >= bbox[1] and
        center['lat'] <= bbox[3] and
        center['lng'] >= bbox[0] and
        center['lng'] <= bbox[2]
      ):
        match = True
        geonameids = filter(lambda i: i['source'] == 'geonameid', interp['feature']['ids'])
        if len(geonameids):
          id = geonameids[0]['id']
          if ((id not in output) or (output[id][0] == False)):
            lowlng = bbox[0]
            lowlat = bbox[1]
            hilng = bbox[2]
            hilat = bbox[3]
            output[id] = (fwoetype == woetype, '%s\t%s\t%s\t%s\t%s' % (id, lowlng, lowlat, hilng, hilat))

    if not geocodes:
      print (u'No geocodes for %s %s' % (woeid, label)).encode('utf-8')
    elif not match:
      print (u'Geocodes, but no match for %s: %s' % (woeid, label)).encode('utf-8')
      print bbox
      print '\t' + url

outfile = open('flickr-bbox.tsv', 'w')
for k in output:
  outfile.write('%s\n' % output[k][1])
apache-2.0
-910,351,895,282,670,600
25.552632
106
0.564916
false
3.275974
false
false
false
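The matcher above treats an interpretation as a hit only when the geocoder's center point lies inside the feature's GeoJSON bbox, which is ordered [west, south, east, north]. A standalone sketch of that containment test follows; the function name and sample coordinates are illustrative, not part of the script.

def center_in_bbox(center, bbox):
    # GeoJSON bbox order: [west_lng, south_lat, east_lng, north_lat]
    west, south, east, north = bbox
    return south <= center['lat'] <= north and west <= center['lng'] <= east

# A point in San Francisco tested against a rough Bay Area bbox.
print(center_in_bbox({'lat': 37.77, 'lng': -122.42}, [-123.0, 37.0, -121.5, 38.5]))  # True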
botswana-harvard/edc-lab
old/lab_clinic_api/migrations/0001_initial.py
1
31522
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-05-07 13:52 from __future__ import unicode_literals import datetime import django.core.validators from django.db import migrations, models import django.db.models.deletion import django_extensions.db.fields import django_revision.revision_field import edc_base.model.fields.custom_fields import edc_base.model.fields.hostname_modification_field import edc_base.model.fields.userfield import edc_base.model.fields.uuid_auto_field import edc_base.model.validators.date class Migration(migrations.Migration): initial = True dependencies = [ ('edc_registration', '0002_auto_20160503_1604'), ] operations = [ migrations.CreateModel( name='Aliquot', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. 
UUID primary key.', primary_key=True, serialize=False)), ('aliquot_identifier', models.CharField(editable=False, help_text='Aliquot identifier', max_length=25, unique=True, verbose_name='Aliquot Identifier')), ('aliquot_datetime', models.DateTimeField(default=datetime.datetime(2016, 5, 7, 13, 51, 55, 444847), verbose_name='Date and time aliquot created')), ('count', models.IntegerField(editable=False, null=True)), ('medium', models.CharField(choices=[('tube_any', 'Tube'), ('tube_edta', 'Tube EDTA'), ('swab', 'Swab'), ('dbs_card', 'DBS Card')], default='TUBE', max_length=25, verbose_name='Medium')), ('original_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)), ('current_measure', models.DecimalField(decimal_places=2, default='5.00', max_digits=10)), ('measure_units', models.CharField(choices=[('mL', 'mL'), ('uL', 'uL'), ('spots', 'spots'), ('n/a', 'Not Applicable')], default='mL', max_length=25)), ('status', models.CharField(choices=[('available', 'available'), ('consumed', 'consumed')], default='available', max_length=25)), ('comment', models.CharField(blank=True, max_length=50, null=True)), ('subject_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)), ('is_packed', models.BooleanField(default=False, verbose_name='packed')), ('receive_identifier', models.CharField(editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)), ('import_datetime', models.DateTimeField(editable=False, null=True)), ], options={ 'ordering': ('receive', 'count'), }, ), migrations.CreateModel( name='AliquotCondition', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. 
Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('name', models.CharField(db_index=True, help_text='(suggest 40 characters max.)', max_length=250, null=True, unique=True, verbose_name='Name')), ('short_name', models.CharField(db_index=True, help_text='This is the stored value, required', max_length=250, null=True, unique=True, verbose_name='Stored value')), ('display_index', models.IntegerField(db_index=True, default=0, help_text='Index to control display order if not alphabetical, not required', verbose_name='display index')), ('field_name', models.CharField(blank=True, editable=False, help_text='Not required', max_length=25, null=True)), ('version', models.CharField(default='1.0', editable=False, max_length=35)), ], ), migrations.CreateModel( name='AliquotType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('name', models.CharField(max_length=50, verbose_name='Description')), ('alpha_code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z]{2,15}$')], verbose_name='Alpha code')), ('numeric_code', models.CharField(max_length=2, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]{2}$')], verbose_name='Numeric code (2-digit)')), ], options={ 'ordering': ['name'], }, ), migrations.CreateModel( name='Order', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. 
UUID primary key.', primary_key=True, serialize=False)), ('order_identifier', models.CharField(db_index=True, editable=False, help_text='Allocated internally', max_length=25, unique=True, verbose_name='Order number')), ('order_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Order Date')), ('status', models.CharField(choices=[('PENDING', 'Pending'), ('PARTIAL', 'Partial'), ('COMPLETE', 'Complete'), ('ERROR', 'Error'), ('REDRAW', 'Redraw'), ('WITHDRAWN', 'Withdrawn'), ('DUPLICATE', 'Duplicate')], max_length=25, null=True, verbose_name='Status')), ('comment', models.CharField(blank=True, max_length=150, null=True, verbose_name='Comment')), ('import_datetime', models.DateTimeField(null=True)), ('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)), ('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)), ('aliquot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Aliquot')), ], options={ 'ordering': ['order_identifier'], }, ), migrations.CreateModel( name='Panel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('name', models.CharField(db_index=True, max_length=50, unique=True, verbose_name='Panel Name')), ('comment', models.CharField(blank=True, max_length=250, verbose_name='Comment')), ('edc_name', models.CharField(max_length=50, null=True)), ('panel_type', models.CharField(choices=[('TEST', 'Test panel'), ('STORAGE', 'Storage panel')], default='TEST', max_length=15)), ('aliquot_type', models.ManyToManyField(help_text='Choose all that apply', to='lab_clinic_api.AliquotType')), ], ), migrations.CreateModel( name='Receive', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. 
(modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)), ('receive_identifier', models.CharField(db_index=True, editable=False, max_length=25, null=True, unique=True, verbose_name='Receiving Identifier')), ('requisition_identifier', models.CharField(blank=True, db_index=True, max_length=25, null=True, verbose_name='Requisition Identifier')), ('drawn_datetime', models.DateTimeField(db_index=True, validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time drawn')), ('receive_datetime', models.DateTimeField(db_index=True, default=datetime.datetime(2016, 5, 7, 13, 51, 55, 407698), validators=[edc_base.model.validators.date.datetime_not_future], verbose_name='Date and time received')), ('visit', models.CharField(max_length=25, verbose_name='Visit Code')), ('clinician_initials', edc_base.model.fields.custom_fields.InitialsField(help_text='Type 2-3 letters, all in uppercase and no spaces', max_length=3, verbose_name='Initials')), ('receive_condition', models.CharField(max_length=50, null=True, verbose_name='Condition of primary tube')), ('import_datetime', models.DateTimeField(null=True)), ('registered_subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='edc_registration.RegisteredSubject')), ], ), migrations.CreateModel( name='Result', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)), ('result_identifier', models.CharField(db_index=True, editable=False, max_length=25)), ('result_datetime', models.DateTimeField(db_index=True, help_text='Date result added to system.')), ('release_status', models.CharField(choices=[('NEW', 'New'), ('RELEASED', 'Released'), ('AMENDED', 'Amended')], db_index=True, default='NEW', max_length=25)), ('release_datetime', models.DateTimeField(blank=True, db_index=True, help_text='Date result authorized for release. 
This field will auto-fill if release status is changed', null=True)), ('release_username', models.CharField(blank=True, db_index=True, help_text='Username of person authorizing result for release. This field will auto-fill if release status is changed', max_length=50, null=True, verbose_name='Release username')), ('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Comment')), ('dmis_result_guid', models.CharField(blank=True, editable=False, help_text='dmis import value. N/A unless data imported from old system', max_length=36, null=True)), ('import_datetime', models.DateTimeField(null=True)), ('reviewed', models.BooleanField(default=False)), ('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)), ('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)), ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Order')), ], options={ 'ordering': ['result_identifier'], }, ), migrations.CreateModel( name='ResultItem', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. 
UUID primary key.', primary_key=True, serialize=False)), ('result_item_value', models.CharField(db_index=True, max_length=25, verbose_name='Result')), ('result_item_value_as_float', models.FloatField(db_index=True, editable=False, null=True, verbose_name='Numeric result')), ('result_item_quantifier', models.CharField(choices=[('=', '='), ('>', '>'), ('>=', '>='), ('<', '<'), ('<=', '<=')], default='=', max_length=25, verbose_name='Quantifier')), ('result_item_datetime', models.DateTimeField(db_index=True, verbose_name='Assay date and time')), ('result_item_operator', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Operator')), ('grade_range', models.CharField(blank=True, max_length=25, null=True)), ('grade_flag', models.CharField(blank=True, max_length=5, null=True)), ('grade_message', models.CharField(blank=True, max_length=50, null=True)), ('grade_warn', models.BooleanField(default=False)), ('reference_flag', models.CharField(blank=True, max_length=5, null=True)), ('reference_range', models.CharField(blank=True, max_length=25, null=True)), ('validation_status', models.CharField(choices=[('P', 'Preliminary'), ('F', 'Final'), ('R', 'Rejected')], db_index=True, default='P', help_text='Default is preliminary', max_length=10, verbose_name='Status')), ('validation_datetime', models.DateTimeField(blank=True, db_index=True, null=True)), ('validation_username', models.CharField(blank=True, db_index=True, max_length=50, null=True, verbose_name='Validation username')), ('validation_reference', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation reference')), ('comment', models.CharField(blank=True, max_length=50, null=True, verbose_name='Validation Comment')), ('error_code', models.CharField(blank=True, max_length=50, null=True, verbose_name='Error codes')), ('subject_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filtering', max_length=50, null=True)), ('receive_identifier', models.CharField(db_index=True, editable=False, help_text='non-user helper field to simplify search and filter', max_length=25, null=True)), ('import_datetime', models.DateTimeField(null=True)), ('subject_type', models.CharField(max_length=25, null=True)), ('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Result')), ], options={ 'ordering': ('-result_item_datetime',), }, ), migrations.CreateModel( name='Review', fields=[ ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. 
Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('id', edc_base.model.fields.uuid_auto_field.UUIDAutoField(editable=False, help_text='System field. UUID primary key.', primary_key=True, serialize=False)), ('title', models.CharField(editable=False, max_length=50)), ('review_datetime', models.DateTimeField(null=True)), ('review_status', models.CharField(choices=[('REQUIRES_REVIEW', 'Requires Review'), ('REVIEWED', 'Reviewed')], max_length=25)), ('comment', models.TextField(blank=True, max_length=500, null=True)), ], options={ 'ordering': ['review_datetime'], }, ), migrations.CreateModel( name='TestCode', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('code', models.CharField(max_length=15, unique=True, validators=[django.core.validators.RegexValidator('^[A-Z0-9\\%\\_\\-]{1,15}$', 'Ensure test code is uppercase alphanumeric ( with _ ,%) and no spaces')], verbose_name='Test Code')), ('name', models.CharField(max_length=50, verbose_name='Test Code Description')), ('units', models.CharField(choices=[('%', '%'), ('10^0/L', '10^0/L'), ('10^3/uL', '10^3/uL'), ('10^6/uL', '10^6/uL'), ('cells/ul', 'cells/ul'), ('copies/ml', 'copies/ml'), ('fL', 'fL'), ('g/dL', 'g/dL'), ('g/L', 'g/L'), ('mg/dL', 'mg/dL'), ('mg/L', 'mg/L'), ('mm^3', 'mm^3'), ('mm/H', 'mm/H'), ('mmol/L', 'mmol/L'), ('ng/ml', 'ng/ml'), ('pg', 'pg'), ('ratio', 'ratio'), ('U/L', 'U/L'), ('umol/L', 'umol/L')], max_length=25, verbose_name='Units')), ('display_decimal_places', models.IntegerField(blank=True, null=True, verbose_name='Decimal places to display')), ('is_absolute', models.CharField(choices=[('absolute', 'Absolute'), ('calculated', 'Calculated')], default='absolute', max_length=15, verbose_name='Is the value absolute or calculated?')), ('formula', models.CharField(blank=True, max_length=50, null=True, verbose_name='If calculated, formula?')), ('edc_code', models.CharField(db_index=True, max_length=25, null=True)), ('edc_name', models.CharField(db_index=True, max_length=50, null=True)), ], options={ 'ordering': ['edc_name'], }, ), migrations.CreateModel( name='TestCodeGroup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('user_created', 
edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user created')), ('user_modified', edc_base.model.fields.userfield.UserField(editable=False, max_length=50, verbose_name='user modified')), ('hostname_created', models.CharField(default='One-2.local', editable=False, help_text='System field. (modified on create only)', max_length=50)), ('hostname_modified', edc_base.model.fields.hostname_modification_field.HostnameModificationField(editable=False, help_text='System field. (modified on every save)', max_length=50)), ('revision', django_revision.revision_field.RevisionField(blank=True, editable=False, help_text='System field. Git repository tag:branch:commit.', max_length=75, null=True, verbose_name='Revision')), ('code', models.CharField(max_length=15, null=True)), ('name', models.CharField(blank=True, max_length=25, null=True)), ], options={ 'ordering': ['code'], }, ), migrations.AddField( model_name='testcode', name='test_code_group', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.TestCodeGroup'), ), migrations.AddField( model_name='resultitem', name='test_code', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='lab_clinic_api.TestCode'), ), migrations.AddField( model_name='result', name='review', field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Review'), ), migrations.AddField( model_name='panel', name='test_code', field=models.ManyToManyField(blank=True, null=True, to='lab_clinic_api.TestCode'), ), migrations.AddField( model_name='order', name='panel', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Panel'), ), migrations.AddField( model_name='aliquot', name='aliquot_condition', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotCondition', verbose_name='Aliquot Condition'), ), migrations.AddField( model_name='aliquot', name='aliquot_type', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.AliquotType', verbose_name='Aliquot Type'), ), migrations.AddField( model_name='aliquot', name='primary_aliquot', field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primary', to='lab_clinic_api.Aliquot'), ), migrations.AddField( model_name='aliquot', name='receive', field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='lab_clinic_api.Receive'), ), migrations.AddField( model_name='aliquot', name='source_aliquot', field=models.ForeignKey(editable=False, help_text='Aliquot from which this aliquot was created, Leave blank if this is the primary tube', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='source', to='lab_clinic_api.Aliquot'), ), migrations.AlterUniqueTogether( name='aliquot', unique_together=set([('receive', 'count')]), ), ]
gpl-2.0
2,280,493,389,148,608,000
91.985251
463
0.645327
false
3.959056
true
false
false
easy-as-pie-labs/tweap
tweap/project_management/tests.py
1
11427
from django.test import TestCase from project_management.models import Project, Invitation, Tag from project_management.tools import invite_users, get_tags from django.contrib.auth.models import User import json from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed class ModelTest(TestCase): project_name = "Testproject" project_description = "Testdescription" def test_project_model_members_and_leave(self): user = User.objects.create_user('testuser', 'test@test.de', 'testpw') user2 = User.objects.create_user('testuser2', 'test2@test.de', 'testpw') project = Project(name=self.project_name, description=self.project_description) project.save() self.assertEqual(str(project), self.project_name) project.members.add(user) project.members.add(user2) # test if users are in project now self.assertTrue(user in project.members.all()) self.assertTrue(user2 in project.members.all()) project.leave(user2) project_exists = Project.objects.filter(id=project.id).exists() # test if user2 is removed from project and project still exists self.assertTrue(project_exists) self.assertTrue(user in project.members.all()) self.assertFalse(user2 in project.members.all()) project.leave(user) project_exists = Project.objects.filter(id=project.id).exists() # test if leave of last user deletes the project self.assertFalse(project_exists) # cleanup user.delete() user2.delete() def test_invitation_model_get_for_users(self): project = Project(name=self.project_name, description=self.project_description) project.save() user = User.objects.create_user('testuser', 'test@test.de', 'testpw') invitation = Invitation(user=user, project=project) invitation.save() self.assertEqual(str(invitation), user.username + ' invited to ' + self.project_name) # test if invitation is returned for the user via the method get_for_user() self.assertTrue(invitation in Invitation.get_for_user(user)) invitation.delete() # cleanup user.delete() def test_invitation_model_accept(self): project = Project(name=self.project_name, description=self.project_description) project.save() user = User.objects.create_user('testuser', 'test@test.de', 'testpw') invitation = Invitation(user=user, project=project) invitation.save() invitation_exists = Invitation.objects.filter(id=invitation.id).exists() # test if invitation exists self.assertTrue(invitation_exists) invitation.accept() invitation_exists = Invitation.objects.filter(id=invitation.id).exists() # test if user is now member of the project and invitation was deleted self.assertTrue(user in project.members.all()) self.assertFalse(invitation_exists) # cleanup user.delete() def test_invitation_model_reject(self): project = Project(name=self.project_name, description=self.project_description) project.save() user = User.objects.create_user('testuser', 'test@test.de', 'testpw') invitation = Invitation(user=user, project=project) invitation.save() invitation_exists = Invitation.objects.filter(id=invitation.id).exists() # test if invitation exists self.assertTrue(invitation_exists) invitation.reject() invitation_exists = Invitation.objects.filter(id=invitation.id).exists() # test if user is not member of the project and invitation was deleted self.assertFalse(user in project.members.all()) self.assertFalse(invitation_exists) # cleanup user.delete() def test_has_user(self): user = User.objects.create_user('testuser', 'test@test.de', 'testpw') user2 = User.objects.create_user('testuser2', 'test2@test.de', 'testpw') user3 = User.objects.create_user('testuser3', 'test3@test.de', 
'testpw') project = Project(name=self.project_name, description=self.project_description) project.save() self.assertEqual(str(project), self.project_name) project.members.add(user) project.members.add(user2) # test if users are in project now self.assertTrue(project.has_user(user)) self.assertTrue(project.has_user(user2)) self.assertFalse(project.has_user(user3)) project.leave(user2) project_exists = Project.objects.filter(id=project.id).exists() # test if user2 is removed from project and project still exists self.assertTrue(project.has_user(user)) self.assertFalse(project.has_user(user2)) self.assertFalse(project.has_user(user3)) project.leave(user) project_exists = Project.objects.filter(id=project.id).exists() # test if leave of last user deletes the project self.assertFalse(project_exists) # cleanup user.delete() user2.delete() user3.delete() class ToolsTest(TestCase): def test_invite_users(self): project = Project(name="Testprojekt") project.save() user1 = User.objects.create_user('user1', 'user1@test.de', 'testpw') user2 = User.objects.create_user('user2', 'user2@test.de', 'testpw') user3 = User.objects.create_user('user3', 'user3@test.de', 'testpw') # test with username and email user_string = ['user1', 'user2@test.de', 'test'] user_string = json.dumps(user_string) invite_users(user_string, project) # test if the both users are invited self.assertTrue(Invitation.objects.filter(user=user1, project=project).exists()) self.assertTrue(Invitation.objects.filter(user=user2, project=project).exists()) self.assertFalse(Invitation.objects.filter(user=user3, project=project).exists()) #cleanup user1.delete() user2.delete() user3.delete() def test_get_tags(self): project = Project(name="Testprojekt") project.save() tag = Tag(name="testtag1", project=project) tag.save() #test if only testtag1 exists self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists()) self.assertFalse(Tag.objects.filter(project=project, name="testtag2").exists()) self.assertFalse(Tag.objects.filter(project=project, name="testtag3").exists()) tag_string = ['testttag1', 'testtag2', 'testtag3'] tag_string = json.dumps(tag_string) tags = get_tags(tag_string, project) #test if return list contains 3 Tags self.assertEquals(len(tags), 3) self.assertIsInstance(tags[0], Tag) #test that all 3 testtags exists now self.assertTrue(Tag.objects.filter(project=project, name="testtag1").exists()) self.assertTrue(Tag.objects.filter(project=project, name="testtag2").exists()) self.assertTrue(Tag.objects.filter(project=project, name="testtag3").exists()) class ViewsTest(TestCase): def setup_login(self): User.objects.create_user('user', 'user@test.de', 'testpw') self.client.post('/users/login/', {'username': 'user', 'password': 'testpw'}) def test_project_create_edit(self): self.setup_login() # test if page is available resp = self.client.get('/projects/new/') self.assertEqual(resp.status_code, 200) self.assertFalse('error_messages' in resp.context) # test if validation works resp = self.client.post('/projects/new/', {}) self.assertEqual(resp.status_code, 200) self.assertTrue(resp.context['error_messages']) # test if project with name only can be created resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'}) self.assertEqual(resp.status_code, 302) self.assertTrue(type(resp) is HttpResponseRedirect) project_exist = Project.objects.filter(name='TestCreateProject').exists() self.assertTrue(project_exist) # test if project with name and description can be created resp = 
self.client.post('/projects/new/', {'name': 'TestCreateProject2', 'description': 'I am a test project', 'icon': 'fa fa-folder-open-o'}) self.assertEqual(resp.status_code, 302) self.assertTrue(type(resp) is HttpResponseRedirect) project_exist = Project.objects.filter(name='TestCreateProject2').exists() self.assertTrue(project_exist) project = Project.objects.get(name='TestCreateProject2') self.assertEqual(project.description, 'I am a test project') # test if a non existing project retuns 404 resp = self.client.get('/projects/edit/9999/') self.assertEqual(resp.status_code, 404) # test if an existing project can be edited resp = self.client.get('/projects/edit/' + str(project.id) + '/') self.assertEqual(resp.status_code, 200) # test if changes are saved resp = self.client.post('/projects/edit/' + str(project.id) + '/', {'name': 'new name', 'description': 'new description', 'icon': 'fa fa-folder-open-o'}) self.assertEqual(resp.status_code, 302) project = Project.objects.get(id=project.id) self.assertEqual(project.name, 'new name') self.assertEqual(project.description, 'new description') def test_project_view(self): self.setup_login() # test if project with name only can be created resp = self.client.post('/projects/new/', {'name': 'TestCreateProject', 'icon': 'fa fa-folder-open-o'}) self.assertEqual(resp.status_code, 302) self.assertTrue(type(resp) is HttpResponseRedirect) project_exists = Project.objects.filter(name='TestCreateProject').exists() self.assertTrue(project_exists) project = Project.objects.get(name='TestCreateProject') print('test: acces own project') resp = self.client.get('/projects/' + str(project.id)) self.assertEqual(resp.status_code, 200) self.assertTrue(type(resp) is HttpResponse) resp = self.client.post('/projects/' + str(project.id)) self.assertTrue(type(resp) is HttpResponseNotAllowed) print('test non-existent project') resp = self.client.get('/projects/1337') self.assertEqual(resp.status_code, 404) self.client.get('/users/logout/') print('test: access \'own\' project when not logged in') resp = self.client.get('/projects/' + str(project.id)) self.assertEqual(resp.status_code, 302) self.assertTrue(type(resp) is HttpResponseRedirect) User.objects.create_user('anotheruser', 'anotheruser@test.de', 'testpw') self.client.post('/users/login/', {'username': 'anotheruser', 'password': 'testpw'}) print('test: someone else\'s project') resp = self.client.get('/projects/' + str(project.id)) self.assertEqual(resp.status_code, 404) def test_view_all(self): # TODO: renew tests pass def test_view_invites(self): # TODO: renew tests pass def test_leave(self): pass def test_invitation_handler(self): pass
gpl-3.0
4,192,250,747,644,782,600
40.552727
161
0.655203
false
3.896011
true
false
false
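The model tests above exercise a leave() behaviour where removing the last member also deletes the project. A minimal sketch of a model with that contract, assuming a standard Django setup; the field names mirror the tests, the rest is a guess rather than the project's actual implementation.

from django.contrib.auth.models import User
from django.db import models

class Project(models.Model):
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    members = models.ManyToManyField(User)

    def has_user(self, user):
        return self.members.filter(pk=user.pk).exists()

    def leave(self, user):
        # Removing the last member deletes the whole project, as the tests expect.
        self.members.remove(user)
        if not self.members.exists():
            self.delete()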
Wyn10/Cnchi
cnchi/ui/gtk/pages/features.py
1
13668
#!/usr/bin/env python # -*- coding: utf-8 -*- # # features.py # # Copyright © 2013-2016 Antergos # # This file is part of Cnchi. # # Cnchi is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Cnchi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # The following additional terms are in effect as per Section 7 of the license: # # The preservation of all legal notices and author attributions in # the material or in the Appropriate Legal Notices displayed # by works containing it is required. # # You should have received a copy of the GNU General Public License # along with Cnchi; If not, see <http://www.gnu.org/licenses/>. """ Features screen """ import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk import subprocess import logging import desktop_info import features_info import misc.extra as misc from ui.base_widgets import Page COL_IMAGE = 0 COL_TITLE = 1 COL_DESCRIPTION = 2 COL_SWITCH = 3 class Features(Page): """ Features screen class """ def __init__(self, params, prev_page="desktop", next_page="disk_grp", **kwargs): """ Initializes features ui """ super().__init__(self, params, name="features", prev_page=prev_page, next_page=next_page, **kwargs) self.listbox_rows = {} self.title = _("Features") self.in_group = True # Set up list box self.listbox = self.ui.get_object("listbox") self.listbox.set_selection_mode(Gtk.SelectionMode.NONE) self.listbox.set_sort_func(self.listbox_sort_by_name, None) # self.listbox.set_selection_mode(Gtk.SelectionMode.BROWSE) # self.listbox.connect("row-selected", self.on_listbox_row_selected) # This is initialized each time this screen is shown in prepare() self.features = None # Only show ufw rules and aur disclaimer info once self.info_already_shown = {"ufw": False, "aur": False} # Only load defaults the first time this screen is shown self.load_defaults = True @staticmethod def nvidia_detected(): from hardware.nvidia import Nvidia if Nvidia().detect(): return True from hardware.nvidia_340xx import Nvidia340xx if Nvidia340xx().detect(): return True from hardware.nvidia_304xx import Nvidia304xx if Nvidia304xx().detect(): return True return False @staticmethod def amd_detected(): from hardware.catalyst import Catalyst return Catalyst().detect() @staticmethod def on_listbox_row_selected(listbox, listbox_row): """ Someone selected a different row of the listbox WARNING: IF LIST LAYOUT IS CHANGED THEN THIS SHOULD BE CHANGED ACCORDINGLY. 
""" if listbox_row is not None: for vbox in listbox_row: switch = vbox.get_children()[2] if switch: switch.set_active(not switch.get_active()) def fill_listbox(self): for listbox_row in self.listbox.get_children(): listbox_row.destroy() self.listbox_rows = {} # Only add graphic-driver feature if an AMD or Nvidia is detected # FIXME: Conflict between lib32-nvidia-libgl and lib32-mesa-libgl if "graphic_drivers" in self.features: if not self.amd_detected() and not self.nvidia_detected(): logging.debug("Neither NVidia nor AMD have been detected.") self.features.remove("graphic_drivers") #if "graphic_drivers" in self.features: # self.features.remove("graphic_drivers") for feature in self.features: box = Gtk.Box(spacing=20) box.set_name(feature + "-row") self.listbox_rows[feature] = [] if feature in features_info.ICON_NAMES: icon_name = features_info.ICON_NAMES[feature] else: logging.debug("No icon found for feature %s", feature) icon_name = "missing" object_name = "image_" + feature image = Gtk.Image.new_from_icon_name( icon_name, Gtk.IconSize.DND) image.set_name(object_name) image.set_property('margin_start', 10) self.listbox_rows[feature].append(image) box.pack_start(image, False, False, 0) text_box = Gtk.VBox() object_name = "label_title_" + feature label_title = Gtk.Label.new() label_title.set_halign(Gtk.Align.START) label_title.set_justify(Gtk.Justification.LEFT) label_title.set_name(object_name) self.listbox_rows[feature].append(label_title) text_box.pack_start(label_title, False, False, 0) object_name = "label_" + feature label = Gtk.Label.new() label.set_name(object_name) self.listbox_rows[feature].append(label) text_box.pack_start(label, False, False, 0) box.pack_start(text_box, False, False, 0) object_name = "switch_" + feature switch = Gtk.Switch.new() switch.set_name(object_name) switch.set_property('margin_top', 10) switch.set_property('margin_bottom', 10) switch.set_property('margin_end', 10) switch.get_style_context().add_class('switch') switch.set_property('width_request', 200) self.listbox_rows[feature].append(switch) box.pack_end(switch, False, False, 0) # Add row to our gtklist self.listbox.add(box) self.listbox.get_style_context().add_class('list_box') self.listbox.show_all() @staticmethod def listbox_sort_by_name(row1, row2, user_data): """ Sort function for listbox Returns : < 0 if row1 should be before row2, 0 if they are equal and > 0 otherwise WARNING: IF LAYOUT IS CHANGED IN fill_listbox THEN THIS SHOULD BE CHANGED ACCORDINGLY. 
""" box1 = row1.get_child() txt_box1 = box1.get_children()[1] label1 = txt_box1.get_children()[0] box2 = row2.get_child() txt_box2 = box2.get_children()[1] label2 = txt_box2.get_children()[0] text = [label1.get_text(), label2.get_text()] # sorted_text = misc.sort_list(text, self.settings.get("locale")) sorted_text = misc.sort_list(text) # If strings are already well sorted return < 0 if text[0] == sorted_text[0]: return -1 # Strings must be swaped, return > 0 return 1 def set_row_text(self, feature, title, desc, tooltip): """ Set translated text to our listbox feature row """ if feature in self.listbox_rows: title = "<span weight='bold' size='large'>{0}</span>".format(title) desc = "<span size='small'>{0}</span>".format(desc) row = self.listbox_rows[feature] row[COL_TITLE].set_markup(title) row[COL_DESCRIPTION].set_markup(desc) for widget in row: widget.set_tooltip_markup(tooltip) def translate_ui(self): """ Translates all ui elements """ self.header.set_subtitle(self.title) for feature in self.features: if feature == "graphic_drivers": # Only add this feature if NVIDIA or AMD are detected if not self.amd_detected() and not self.nvidia_detected(): continue title = _(features_info.TITLES[feature]) desc = _(features_info.DESCRIPTIONS[feature]) tooltip = _(features_info.TOOLTIPS[feature]) self.set_row_text(feature, title, desc, tooltip) # Sort listbox items self.listbox.invalidate_sort() def switch_defaults_on(self): """ Enable some features by default """ if 'bluetooth' in self.features: try: process1 = subprocess.Popen(["lsusb"], stdout=subprocess.PIPE) process2 = subprocess.Popen( ["grep", "-i", "bluetooth"], stdin=process1.stdout, stdout=subprocess.PIPE) process1.stdout.close() out, process_error = process2.communicate() if out.decode() is not '': row = self.listbox_rows['bluetooth'] row[COL_SWITCH].set_active(True) except subprocess.CalledProcessError as err: logging.warning( "Error checking bluetooth presence. 
Command %s failed: %s", err.cmd, err.output) if 'cups' in self.features: row = self.listbox_rows['cups'] row[COL_SWITCH].set_active(True) if 'visual' in self.features: row = self.listbox_rows['visual'] row[COL_SWITCH].set_active(True) def store_values(self): """ Get switches values and store them """ for feature in self.features: row = self.listbox_rows[feature] is_active = row[COL_SWITCH].get_active() self.settings.set("feature_" + feature, is_active) if is_active: logging.debug("Feature '%s' has been selected", feature) # Show ufw info message if ufw is selected (show it only once) if self.settings.get("feature_firewall") and not self.info_already_shown["ufw"]: self.show_info_dialog("ufw") self.info_already_shown["ufw"] = True # Show AUR disclaimer if AUR is selected (show it only once) if self.settings.get("feature_aur") and not self.info_already_shown["aur"]: self.show_info_dialog("aur") self.info_already_shown["aur"] = True # LAMP: Ask user if he wants Apache or Nginx if self.settings.get("feature_lamp"): info = Gtk.MessageDialog( transient_for=self.get_main_window(), modal=True, destroy_with_parent=True, message_type=Gtk.MessageType.INFO, buttons=Gtk.ButtonsType.YES_NO) info.set_markup("LAMP / LEMP") msg = _("Do you want to install the Nginx server instead of the Apache server?") info.format_secondary_markup(msg) response = info.run() info.destroy() if response == Gtk.ResponseType.YES: self.settings.set("feature_lemp", True) else: self.settings.set("feature_lemp", False) self.listbox_rows = {} return True def show_info_dialog(self, feature): """ Some features show an information dialog when this screen is accepted """ if feature == "aur": # Aur disclaimer txt1 = _("Arch User Repository - Disclaimer") txt2 = _("The Arch User Repository is a collection of user-submitted PKGBUILDs\n" "that supplement software available from the official repositories.\n\n" "The AUR is community driven and NOT supported by Arch or Antergos.\n") elif feature == "ufw": # Ufw rules info txt1 = _("Uncomplicated Firewall will be installed with these rules:") toallow = misc.get_network() txt2 = _("ufw default deny\nufw allow from {0}\nufw allow Transmission\n" "ufw allow SSH").format(toallow) else: # No message return txt1 = "<big>{0}</big>".format(txt1) txt2 = "<i>{0}</i>".format(txt2) info = Gtk.MessageDialog( transient_for=self.get_main_window(), modal=True, destroy_with_parent=True, message_type=Gtk.MessageType.INFO, buttons=Gtk.ButtonsType.CLOSE) info.set_markup(txt1) info.format_secondary_markup(txt2) info.run() info.destroy() def prepare(self, direction): """ Prepare features screen to get ready to show itself """ # Each desktop has its own features desktop = self.settings.get('desktop') self.features = list( set(desktop_info.ALL_FEATURES) - set(desktop_info.EXCLUDED_FEATURES[desktop])) self.fill_listbox() self.translate_ui() self.show_all() if self.load_defaults: self.switch_defaults_on() # Only load defaults once self.load_defaults = False else: # Load values user has chosen when this screen is shown again self.load_values() def load_values(self): """ Get previous selected switches values """ for feature in self.features: row = self.listbox_rows[feature] is_active = self.settings.get("feature_" + feature) if row[COL_SWITCH] is not None and is_active is not None: row[COL_SWITCH].set_active(is_active) # When testing, no _() is available try: _("") except NameError as err: def _(message): return message if __name__ == '__main__': from test_screen import _, run run('Features')
gpl-3.0
-3,563,286,189,405,870,000
36.138587
94
0.585791
false
3.993863
false
false
false
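The listbox_sort_by_name helper above follows GTK's comparator contract: return a negative value, zero, or a positive value, strcmp-style. A reduced sketch of wiring such a sort function to a Gtk.ListBox, assuming each row simply wraps a label (the real rows above are more nested):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

def sort_rows_by_label(row1, row2, user_data=None):
    # Gtk.ListBox expects strcmp-style results: < 0, 0 or > 0.
    text1 = row1.get_child().get_text()
    text2 = row2.get_child().get_text()
    return (text1 > text2) - (text1 < text2)

listbox = Gtk.ListBox()
listbox.set_sort_func(sort_rows_by_label, None)
for name in ("cups", "bluetooth", "aur"):
    listbox.add(Gtk.Label(label=name))
listbox.invalidate_sort()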
SVilgelm/CloudFerry
cloudferry/lib/os/storage/plugins/copy_mechanisms.py
1
5969
# Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import logging import random from cloudferry.lib.utils import files from cloudferry.lib.utils import remote_runner from cloudferry.lib.copy_engines import base LOG = logging.getLogger(__name__) class CopyFailed(RuntimeError): pass class CopyMechanism(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def copy(self, context, source_object, destination_object): raise NotImplementedError() class CopyObject(object): def __init__(self, host=None, path=None): self.host = host self.path = path def __repr__(self): return "{host}:{path}".format(host=self.host, path=self.path) class RemoteFileCopy(CopyMechanism): """Uses one of `rsync`, `bbcp` or `scp` to copy volume files across remote nodes. Primarily used for NFS backend.""" def copy(self, context, source_object, destination_object): data = { 'host_src': source_object.host, 'path_src': source_object.path, 'host_dst': destination_object.host, 'path_dst': destination_object.path } try: copier = base.get_copier_checked(context.src_cloud, context.dst_cloud, data) copier.transfer(data) except (base.FileCopyError, base.CopierCannotBeUsed, base.CopierNotFound, base.NotEnoughSpace) as e: msg = ("Copying file from {src_host}@{src_file} to " "{dst_host}@{dst_file}, error: {err}").format( src_host=source_object.host, src_file=source_object.path, dst_host=destination_object.host, dst_file=destination_object.path, err=e.message) raise CopyFailed(msg) class CopyRegularFileToBlockDevice(CopyMechanism): """Redirects regular file to stdout and copies over ssh tunnel to calling node into block device""" @staticmethod def _generate_session_name(): return 'copy_{}'.format(random.getrandbits(64)) def copy(self, context, source_object, destination_object): cfg_src = context.cfg.src cfg_dst = context.cfg.dst src_user = cfg_src.ssh_user dst_user = cfg_dst.ssh_user src_host = source_object.host dst_host = destination_object.host rr_src = remote_runner.RemoteRunner(src_host, src_user, sudo=True, password=cfg_src.ssh_sudo_password) rr_dst = remote_runner.RemoteRunner(dst_host, dst_user, sudo=True, password=cfg_dst.ssh_sudo_password) ssh_opts = ('-o UserKnownHostsFile=/dev/null ' '-o StrictHostKeyChecking=no') # Choose auxiliary port for SSH tunnel aux_port_start, aux_port_end = \ context.cfg.migrate.ssh_transfer_port.split('-') aux_port = random.randint(int(aux_port_start), int(aux_port_end)) session_name = self._generate_session_name() try: progress_view = "" if files.is_installed(rr_src, "pv"): src_file_size = files.remote_file_size(rr_src, source_object.path) progress_view = "pv --size {size} --progress | ".format( size=src_file_size) # First step: prepare netcat listening on aux_port on dst and # forwarding all the data to block device rr_dst.run('screen -S {session_name} -d -m /bin/bash -c ' '\'nc -l {aux_port} | dd of={dst_device} bs=64k\'; ' 'sleep 1', session_name=session_name, aux_port=aux_port, dst_device=destination_object.path) # Second step: create SSH tunnel between source and 
destination rr_src.run('screen -S {session_name} -d -m ssh {ssh_opts} -L' ' {aux_port}:127.0.0.1:{aux_port} ' '{dst_user}@{dst_host}; sleep 1', session_name=session_name, ssh_opts=ssh_opts, aux_port=aux_port, dst_user=dst_user, dst_host=dst_host) # Third step: push data through the tunnel rr_src.run('/bin/bash -c \'dd if={src_file} bs=64k | ' '{progress_view} nc 127.0.0.1 {aux_port}\'', aux_port=aux_port, progress_view=progress_view, src_file=source_object.path) except remote_runner.RemoteExecutionError as e: msg = "Cannot copy {src_object} to {dst_object}: {error}" msg = msg.format(src_object=source_object, dst_object=destination_object, error=e.message) raise CopyFailed(msg) finally: try: rr_src.run('screen -X -S {session_name} quit || true', session_name=session_name) rr_dst.run('screen -X -S {session_name} quit || true', session_name=session_name) except remote_runner.RemoteExecutionError: LOG.error('Failed to close copy sessions', exc_info=True)
apache-2.0
-7,871,854,656,423,564,000
37.75974
79
0.565756
false
4.071623
false
false
false
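CopyRegularFileToBlockDevice above boils down to three shell commands spread over the two hosts: a netcat listener feeding dd on the destination, an SSH tunnel opened from the source, and a dd-to-netcat push through that tunnel. The sketch below only assembles equivalent command strings; the host names, port and paths are placeholders, and the screen session handling of the real code is left out.

def build_copy_commands(src_file, dst_device, dst_user, dst_host, port=12345):
    # Step 1, run on the destination: listen and write into the block device.
    listen = "nc -l {0} | dd of={1} bs=64k".format(port, dst_device)
    # Step 2, run on the source: forward the local port to the destination.
    tunnel = "ssh -L {0}:127.0.0.1:{0} {1}@{2}".format(port, dst_user, dst_host)
    # Step 3, run on the source: stream the file through the tunnel.
    push = "dd if={0} bs=64k | nc 127.0.0.1 {1}".format(src_file, port)
    return listen, tunnel, push

for command in build_copy_commands('/var/lib/volume.img', '/dev/vdb', 'cloud', 'dst-node'):
    print(command)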
saullocastro/pyNastran
pyNastran/bdf/dev_vectorized/cards/elements/rod/ptube.py
1
8276
from __future__ import print_function from six.moves import zip from numpy import array, zeros, unique, searchsorted, arange, pi from pyNastran.bdf.dev_vectorized.cards.elements.property import Property #from pyNastran.bdf.field_writer_8 import set_blank_if_default from pyNastran.bdf.field_writer_8 import print_card_8 from pyNastran.bdf.bdf_interface.assign_type import (integer, double, double_or_blank) class PTUBE(Property): type = 'PTUBE' def __init__(self, model): """ Defines the PTUBE object. Parameters ---------- model : BDF the BDF object """ Property.__init__(self, model) def allocate(self, ncards): self.n = ncards self.model.log.debug('%s ncards=%s' % (self.type, ncards)) float_fmt = self.model.float_fmt #: Property ID self.property_id = zeros(ncards, 'int32') self.material_id = zeros(ncards, 'int32') self.OD = zeros((ncards, 2), float_fmt) self.t = zeros(ncards, float_fmt) self.nsm = zeros(ncards, float_fmt) def add_card(self, card, comment=''): self.model.log.debug('n=%s i=%s' % (self.n, self.i)) i = self.i self.property_id[i] = integer(card, 1, 'property_id') self.material_id[i] = integer(card, 2, 'material_id') OD1 = double(card, 3, 'OD1') t = double_or_blank(card, 4, 't') if t is None: t = OD1 / 2. self.t[i] = t self.nsm[i] = double_or_blank(card, 5, 'nsm', 0.0) OD2 = double_or_blank(card, 6, 'OD2', OD1) self.OD[i, :] = [OD1, OD2] assert len(card) <= 7, 'len(PTUBE card) = %i\ncard=%s' % (len(card), card) self.i += 1 def build(self): """ :param cards: the list of PTUBE cards """ if self.n: i = self.property_id.argsort() self.property_id = self.property_id[i] self.material_id = self.material_id[i] self.OD = self.OD[i, :] self.t = self.t[i] self.nsm = self.nsm[i] unique_pids = unique(self.property_id) if len(unique_pids) != len(self.property_id): raise RuntimeError('There are duplicate PTUBE IDs...') self._cards = [] self._comments = [] else: self.property_id = array([], dtype='int32') def update(self, maps): """ maps = { 'property' : pid_map, 'material' : mid_map, } """ if self.n: nid_map = maps['node'] mid_map = maps['material'] for i, (pid, mid) in enumerate(zip(self.property_id, self.material_id)): self.property_id[i] = pid_map[pid] self.material_id[i] = mid_map[mid] #========================================================================= def get_mass_per_length_by_property_id(self, property_id=None): # L * (A * rho + nsm) i = self.get_property_index_by_property_id(property_id) A = self.A[i] mid = self.material_id[i] #mat = self.model.materials.get_material(mid) rho = self.model.materials.get_density_by_material_id(mid) nsm = self.nsm[i] return A * rho + nsm def get_area_by_property_id(self, property_id=None): i = self.get_property_index_by_property_id(property_id) return self.get_area_by_property_index(i) def get_area_by_property_index(self, i=None): area = zeros(len(i), dtype='float64') for ni, ii in enumerate(i): A = (self._area1(ii) + self._area2(ii)) / 2. area[ni] = A return area def _area1(self, i): """Gets the Area of Section 1 of the CTUBE.""" Dout = self.OD[i, 0] if self.t[i] == 0: return pi / 4. * Dout**2 Din = Dout - 2 * self.t A1 = pi / 4. * (Dout * Dout - Din * Din) return A1 def _area2(self, i): """Gets the Area of Section 2 of the CTUBE.""" Dout = self.OD[i, 1] if self.t[i] == 0: return pi / 4. * Dout**2 Din = Dout - 2 * self.t A2 = pi / 4. 
* (Dout * Dout - Din * Din) return A2 def get_non_structural_mass_by_property_id(self, property_id=None): i = self.get_property_index_by_property_id(property_id) nsm = self.nsm[i] return nsm #def get_E_by_property_id(self, property_id=None): #i = self.get_property_index_by_property_id(property_id) #material_id = self.material_id[i] #E = self.model.materials.get_E_by_material_id(material_id) #return E def get_E_by_property_id(self, property_id=None): mid = self.get_material_id_by_property_id(property_id) E = self.model.materials.get_E_by_material_id(mid) return E #def get_G_by_property_id(self, property_id=None): #i = self.get_property_index_by_property_id(property_id) #material_id = self.material_id[i] #G = self.model.materials.get_G_by_material_id(material_id) #return G def get_G_by_property_id(self, property_id=None): mid = self.get_material_id_by_property_id(property_id) G = self.model.materials.get_G_by_material_id(mid) return G def get_J_by_property_id(self, property_id=None): i = self.get_property_index_by_property_id(property_id) return self.get_J_by_property_index(i) def get_J_by_property_index(self, i=None): J = [] for ni, ii in enumerate(i): Ji = self._Ji(ii) J.append(Ji) return array(J, dtype='float64') def _Ji(self, i): Dout = self.OD[i, 0] if self.t[0] == 0.0: return pi / 8. * Dout**4 Din = Dout - 2 * self.t[i] return pi / 8. * (Dout**4 - Din**2) def get_c_by_property_id(self, property_id=None): i = self.get_property_index_by_property_id(property_id) c = self.c[i] return c def get_material_id_by_property_id(self, property_id=None): i = self.get_property_index_by_property_id(property_id) mid = self.material_id[i] return mid #========================================================================= def get_density_by_property_id(self, property_id=None): mid = self.get_material_id_by_property_id(property_id) density = self.model.materials.get_density_by_material_id(mid) return density #def get_J_by_property_id(self, property_id=None): #mid = self.get_material_id_by_property_id(property_id) #J = self.model.materials.get_J_by_material_id(mid) #return J #def get_E_by_property_id(self, property_id=None): #mid = self.get_material_id_by_property_id(property_id) #E = self.model.materials.get_E_by_material_id(mid) #return E #========================================================================= def write_card(self, bdf_file, size=8, property_id=None): if self.n: if self.n: if property_id is None: i = arange(self.n) else: assert len(unique(property_id)) == len(property_id), unique(property_id) i = searchsorted(self.property_id, property_id) for (pid, mid, (OD1, OD2), t, nsm) in zip( self.property_id, self.material_id[i], self.OD[i, :], self.t[i], self.nsm[i]): #t = set_blank_if_default(t, OD1 / 2.) #nsm = set_blank_if_default(nsm, 0.0) #OD2 = set_blank_if_default(OD2, OD1) card = ['PTUBE', pid, mid, OD1, t, nsm, OD2] bdf_file.write(print_card_8(card)) def slice_by_index(self, i): i = self._validate_slice(i) obj = PTUBE(self.model) n = len(i) obj.n = n obj.i = n #obj._cards = self._cards[i] #obj._comments = obj._comments[i] #obj.comments = obj.comments[i] obj.property_id = self.property_id[i] obj.material_id = self.material_id[i] obj.OD = self.OD[i, :] obj.t = self.t[i] obj.nsm = self.nsm[i] return obj
lgpl-3.0
-8,218,476,148,353,684,000
33.773109
95
0.531658
false
3.168453
false
false
false
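For reference, the cross-section arithmetic that the PTUBE property above implements, worked through with made-up dimensions (outer diameters, wall thickness, density and non-structural mass are all illustrative):

from math import pi

OD1, OD2 = 2.0, 2.0   # outer diameters at ends 1 and 2 (invented values)
t = 0.1               # wall thickness; the card defaults this to OD1 / 2, i.e. a solid section
rho = 0.000283        # material density (illustrative)
nsm = 0.0             # non-structural mass per unit length

def section_area(d_out, t):
    # Solid circle when t == 0, otherwise an annulus with Din = Dout - 2 * t.
    if t == 0:
        return pi / 4.0 * d_out ** 2
    d_in = d_out - 2.0 * t
    return pi / 4.0 * (d_out ** 2 - d_in ** 2)

A = (section_area(OD1, t) + section_area(OD2, t)) / 2.0   # averaged over the two ends
mass_per_length = A * rho + nsm                           # what get_mass_per_length_* returns
print(A, mass_per_length)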
calebbrown/calebcc
feedgenerator/feeds.py
1
8744
""" Syndication feed generation library -- used for generating RSS, etc. Sample usage: >>> from django.utils import feedgenerator >>> feed = feedgenerator.Rss201rev2Feed( ... title=u"Poynter E-Media Tidbits", ... link=u"http://www.poynter.org/column.asp?id=31", ... description=u"A group Weblog by the sharpest minds in online media/journalism/publishing.", ... language=u"en", ... ) >>> feed.add_item( ... title="Hello", ... link=u"http://www.holovaty.com/test/", ... description="Testing." ... ) >>> fp = open('test.rss', 'w') >>> feed.write(fp, 'utf-8') >>> fp.close() For definitions of the different versions of RSS, see: http://diveintomark.org/archives/2004/02/04/incompatible-rss """ from xmlutils import SimplerXMLGenerator from utils import rfc2822_date, rfc3339_date, get_tag_uri from base import SyndicationFeed, Enclosure class RssFeed(SyndicationFeed): mime_type = 'application/rss+xml' def write(self, outfile, encoding): handler = SimplerXMLGenerator(outfile, encoding) handler.startDocument() handler.startElement(u"rss", self.rss_attributes()) handler.startElement(u"channel", self.root_attributes()) self.add_root_elements(handler) self.write_items(handler) self.endChannelElement(handler) handler.endElement(u"rss") def rss_attributes(self): return {u"version": self._version, u"xmlns:atom": u"http://www.w3.org/2005/Atom"} def write_items(self, handler): for item in self.items: handler.startElement(u'item', self.item_attributes(item)) self.add_item_elements(handler, item) handler.endElement(u"item") def add_root_elements(self, handler): handler.addQuickElement(u"title", self.feed['title']) handler.addQuickElement(u"link", self.feed['link']) handler.addQuickElement(u"description", self.feed['description']) handler.addQuickElement(u"atom:link", None, {u"rel": u"self", u"href": self.feed['feed_url']}) if self.feed['language'] is not None: handler.addQuickElement(u"language", self.feed['language']) for cat in self.feed['categories']: handler.addQuickElement(u"category", cat) if self.feed['feed_copyright'] is not None: handler.addQuickElement(u"copyright", self.feed['feed_copyright']) handler.addQuickElement(u"lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8')) if self.feed['ttl'] is not None: handler.addQuickElement(u"ttl", self.feed['ttl']) def endChannelElement(self, handler): handler.endElement(u"channel") class RssUserland091Feed(RssFeed): _version = u"0.91" def add_item_elements(self, handler, item): handler.addQuickElement(u"title", item['title']) handler.addQuickElement(u"link", item['link']) if item['description'] is not None: handler.addQuickElement(u"description", item['description']) class Rss201rev2Feed(RssFeed): # Spec: http://blogs.law.harvard.edu/tech/rss _version = u"2.0" def add_item_elements(self, handler, item): handler.addQuickElement(u"title", item['title']) handler.addQuickElement(u"link", item['link']) if item['description'] is not None: handler.addQuickElement(u"description", item['description']) # Author information. 
if item["author_name"] and item["author_email"]: handler.addQuickElement(u"author", "%s (%s)" % \ (item['author_email'], item['author_name'])) elif item["author_email"]: handler.addQuickElement(u"author", item["author_email"]) elif item["author_name"]: handler.addQuickElement(u"dc:creator", item["author_name"], {u"xmlns:dc": u"http://purl.org/dc/elements/1.1/"}) if item['pubdate'] is not None: handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('utf-8')) if item['comments'] is not None: handler.addQuickElement(u"comments", item['comments']) if item['unique_id'] is not None: handler.addQuickElement(u"guid", item['unique_id']) if item['ttl'] is not None: handler.addQuickElement(u"ttl", item['ttl']) # Enclosure. if item['enclosure'] is not None: handler.addQuickElement(u"enclosure", '', {u"url": item['enclosure'].url, u"length": item['enclosure'].length, u"type": item['enclosure'].mime_type}) # Categories. for cat in item['categories']: handler.addQuickElement(u"category", cat) class Atom1Feed(SyndicationFeed): # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html mime_type = 'application/atom+xml; charset=utf8' ns = u"http://www.w3.org/2005/Atom" def write(self, outfile, encoding): handler = SimplerXMLGenerator(outfile, encoding) handler.startDocument() handler.startElement(u'feed', self.root_attributes()) self.add_root_elements(handler) self.write_items(handler) handler.endElement(u"feed") def root_attributes(self): if self.feed['language'] is not None: return {u"xmlns": self.ns, u"xml:lang": self.feed['language']} else: return {u"xmlns": self.ns} def add_root_elements(self, handler): handler.addQuickElement(u"title", self.feed['title']) handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']}) if self.feed['feed_url'] is not None: handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']}) handler.addQuickElement(u"id", self.feed['id']) handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('utf-8')) if self.feed['author_name'] is not None: handler.startElement(u"author", {}) handler.addQuickElement(u"name", self.feed['author_name']) if self.feed['author_email'] is not None: handler.addQuickElement(u"email", self.feed['author_email']) if self.feed['author_link'] is not None: handler.addQuickElement(u"uri", self.feed['author_link']) handler.endElement(u"author") if self.feed['subtitle'] is not None: handler.addQuickElement(u"subtitle", self.feed['subtitle']) for cat in self.feed['categories']: handler.addQuickElement(u"category", "", {u"term": cat}) if self.feed['feed_copyright'] is not None: handler.addQuickElement(u"rights", self.feed['feed_copyright']) def write_items(self, handler): for item in self.items: handler.startElement(u"entry", self.item_attributes(item)) self.add_item_elements(handler, item) handler.endElement(u"entry") def add_item_elements(self, handler, item): handler.addQuickElement(u"title", item['title']) handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"}) if item['pubdate'] is not None: handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('utf-8')) # Author information. 
if item['author_name'] is not None: handler.startElement(u"author", {}) handler.addQuickElement(u"name", item['author_name']) if item['author_email'] is not None: handler.addQuickElement(u"email", item['author_email']) if item['author_link'] is not None: handler.addQuickElement(u"uri", item['author_link']) handler.endElement(u"author") # Unique ID. if item['unique_id'] is not None: unique_id = item['unique_id'] else: unique_id = get_tag_uri(item['link'], item['pubdate']) handler.addQuickElement(u"id", unique_id) # Summary. if item['description'] is not None: handler.addQuickElement(u"summary", item['description'], {u"type": u"html"}) # Enclosure. if item['enclosure'] is not None: handler.addQuickElement(u"link", '', {u"rel": u"enclosure", u"href": item['enclosure'].url, u"length": item['enclosure'].length, u"type": item['enclosure'].mime_type}) # Categories. for cat in item['categories']: handler.addQuickElement(u"category", u"", {u"term": cat}) # Rights. if item['item_copyright'] is not None: handler.addQuickElement(u"rights", item['item_copyright'])
bsd-3-clause
-3,889,700,314,081,301,500
41.653659
123
0.613564
false
3.669324
false
false
false
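The module docstring above already demonstrates the RSS flavour; the Atom flavour is driven the same way. A hedged sketch, assuming the file is importable as `feeds` alongside its xmlutils/utils/base helpers (which track Django's feedgenerator base class), written Python 2 style like the module itself:

# Assumes `feeds` is this module; names, URLs and dates below are illustrative.
import datetime
import feeds

feed = feeds.Atom1Feed(
    title=u"Example feed",
    link=u"http://example.com/",
    feed_url=u"http://example.com/atom.xml",
    description=u"Illustrative entries only.",
)
feed.add_item(
    title=u"Hello",
    link=u"http://example.com/hello/",
    description=u"First post.",
    pubdate=datetime.datetime(2012, 1, 1, 12, 0, 0),
)
fp = open('test.atom', 'w')
feed.write(fp, 'utf-8')
fp.close()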
riklaunim/django-custom-multisite
django/contrib/auth/models.py
1
17160
import urllib from django.core.exceptions import ImproperlyConfigured from django.core.mail import send_mail from django.db import models from django.db.models.manager import EmptyManager from django.utils.crypto import get_random_string from django.utils.encoding import smart_str from django.utils.translation import ugettext_lazy as _ from django.utils import timezone from django.contrib import auth # UNUSABLE_PASSWORD is still imported here for backwards compatibility from django.contrib.auth.hashers import ( check_password, make_password, is_password_usable, UNUSABLE_PASSWORD) from django.contrib.auth.signals import user_logged_in from django.contrib.contenttypes.models import ContentType from audioapp.apps import multisite def update_last_login(sender, user, **kwargs): """ A signal receiver which updates the last_login date for the user logging in. """ user.last_login = timezone.now() user.save() user_logged_in.connect(update_last_login) class SiteProfileNotAvailable(Exception): pass class PermissionManager(models.Manager): def get_by_natural_key(self, codename, app_label, model): return self.get( codename=codename, content_type=ContentType.objects.get_by_natural_key(app_label, model), ) class Permission(models.Model): """ The permissions system provides a way to assign permissions to specific users and groups of users. The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows: - The "add" permission limits the user's ability to view the "add" form and add an object. - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object. - The "delete" permission limits the ability to delete an object. Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date." Three basic permissions -- add, change and delete -- are automatically created for each Django model. """ name = models.CharField(_('name'), max_length=50) content_type = models.ForeignKey(ContentType) codename = models.CharField(_('codename'), max_length=100) objects = PermissionManager() class Meta: verbose_name = _('permission') verbose_name_plural = _('permissions') unique_together = (('content_type', 'codename'),) ordering = ('content_type__app_label', 'content_type__model', 'codename') def __unicode__(self): return u"%s | %s | %s" % ( unicode(self.content_type.app_label), unicode(self.content_type), unicode(self.name)) def natural_key(self): return (self.codename,) + self.content_type.natural_key() natural_key.dependencies = ['contenttypes.contenttype'] class GroupManager(models.Manager): """ The manager for the auth's Group model. """ def get_by_natural_key(self, name): return self.get(name=name) class Group(models.Model): """ Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups. A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission. Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. 
For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only email messages. """ name = models.CharField(_('name'), max_length=80, unique=True) permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True) objects = GroupManager() class Meta: verbose_name = _('group') verbose_name_plural = _('groups') def __unicode__(self): return self.name def natural_key(self): return (self.name,) class UserManager(multisite.CurrentSiteManager, models.Manager): @classmethod def normalize_email(cls, email): """ Normalize the address by lowercasing the domain part of the email address. """ email = email or '' try: email_name, domain_part = email.strip().rsplit('@', 1) except ValueError: pass else: email = '@'.join([email_name, domain_part.lower()]) return email def create_user(self, username, email=None, password=None): """ Creates and saves a User with the given username, email and password. """ now = timezone.now() if not username: raise ValueError('The given username must be set') email = UserManager.normalize_email(email) user = self.model(username=username, email=email, is_staff=False, is_active=True, is_superuser=False, last_login=now, date_joined=now) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, username, email, password): u = self.create_user(username, email, password) u.is_staff = True u.is_active = True u.is_superuser = True u.save(using=self._db) return u def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'): """ Generates a random password with the given length and given allowed_chars. Note that the default value of allowed_chars does not have "I" or "O" or letters and digits that look similar -- just to avoid confusion. """ return get_random_string(length, allowed_chars) def get_by_natural_key(self, username): return self.get(username=username) # A few helper functions for common logic between User and AnonymousUser. def _user_get_all_permissions(user, obj): permissions = set() for backend in auth.get_backends(): if hasattr(backend, "get_all_permissions"): if obj is not None: permissions.update(backend.get_all_permissions(user, obj)) else: permissions.update(backend.get_all_permissions(user)) return permissions def _user_has_perm(user, perm, obj): anon = user.is_anonymous() active = user.is_active for backend in auth.get_backends(): if anon or active or backend.supports_inactive_user: if hasattr(backend, "has_perm"): if obj is not None: if backend.has_perm(user, perm, obj): return True else: if backend.has_perm(user, perm): return True return False def _user_has_module_perms(user, app_label): anon = user.is_anonymous() active = user.is_active for backend in auth.get_backends(): if anon or active or backend.supports_inactive_user: if hasattr(backend, "has_module_perms"): if backend.has_module_perms(user, app_label): return True return False class User(multisite.MultiSitesMixin, multisite.SiteFieldMixin, models.Model): """ Users within the Django authentication system are represented by this model. Username and password are required. Other fields are optional. """ username = models.CharField(_('username'), max_length=30, help_text=_('Required. 30 characters or fewer. 
Letters, numbers and ' '@/./+/-/_ characters')) first_name = models.CharField(_('first name'), max_length=30, blank=True) last_name = models.CharField(_('last name'), max_length=30, blank=True) email = models.EmailField(_('e-mail address'), blank=True) password = models.CharField(_('password'), max_length=128) is_staff = models.BooleanField(_('staff status'), default=False, help_text=_('Designates whether the user can log into this admin ' 'site.')) is_active = models.BooleanField(_('active'), default=True, help_text=_('Designates whether this user should be treated as ' 'active. Unselect this instead of deleting accounts.')) is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_('Designates that this user has all permissions without ' 'explicitly assigning them.')) last_login = models.DateTimeField(_('last login'), default=timezone.now) date_joined = models.DateTimeField(_('date joined'), default=timezone.now) groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True, help_text=_('The groups this user belongs to. A user will ' 'get all permissions granted to each of ' 'his/her group.')) user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True, help_text='Specific permissions for this user.') objects = UserManager() class Meta: verbose_name = _('user') verbose_name_plural = _('users') unique_together = ('username', 'site') def __unicode__(self): return self.username def natural_key(self): return (self.username,) def get_absolute_url(self): return "/users/%s/" % urllib.quote(smart_str(self.username)) def is_anonymous(self): """ Always returns False. This is a way of comparing User objects to anonymous users. """ return False def is_authenticated(self): """ Always return True. This is a way to tell if the user has been authenticated in templates. """ return True def get_full_name(self): """ Returns the first_name plus the last_name, with a space in between. """ full_name = u'%s %s' % (self.first_name, self.last_name) return full_name.strip() def set_password(self, raw_password): self.password = make_password(raw_password) def check_password(self, raw_password): """ Returns a boolean of whether the raw_password was correct. Handles hashing formats behind the scenes. """ def setter(raw_password): self.set_password(raw_password) self.save() return check_password(raw_password, self.password, setter) def set_unusable_password(self): # Sets a value that will never be a valid hash self.password = make_password(None) def has_usable_password(self): return is_password_usable(self.password) def get_group_permissions(self, obj=None): """ Returns a list of permission strings that this user has through his/her groups. This method queries all available auth backends. If an object is passed in, only permissions matching this object are returned. """ permissions = set() for backend in auth.get_backends(): if hasattr(backend, "get_group_permissions"): if obj is not None: permissions.update(backend.get_group_permissions(self, obj)) else: permissions.update(backend.get_group_permissions(self)) return permissions def get_all_permissions(self, obj=None): return _user_get_all_permissions(self, obj) def has_perm(self, perm, obj=None): """ Returns True if the user has the specified permission. This method queries all available auth backends, but returns immediately if any backend returns True. Thus, a user who has permission from a single auth backend is assumed to have permission in general. 
If an object is provided, permissions for this specific object are checked. """ # Active superusers have all permissions. if self.is_active and self.is_superuser: return True # Otherwise we need to check the backends. return _user_has_perm(self, perm, obj) def has_perms(self, perm_list, obj=None): """ Returns True if the user has each of the specified permissions. If object is passed, it checks if the user has all required perms for this object. """ for perm in perm_list: if not self.has_perm(perm, obj): return False return True def has_module_perms(self, app_label): """ Returns True if the user has any permissions in the given app label. Uses pretty much the same logic as has_perm, above. """ # Active superusers have all permissions. if self.is_active and self.is_superuser: return True return _user_has_module_perms(self, app_label) def email_user(self, subject, message, from_email=None): """ Sends an email to this User. """ send_mail(subject, message, from_email, [self.email]) def get_profile(self): """ Returns site-specific profile for this user. Raises SiteProfileNotAvailable if this site does not allow profiles. """ if not hasattr(self, '_profile_cache'): from django.conf import settings if not getattr(settings, 'AUTH_PROFILE_MODULE', False): raise SiteProfileNotAvailable( 'You need to set AUTH_PROFILE_MODULE in your project ' 'settings') try: app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.') except ValueError: raise SiteProfileNotAvailable( 'app_label and model_name should be separated by a dot in ' 'the AUTH_PROFILE_MODULE setting') try: model = models.get_model(app_label, model_name) if model is None: raise SiteProfileNotAvailable( 'Unable to load the profile model, check ' 'AUTH_PROFILE_MODULE in your project settings') self._profile_cache = model._default_manager.using( self._state.db).get(user__id__exact=self.id) self._profile_cache.user = self except (ImportError, ImproperlyConfigured): raise SiteProfileNotAvailable return self._profile_cache class AnonymousUser(object): id = None username = '' is_staff = False is_active = False is_superuser = False _groups = EmptyManager() _user_permissions = EmptyManager() def __init__(self): pass def __unicode__(self): return 'AnonymousUser' def __str__(self): return unicode(self).encode('utf-8') def __eq__(self, other): return isinstance(other, self.__class__) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return 1 # instances always return the same hash value def save(self): raise NotImplementedError def delete(self): raise NotImplementedError def set_password(self, raw_password): raise NotImplementedError def check_password(self, raw_password): raise NotImplementedError def _get_groups(self): return self._groups groups = property(_get_groups) def _get_user_permissions(self): return self._user_permissions user_permissions = property(_get_user_permissions) def get_group_permissions(self, obj=None): return set() def get_all_permissions(self, obj=None): return _user_get_all_permissions(self, obj=obj) def has_perm(self, perm, obj=None): return _user_has_perm(self, perm, obj=obj) def has_perms(self, perm_list, obj=None): for perm in perm_list: if not self.has_perm(perm, obj): return False return True def has_module_perms(self, module): return _user_has_module_perms(self, module) def is_anonymous(self): return True def is_authenticated(self): return False
bsd-3-clause
6,168,093,177,801,387,000
34.824635
79
0.617599
false
4.4641
false
false
false
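The permission helpers above (_user_get_all_permissions, _user_has_perm, _user_has_module_perms) simply poll every configured authentication backend with hasattr checks and stop at the first True. A minimal backend that this loop would consult might look like the sketch below; the class name, the permission string and the hard-coded rule are invented, and the class would still need to be listed in AUTHENTICATION_BACKENDS:

class ReadOnlyExampleBackend(object):
    """Illustrative backend: grants every active user a single 'view' permission."""
    # Mirrors the supports_inactive_user attribute checked by _user_has_perm above.
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        return None  # this backend never logs anyone in

    def get_all_permissions(self, user_obj, obj=None):
        return {'app.view_thing'} if user_obj.is_active else set()

    def has_perm(self, user_obj, perm, obj=None):
        return perm in self.get_all_permissions(user_obj, obj)

    def has_module_perms(self, user_obj, app_label):
        return user_obj.is_active and app_label == 'app'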
caktus/django-opendebates
opendebates/tests/test_flatpage_metadata_override.py
1
2796
from django.contrib.flatpages.models import FlatPage from django.contrib.sites.models import Site from django.test import TestCase from django.utils.html import escape from opendebates.models import FlatPageMetadataOverride from opendebates import site_defaults from .factories import SiteFactory, DebateFactory class FlatPageTest(TestCase): def setUp(self): self.site = SiteFactory() self.debate = DebateFactory(site=self.site) self.page1_content = 'About the site' self.page1 = FlatPage(url='/{}/about/'.format(self.debate.prefix), title='About', content=self.page1_content) self.page1.save() self.page1.sites.add(self.site) self.page2_content = '[An embedded video]' self.page2 = FlatPage(url='/{}/watch/'.format(self.debate.prefix), title='Watch Now!', content=self.page2_content) self.page2.save() self.page2.sites.add(self.site) FlatPageMetadataOverride(page=self.page2).save() def tearDown(self): Site.objects.clear_cache() def test_metadata_not_overridden(self): rsp = self.client.get(self.page1.url) self.assertContains(rsp, self.page1_content) self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE)) self.assertContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION)) self.assertContains(rsp, escape(site_defaults.FACEBOOK_IMAGE)) def test_default_metadata_overrides(self): rsp = self.client.get(self.page2.url) self.assertContains(rsp, self.page2_content) self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_TITLE)) self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_SITE_DESCRIPTION)) self.assertNotContains(rsp, escape(site_defaults.FACEBOOK_IMAGE)) self.assertNotContains(rsp, escape(site_defaults.TWITTER_IMAGE)) self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_TITLE)) self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_DESCRIPTION)) self.assertContains(rsp, escape(site_defaults.FLATPAGE_FACEBOOK_IMAGE)) self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE)) def test_custom_metadata_overrides(self): FlatPageMetadataOverride( page=self.page1, facebook_title='Foo! Foo! Foo!', twitter_description='lorem ipsum dolor sit amet').save() rsp = self.client.get(self.page1.url) self.assertContains(rsp, escape('Foo! Foo! Foo!')) self.assertContains(rsp, escape('lorem ipsum dolor sit amet')) self.assertContains(rsp, escape(site_defaults.FLATPAGE_TWITTER_IMAGE))
apache-2.0
-2,610,047,757,008,014,300
44.836066
85
0.675966
false
3.814461
true
false
false
patrickm/chromium.src
chrome/common/extensions/docs/server2/future.py
1
1238
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

_no_value = object()


def Collect(futures):
  '''Creates a Future which returns a list of results from each Future in
  |futures|.
  '''
  return Future(callback=lambda: [f.Get() for f in futures])


class Future(object):
  '''Stores a value, error, or callback to be used later.
  '''
  def __init__(self, value=_no_value, callback=None, exc_info=None):
    self._value = value
    self._callback = callback
    self._exc_info = exc_info
    if (self._value is _no_value and
        self._callback is None and
        self._exc_info is None):
      raise ValueError('Must have either a value, error, or callback.')

  def Get(self):
    '''Gets the stored value, error, or callback contents.
    '''
    if self._value is not _no_value:
      return self._value
    if self._exc_info is not None:
      self._Raise()
    try:
      self._value = self._callback()
      return self._value
    except:
      self._exc_info = sys.exc_info()
      self._Raise()

  def _Raise(self):
    exc_info = self._exc_info
    raise exc_info[0], exc_info[1], exc_info[2]
bsd-3-clause
-2,560,667,322,118,759,400
26.511111
73
0.638934
false
3.619883
false
false
false
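A short usage sketch for the Future/Collect helpers above; the values and the failing callback are made up, and the import name `future` is hypothetical (the module is Python 2 only, as the three-argument raise in _Raise shows):

from future import Future, Collect

a = Future(value=1)                    # already-resolved value
b = Future(callback=lambda: 2 + 3)     # evaluated lazily, on the first Get()
print(Collect([a, b]).Get())           # -> [1, 5]

c = Future(callback=lambda: 1 / 0)
for attempt in (1, 2):
    try:
        c.Get()                        # first call runs the callback; later calls re-raise
    except ZeroDivisionError:
        print("attempt %d raised ZeroDivisionError" % attempt)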
cliali/py2
a.py
1
1578
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Simple Bot to reply to Telegram messages. This is built on the API wrapper, see # echobot2.py to see the same example built on the telegram.ext bot framework. # This program is dedicated to the public domain under the CC0 license. import logging import telegram from telegram.error import NetworkError, Unauthorized from time import sleep update_id = None def main(): global update_id # Telegram Bot Authorization Token bot = telegram.Bot('277679081:AAGk3IXlId9PKUn3n_5wrfrUIR_mgsUVCeE') # get the first pending update_id, this is so we can skip over it in case # we get an "Unauthorized" exception. try: update_id = bot.getUpdates()[0].update_id except IndexError: update_id = None logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') while True: try: echo(bot) except NetworkError: sleep(1) except Unauthorized: # The user has removed or blocked the bot. update_id += 1 def echo(bot): global update_id # Request updates after the last update_id for update in bot.getUpdates(offset=update_id, timeout=10): # chat_id is required to reply to any message chat_id = update.message.chat_id update_id = update.update_id + 1 if update.message: # your bot can receive updates without messages # Reply to the message update.message.reply_text(update.message.text) if __name__ == '__main__': main()
apache-2.0
2,133,397,267,722,731,300
28.773585
86
0.653359
false
3.793269
false
false
false
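The echo loop above acknowledges Telegram updates by asking getUpdates for offset = last update_id + 1. That bookkeeping can be seen in isolation with a fake update source; nothing below touches the real API and all names and ids are invented:

# Stand-in for bot.getUpdates(offset=...): returns only updates with id >= offset.
PENDING = [{"update_id": 7, "text": "hi"},
           {"update_id": 8, "text": "hello"},
           {"update_id": 9, "text": "bye"}]

def get_updates(offset=None):
    return [u for u in PENDING if offset is None or u["update_id"] >= offset]

offset = None
for _ in range(2):                         # two polling rounds
    batch = get_updates(offset)
    for update in batch:
        print("echo: " + update["text"])
        offset = update["update_id"] + 1   # acknowledge: the next poll skips this update
    print("next offset: %s" % offset)
# The second round returns nothing because every pending update_id < offset.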
flavoi/diventi
diventi/landing/views.py
1
3927
from itertools import chain from django.shortcuts import render, redirect, resolve_url from django.views.generic.detail import DetailView from django.views.generic import ListView, TemplateView from django.views.generic.edit import CreateView from django.utils.translation import ugettext_lazy as _ from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.http import HttpResponseNotFound from diventi.accounts.models import DiventiUser from diventi.accounts.forms import DiventiUserInitForm from diventi.products.models import Product from diventi.blog.models import Article from diventi.feedbacks.models import Survey, Answer from diventi.core.views import StaffRequiredMixin from .models import ( Section, AboutArticle, PolicyArticle, ) class LandingSearchView(ListView): """ Search for every content in the project. """ template_name = "landing/search_results_quick.html" context_object_name = 'results' model = Section def get_queryset(self): results = super(LandingSearchView, self).get_queryset() query = self.request.GET.get('q') if query: articles = Article.search(self, query) products = Product.search(self, query) users = DiventiUser.search(self, query) results = list(chain(products, articles, users)) else: results = None return results def get_context_data(self, **kwargs): context = super(LandingSearchView, self).get_context_data(**kwargs) context['search_query'] = self.request.GET.get('q') return context class DashboardView(StaffRequiredMixin, ListView): """ Report relevant piece of contents of any supported app. """ template_name = "landing/analytics_quick.html" context_object_name = 'results' model = Section def get_queryset(self): results = super(DashboardView, self).get_queryset() articles = Article.reporting(self) products = Product.reporting(self) users = DiventiUser.reporting(self) results = list(chain(users,articles, products, )) return results def get_context_data(self, **kwargs): context = super(DashboardView, self).get_context_data(**kwargs) featured_section = Section.objects.featured() context['featured_section'] = featured_section return context def get_landing_context(request): sections = Section.objects.not_featured() featured_section = Section.objects.featured() if featured_section: pass elif sections.exists(): featured_section = sections.first() sections = sections.exclude(id=featured_section.id) else: return HttpResponseNotFound(_('This page is not available yet.')) context = { 'sections': sections, 'featured_section': featured_section, } return context class LandingTemplateView(TemplateView): """ Renders the landing page with all necessary context. """ template_name = "landing/landing_quick.html" def get_context_data(self, **kwargs): context = super(LandingTemplateView, self).get_context_data(**kwargs) landing_context = get_landing_context(self.request) context = {**context, **landing_context} # Merge the two dictionaries return context class AboutArticleDetailView(DetailView): """ Renders the 'about us' article and the content related to it. """ model = AboutArticle template_name = "landing/about_article_quick.html" class PolicyArticleDetailView(DetailView): """ Renders the policy article and the content related to it. """ model = PolicyArticle template_name = "landing/about_article_quick.html"
apache-2.0
2,472,953,982,382,659,600
30.725
77
0.675325
false
4.16879
false
false
false
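LandingSearchView above fans one query out to several models and merges the per-model results with itertools.chain. The merge itself is plain Python and can be seen with lists standing in for querysets (all data invented):

from itertools import chain

# Pretend these are the per-model search results (Product, Article, DiventiUser).
products = ["Product: Starter Kit"]
articles = ["Article: Release notes", "Article: Devlog #3"]
users = ["User: alice"]

results = list(chain(products, articles, users))
print(results)       # one flat list, in the order the sources were chained
print(len(results))  # 4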
Uberi/botty-bot-bot-bot
src/plugins/timezones.py
1
5164
#!/usr/bin/env python3 import re from datetime import datetime, date import pytz from .utilities import BasePlugin from .utilities import clockify, untag_word timezone_abbreviations = { "est": pytz.timezone("Canada/Eastern"), "edt": pytz.timezone("Canada/Eastern"), "atlantic": pytz.timezone("Canada/Eastern"), "eastern": pytz.timezone("Canada/Eastern"), "toronto": pytz.timezone("Canada/Eastern"), "waterloo": pytz.timezone("Canada/Eastern"), "ontario": pytz.timezone("Canada/Eastern"), "ny": pytz.timezone("US/Eastern"), "pst": pytz.timezone("Canada/Pacific"), "vancouver": pytz.timezone("Canada/Pacific"), "pacific": pytz.timezone("US/Pacific-New"), "sf": pytz.timezone("US/Pacific-New"), "la": pytz.timezone("US/Pacific-New"), "california": pytz.timezone("US/Pacific-New"), } other_timezones = ( ("toronto", pytz.timezone("Canada/Eastern")), ("vancouver", pytz.timezone("Canada/Pacific")), ("utc", pytz.utc), ) class TimezonesPlugin(BasePlugin): """ Timezone conversion plugin for Botty. Example invocations: #general | Me: 4pm local #general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock4: 16:00 :point_right: *TORONTO* :clock4: 16:00 - *VANCOUVER* :clock1: 13:00 - *UTC* :clock8: 20:00 #general | Me: 6:23pm pst #general | Botty: *PST* :clock630: 18:23 :point_right: *TORONTO* :clock930: 21:23 - *VANCOUVER* :clock630: 18:23 - *UTC* :clock130: 1:23 (tomorrow) #general | Me: 6:23 here #general | Botty: *EASTERN DAYLIGHT TIME* (Μe's local time) :clock630: 6:23 :point_right: *TORONTO* :clock630: 6:23 - *VANCOUVER* :clock330: 3:23 - *UTC* :clock1030: 10:23 #general | Me: 8pm toronto #general | Botty: *TORONTO* :clock8: 20:00 :point_right: *TORONTO* :clock8: 20:00 - *VANCOUVER* :clock5: 17:00 - *UTC* :clock12: 0:00 (tomorrow) """ def __init__(self, bot): super().__init__(bot) def on_message(self, m): if not m.is_user_text_message: return False match = re.search(r"\b(\d\d?)(?::(\d\d))?(?:\s*(am|pm))?\s+(\w+)", m.text, re.IGNORECASE) if not match: return False # get time of day if not match.group(2) and not match.group(3): return False # ignore plain numbers like "4 potato" hour = int(match.group(1)) minute = 0 if match.group(2) is None else int(match.group(2)) if not (0 <= hour <= 23) or not (0 <= minute <= 59): return False if match.group(3) is not None and match.group(3).lower() == "pm": if not (1 <= hour <= 12): return False hour = (hour % 12) + 12 today = date.today() naive_timestamp = datetime(today.year, today.month, today.day, hour, minute) timezone_name = match.group(4) # get timezone and localized timestamp if timezone_name.lower() in timezone_abbreviations: # use the specified timezone timezone = timezone_abbreviations[timezone_name.lower()] timezone_is_from_user_info = False elif timezone_name.lower() in {"local", "here"}: # use the user's local timezone, specified in their profile user_info = self.get_user_info_by_id(m.user_id) try: timezone = pytz.timezone(user_info.get("tz")) except: # user does not have a valid timezone return False timezone_name = user_info.get("tz_label") timezone_is_from_user_info = True else: return False timestamp = timezone.localize(naive_timestamp) # perform timezone conversions timezone_conversions = [] for other_timezone_name, other_timezone in other_timezones: converted_timestamp = timestamp.astimezone(other_timezone) if converted_timestamp.date() > timestamp.date(): timezone_conversions.append("*{}* :{}: {}:{:>02} (tomorrow)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute)) elif 
converted_timestamp.date() < timestamp.date(): timezone_conversions.append("*{}* :{}: {}:{:>02} (yesterday)".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute)) else: timezone_conversions.append("*{}* :{}: {}:{:>02}".format(other_timezone_name.upper(), clockify(converted_timestamp), converted_timestamp.hour, converted_timestamp.minute)) if timezone_is_from_user_info: selected_time = "(timezone from {}'s profile) *{}* :{}: {}:{:>02}".format(untag_word(self.get_user_name_by_id(m.user_id)), timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute) else: selected_time = "*{}* :{}: {}:{:>02}".format(timezone_name.upper(), clockify(timestamp), timestamp.hour, timestamp.minute) self.respond_raw("{} :point_right: {}".format(selected_time, " - ".join(timezone_conversions))) return True
mit
1,056,715,963,307,125,400
49.607843
212
0.610616
false
3.418543
false
false
false
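The conversion logic above boils down to pytz's localize/astimezone pair plus a date comparison to decide whether to append "(tomorrow)" or "(yesterday)". A compact sketch of that step with a made-up input time:

from datetime import datetime, date
import pytz

eastern = pytz.timezone("Canada/Eastern")
utc = pytz.utc

today = date.today()
naive = datetime(today.year, today.month, today.day, 20, 0)   # "8pm toronto", invented
stamp = eastern.localize(naive)          # attach the source timezone
converted = stamp.astimezone(utc)        # convert to the target timezone

suffix = ""
if converted.date() > stamp.date():
    suffix = " (tomorrow)"
elif converted.date() < stamp.date():
    suffix = " (yesterday)"
print("{}:{:>02}{}".format(converted.hour, converted.minute, suffix))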
muttiopenbts/fusion
fusion_level02_4.py
1
1932
#!/usr/bin/python ''' Simple script to interact with fusion level 02 challenge network daemon, #!/usr/bin/python mkocbayi@gmail.com ''' from pwn import * import sys #Use this hexdump lib because pwntools hexdump is too slow from hexdump import * def doMode(mode): # Either E or Q print 'Sending mode call: {}'.format(mode) #Specify encryption function io.send(mode) def doEncryption(message, fake_message_size=None): doMode('E') if fake_message_size is not None: message_size = fake_message_size else: message_size = len(message) #Specify message size as little endian 8(d) = \x08\x00\x00\x00 encryption_size_bytes = p32(message_size) #Use p32, p64, or pack print 'Sending message size as bytes\n{}'.format(encryption_size_bytes.encode('hex')) print 'Sending message size as bytes\n{}'.format(unpack(encryption_size_bytes)) #Specify size of message to be encrypted io.send(encryption_size_bytes) #Generate message and send print 'Sending message\n{}'.format(hexdump(message)) io.send(message) data = io.recvregex('your file --]\n') log.info(data) #Server sends message size as 4 bytes little endian data = io.recvn(4) log.info('Received encrypted message size as bytes\n{}'.format(data.encode('hex'))) log.info('Size in integer\n{}'.format(unpack(data))) encrypted_message = io.recvn(message_size) log.info('Received encrypted message\n{}'.format(hexdump(encrypted_message))) return encrypted_message if __name__ == "__main__": host = sys.argv[1] port = sys.argv[2] io = remote(host,int(port)) #size = 32*4096 # No crash # xor key is 32*4 = 128 bytes message_size = 32*4096+100 # crash message = cyclic(message_size) #Generate unique string to help determin payload register overwrite xor_message = doEncryption(message) message = doEncryption(xor_message) doMode('Q')
gpl-3.0
536,043,605,942,736,000
36.153846
102
0.683747
false
3.551471
false
false
false
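The p32() call above is just a little-endian 4-byte pack of the message length. The same framing can be reproduced with the standard struct module, shown here with the oversized length the script uses (no pwntools required):

import struct

message_size = 32 * 4096 + 100                 # the crashing length used above
size_bytes = struct.pack("<I", message_size)   # little-endian unsigned 32-bit, like p32()
print(repr(size_bytes))                        # b'd\x00\x02\x00'  (0x00020064 little-endian)
print(struct.unpack("<I", size_bytes)[0])      # 131172, the round trip like unpack()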
xiaonanln/myleetcode-python
src/Sudoku Solver.py
1
2697
class Solution: # @param board, a 9x9 2D array # Solve the Sudoku by modifying the input board in-place. # Do not return any value. def solveSudoku(self, board): rowUsable = [set(xrange(1, 10)) for i in xrange(9)] colUsable = [set(xrange(1, 10)) for i in xrange(9)] blockUsable = [set(xrange(1, 10)) for i in xrange(9)] __board = board board = [ [ int(c) if c != '.' else None for c in row ] for row in board] for row in xrange(9): boardrow = board[row] for col in xrange(9): n = boardrow[col] if n is None: continue rowUsable[row].remove(n) colUsable[col].remove(n) blockindex = (row // 3) * 3 + (col // 3) blockUsable[blockindex].remove(n) self.rowUsable = rowUsable self.colUsable = colUsable self.blockUsable = blockUsable r, c = 0, 0 self.solve(board, r, c) for i, row in enumerate(board): __board[i] = ''.join( str(n) for n in row) def solve(self, board, r, c): if c == 9: c = 0 r += 1 if r == 9: return True if board[r][c] is None: bi = (r // 3) * 3 + (c // 3) usable = self.rowUsable[r] & self.colUsable[c] & self.blockUsable[bi] # if r == 1: print self.rowUsable[1], usable for n in usable: # if r == 1: print 'using', n board[r][c] = n self.rowUsable[r].remove(n) self.colUsable[c].remove(n) self.blockUsable[bi].remove(n) if self.solve(board, r, c+1): return True board[r][c] = None self.rowUsable[r].add(n) self.colUsable[c].add(n) self.blockUsable[bi].add(n) return False else: return self.solve(board, r, c + 1) E = '.' # board = [ # [5, 3, E, E, 7, E, E, E, E], # [6, E, E, 1, 9, 5, E, E, E], # [E, 9, 8, E, E, E, E, 6, E], # [8, E, E, E, 6, E, E, E, 3], # [4, E, E, 8, E, 3, E, E, 1], # [7, E, E, E, 2, E, E, E, 6], # [E, 6, E, E, E, E, 2, 8, E], # [E, E, E, 4, 1, 9, E, E, 5], # [E, E, E, E, 8, E, E, 7, 9], # ] board = ["..9748...","7........",".2.1.9...","..7...24.",".64.1.59.",".98...3..","...8.3.2.","........6","...2759.."] Solution().solveSudoku(board) print '\n'.join(board)
apache-2.0
8,919,941,869,076,930,000
31.119048
118
0.411568
false
3.082286
false
false
false
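The solver above keys its three constraint sets by row, column and 3x3 block, with the block index computed as (row // 3) * 3 + (col // 3), and a cell's candidates are the intersection of the three usable sets. A tiny check of both pieces (the example sets are invented):

def block_index(row, col):
    return (row // 3) * 3 + (col // 3)

# Top-left block is 0, top-right is 2, bottom-right is 8.
print(block_index(0, 0), block_index(1, 8), block_index(8, 8))   # 0 2 8

# Candidates for a cell: whatever is still usable in its row, column and block.
row_usable = {1, 2, 5, 9}
col_usable = {2, 3, 5}
block_usable = {2, 5, 7}
print(row_usable & col_usable & block_usable)                    # {2, 5}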
kennethreitz/pipenv
pipenv/patched/notpip/_internal/network/download.py
1
6458
"""Download files with progress indicators. """ import cgi import logging import mimetypes import os from pipenv.patched.notpip._vendor import requests from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE from pipenv.patched.notpip._internal.models.index import PyPI from pipenv.patched.notpip._internal.network.cache import is_from_cache from pipenv.patched.notpip._internal.network.utils import response_chunks from pipenv.patched.notpip._internal.utils.misc import ( format_size, redact_auth_from_url, splitext, ) from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.ui import DownloadProgressProvider if MYPY_CHECK_RUNNING: from typing import Iterable, Optional from pipenv.patched.notpip._vendor.requests.models import Response from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.network.session import PipSession logger = logging.getLogger(__name__) def _get_http_response_size(resp): # type: (Response) -> Optional[int] try: return int(resp.headers['content-length']) except (ValueError, KeyError, TypeError): return None def _prepare_download( resp, # type: Response link, # type: Link progress_bar # type: str ): # type: (...) -> Iterable[bytes] total_length = _get_http_response_size(resp) if link.netloc == PyPI.file_storage_domain: url = link.show_url else: url = link.url_without_fragment logged_url = redact_auth_from_url(url) if total_length: logged_url = '{} ({})'.format(logged_url, format_size(total_length)) if is_from_cache(resp): logger.info("Using cached %s", logged_url) else: logger.info("Downloading %s", logged_url) if logger.getEffectiveLevel() > logging.INFO: show_progress = False elif is_from_cache(resp): show_progress = False elif not total_length: show_progress = True elif total_length > (40 * 1000): show_progress = True else: show_progress = False chunks = response_chunks(resp, CONTENT_CHUNK_SIZE) if not show_progress: return chunks return DownloadProgressProvider( progress_bar, max=total_length )(chunks) def sanitize_content_filename(filename): # type: (str) -> str """ Sanitize the "filename" value from a Content-Disposition header. """ return os.path.basename(filename) def parse_content_disposition(content_disposition, default_filename): # type: (str, str) -> str """ Parse the "filename" value from a Content-Disposition header, and return the default filename if the result is empty. """ _type, params = cgi.parse_header(content_disposition) filename = params.get('filename') if filename: # We need to sanitize the filename to prevent directory traversal # in case the filename contains ".." path parts. filename = sanitize_content_filename(filename) return filename or default_filename def _get_http_response_filename(resp, link): # type: (Response, Link) -> str """Get an ideal filename from the given HTTP response, falling back to the link filename if not provided. 
""" filename = link.filename # fallback # Have a look at the Content-Disposition header for a better guess content_disposition = resp.headers.get('content-disposition') if content_disposition: filename = parse_content_disposition(content_disposition, filename) ext = splitext(filename)[1] # type: Optional[str] if not ext: ext = mimetypes.guess_extension( resp.headers.get('content-type', '') ) if ext: filename += ext if not ext and link.url != resp.url: ext = os.path.splitext(resp.url)[1] if ext: filename += ext return filename def _http_get_download(session, link): # type: (PipSession, Link) -> Response target_url = link.url.split('#', 1)[0] resp = session.get( target_url, # We use Accept-Encoding: identity here because requests # defaults to accepting compressed responses. This breaks in # a variety of ways depending on how the server is configured. # - Some servers will notice that the file isn't a compressible # file and will leave the file alone and with an empty # Content-Encoding # - Some servers will notice that the file is already # compressed and will leave the file alone and will add a # Content-Encoding: gzip header # - Some servers won't notice anything at all and will take # a file that's already been compressed and compress it again # and set the Content-Encoding: gzip header # By setting this to request only the identity encoding We're # hoping to eliminate the third case. Hopefully there does not # exist a server which when given a file will notice it is # already compressed and that you're not asking for a # compressed file and will then decompress it before sending # because if that's the case I don't think it'll ever be # possible to make this work. headers={"Accept-Encoding": "identity"}, stream=True, ) resp.raise_for_status() return resp class Download(object): def __init__( self, response, # type: Response filename, # type: str chunks, # type: Iterable[bytes] ): # type: (...) -> None self.response = response self.filename = filename self.chunks = chunks class Downloader(object): def __init__( self, session, # type: PipSession progress_bar, # type: str ): # type: (...) -> None self._session = session self._progress_bar = progress_bar def __call__(self, link): # type: (Link) -> Download try: resp = _http_get_download(self._session, link) except requests.HTTPError as e: logger.critical( "HTTP error %s while getting %s", e.response.status_code, link ) raise return Download( resp, _get_http_response_filename(resp, link), _prepare_download(resp, link, self._progress_bar), )
mit
-2,368,371,102,283,550,700
31.29
78
0.644162
false
4.084756
false
false
false
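parse_content_disposition above leans on the standard library to pull the filename parameter out of the header and then strips any path components to block directory traversal. The same two steps in isolation, with an invented header value:

import cgi
import os.path

header = 'attachment; filename="../../dists/requests-2.0.tar.gz"'   # made-up header
_type, params = cgi.parse_header(header)
print(params['filename'])                    # ../../dists/requests-2.0.tar.gz
print(os.path.basename(params['filename']))  # requests-2.0.tar.gz  (traversal stripped)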
stefanopanella/xapi-storage-plugins
libs/losetup.py
1
1260
import os.path

from xapi.storage.common import call

# Use Linux "losetup" to create block devices from files


class Loop:
    """An active loop device"""

    def __init__(self, path, loop):
        self.path = path
        self.loop = loop

    def destroy(self, dbg):
        call(dbg, ["losetup", "-d", self.loop])

    def block_device(self):
        return self.loop


def find(dbg, path):
    """Return the active loop device associated with the given path"""
    # The kernel loop driver will transparently follow symlinks, so
    # we must too.
    path = os.path.realpath(path)
    for line in call(dbg, ["losetup", "-a"]).split("\n"):
        line = line.strip()
        if line != "":
            bits = line.split()
            loop = bits[0][0:-1]
            open_bracket = line.find('(')
            close_bracket = line.find(')')
            this_path = line[open_bracket + 1:close_bracket]
            if this_path == path:
                return Loop(path, loop)
    return None


def create(dbg, path):
    """Creates a new loop device backed by the given file"""
    # losetup will resolve paths and 'find' needs to use string equality
    path = os.path.realpath(path)
    call(dbg, ["losetup", "-f", path])
    return find(dbg, path)
lgpl-2.1
-1,819,758,216,933,129,700
26.391304
72
0.584127
false
3.806647
false
false
false
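Hypothetical usage of the wrapper above; `dbg` is just the debug tag that xapi.storage.common.call logs with, the backing path is invented, and the calls shell out to a real losetup binary (so root is needed). Treat it as a sketch rather than a test:

# Assumes the module above is importable as `losetup`.
import losetup

dbg = "example"                          # free-form debug tag passed through to call()
path = "/var/lib/images/disk0.img"       # hypothetical backing file

loop = losetup.create(dbg, path)         # losetup -f <path>, then look the device up again
print(loop.block_device())               # e.g. /dev/loop0
assert losetup.find(dbg, path).loop == loop.loop
loop.destroy(dbg)                        # losetup -d <device>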
mosarg/gestione_scuola
backend/migrations/0001_initial.py
1
2074
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Backend' db.create_table(u'backend_backend', ( ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('backendId', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('kind', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)), ('description', self.gf('django.db.models.fields.CharField')(max_length=1000)), ('serverIp', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39)), ('serverFqdn', self.gf('django.db.models.fields.CharField')(max_length=200)), )) db.send_create_signal(u'backend', ['Backend']) def backwards(self, orm): # Deleting model 'Backend' db.delete_table(u'backend_backend') models = { u'backend.backend': { 'Meta': {'ordering': "('-modified', '-created')", 'object_name': 'Backend'}, 'backendId': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'kind': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'serverFqdn': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'serverIp': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}) } } complete_apps = ['backend']
gpl-3.0
-7,381,446,622,908,163,000
48.404762
125
0.607522
false
3.79159
false
false
false
oarriaga/spatial_transformer_networks
src/models/layers.py
1
5049
from keras import backend as K from keras.engine.topology import Layer if K.backend() == 'tensorflow': import tensorflow as tf def K_meshgrid(x, y): return tf.meshgrid(x, y) def K_linspace(start, stop, num): return tf.linspace(start, stop, num) else: raise Exception("Only 'tensorflow' is supported as backend") class BilinearInterpolation(Layer): """Performs bilinear interpolation as a keras layer References ---------- [1] Spatial Transformer Networks, Max Jaderberg, et al. [2] https://github.com/skaae/transformer_network [3] https://github.com/EderSantana/seya """ def __init__(self, output_size, **kwargs): self.output_size = output_size super(BilinearInterpolation, self).__init__(**kwargs) def get_config(self): return { 'output_size': self.output_size, } def compute_output_shape(self, input_shapes): height, width = self.output_size num_channels = input_shapes[0][-1] return (None, height, width, num_channels) def call(self, tensors, mask=None): X, transformation = tensors output = self._transform(X, transformation, self.output_size) return output def _interpolate(self, image, sampled_grids, output_size): batch_size = K.shape(image)[0] height = K.shape(image)[1] width = K.shape(image)[2] num_channels = K.shape(image)[3] x = K.cast(K.flatten(sampled_grids[:, 0:1, :]), dtype='float32') y = K.cast(K.flatten(sampled_grids[:, 1:2, :]), dtype='float32') x = .5 * (x + 1.0) * K.cast(width, dtype='float32') y = .5 * (y + 1.0) * K.cast(height, dtype='float32') x0 = K.cast(x, 'int32') x1 = x0 + 1 y0 = K.cast(y, 'int32') y1 = y0 + 1 max_x = int(K.int_shape(image)[2] - 1) max_y = int(K.int_shape(image)[1] - 1) x0 = K.clip(x0, 0, max_x) x1 = K.clip(x1, 0, max_x) y0 = K.clip(y0, 0, max_y) y1 = K.clip(y1, 0, max_y) pixels_batch = K.arange(0, batch_size) * (height * width) pixels_batch = K.expand_dims(pixels_batch, axis=-1) flat_output_size = output_size[0] * output_size[1] base = K.repeat_elements(pixels_batch, flat_output_size, axis=1) base = K.flatten(base) # base_y0 = base + (y0 * width) base_y0 = y0 * width base_y0 = base + base_y0 # base_y1 = base + (y1 * width) base_y1 = y1 * width base_y1 = base_y1 + base indices_a = base_y0 + x0 indices_b = base_y1 + x0 indices_c = base_y0 + x1 indices_d = base_y1 + x1 flat_image = K.reshape(image, shape=(-1, num_channels)) flat_image = K.cast(flat_image, dtype='float32') pixel_values_a = K.gather(flat_image, indices_a) pixel_values_b = K.gather(flat_image, indices_b) pixel_values_c = K.gather(flat_image, indices_c) pixel_values_d = K.gather(flat_image, indices_d) x0 = K.cast(x0, 'float32') x1 = K.cast(x1, 'float32') y0 = K.cast(y0, 'float32') y1 = K.cast(y1, 'float32') area_a = K.expand_dims(((x1 - x) * (y1 - y)), 1) area_b = K.expand_dims(((x1 - x) * (y - y0)), 1) area_c = K.expand_dims(((x - x0) * (y1 - y)), 1) area_d = K.expand_dims(((x - x0) * (y - y0)), 1) values_a = area_a * pixel_values_a values_b = area_b * pixel_values_b values_c = area_c * pixel_values_c values_d = area_d * pixel_values_d return values_a + values_b + values_c + values_d def _make_regular_grids(self, batch_size, height, width): # making a single regular grid x_linspace = K_linspace(-1., 1., width) y_linspace = K_linspace(-1., 1., height) x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace) x_coordinates = K.flatten(x_coordinates) y_coordinates = K.flatten(y_coordinates) ones = K.ones_like(x_coordinates) grid = K.concatenate([x_coordinates, y_coordinates, ones], 0) # repeating grids for each batch grid = K.flatten(grid) grids = K.tile(grid, K.stack([batch_size])) return 
K.reshape(grids, (batch_size, 3, height * width)) def _transform(self, X, affine_transformation, output_size): batch_size, num_channels = K.shape(X)[0], K.shape(X)[3] transformations = K.reshape(affine_transformation, shape=(batch_size, 2, 3)) # transformations = K.cast(affine_transformation[:, 0:2, :], 'float32') regular_grids = self._make_regular_grids(batch_size, *output_size) sampled_grids = K.batch_dot(transformations, regular_grids) interpolated_image = self._interpolate(X, sampled_grids, output_size) new_shape = (batch_size, output_size[0], output_size[1], num_channels) interpolated_image = K.reshape(interpolated_image, new_shape) return interpolated_image
mit
-3,533,227,027,551,988,700
35.854015
79
0.575956
false
3.137974
false
false
false
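The _interpolate method above is standard bilinear interpolation: each sampled point is a weighted sum of its four neighbouring pixels, with weights equal to the opposite partial areas (the area_a..area_d terms). The same arithmetic for a single point in plain NumPy, with invented image values:

import numpy as np

image = np.array([[0.0, 10.0],
                  [20.0, 30.0]])      # a 2x2 image, values invented

x, y = 0.25, 0.75                     # sample location in pixel coordinates
x0, y0 = int(np.floor(x)), int(np.floor(y))
x1, y1 = x0 + 1, y0 + 1

# Opposite-corner areas, matching area_a..area_d in the layer above.
wa = (x1 - x) * (y1 - y)              # weight for pixel (y0, x0)
wb = (x1 - x) * (y - y0)              # weight for pixel (y1, x0)
wc = (x - x0) * (y1 - y)              # weight for pixel (y0, x1)
wd = (x - x0) * (y - y0)              # weight for pixel (y1, x1)

value = (wa * image[y0, x0] + wb * image[y1, x0] +
         wc * image[y0, x1] + wd * image[y1, x1])
print(value)                          # 17.5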
mattgemmell/DOT-MGTextEntry
mgtext.py
1
12149
#! /usr/bin/python """ MGText Text-entry plugin for Pimoroni's menu system for the Raspberry Pi Display-O-Tron. Code and info: https://github.com/mattgemmell/DOT-MGTextEntry By: Matt Gemmell http://mattgemmell.com/ http://twitter.com/mattgemmell """ from dot3k.menu import MenuOption _UP = 0 _DOWN = 1 _LEFT = 2 _RIGHT = 3 class MGText(MenuOption): def __init__(self): self.cols = 16 self.initialized = False self.scroll_up_icon = chr(0) self.scroll_down_icon = chr(1) self.abbreviation_icon = chr(2) self.placeholder_icon = chr(3) self.caps_on = True self.symbols_mode = False self.cancel_aborts = False # by default, Cancel button acts as Delete self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 self.uppercase_letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') self.lowercase_letters = list('abcdefghijklmnopqrstuvwxyz') self.space_symbol = 'Spc' self.line_break = '\n' # for layout only; can't be entered self.numbers = list('0123456789') self.quick_punctuation = list('./:@') self.symbols = list('./:@\'"~+-=_!?,;()[]<>{}\\^|&*$%#`') self.caps_command = "Caps" self.symbols_command = "More" self.delete_command = "Del" self.cancel_command = "Cancel" self.commit_command = "Accept" self.commands = [self.caps_command, self.symbols_command, self.delete_command, self.cancel_command, self.commit_command] self.uppercase_set = self.uppercase_letters self.uppercase_set.append(self.space_symbol) self.uppercase_set.extend(self.numbers) self.uppercase_set.extend(self.quick_punctuation) self.uppercase_set.extend(self.commands) self.lowercase_set = self.lowercase_letters self.lowercase_set.append(self.space_symbol) self.lowercase_set.extend(self.numbers) self.lowercase_set.extend(self.quick_punctuation) self.lowercase_set.extend(self.commands) self.symbols_set = self.symbols self.symbols_set.append(self.line_break) self.symbols_set.extend(self.commands) self.confirm_accept = "Yes" self.confirm_cancel = "No" self.confirm_quit = "Quit" self.confirm_set = [self.confirm_accept, self.confirm_cancel, self.confirm_quit] self.display_map = [] # 2D array of options self.display_ranges = [] # 2D array of range-arrays with option extents self.entered_text = '' self.confirming = False MenuOption.__init__(self) self.is_setup = False def set_value(self, value): self.entered_text = value def get_value(self): return self.entered_text def begin(self): self.initialized = False self.confirming = False self.symbols_mode = False self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 self.set_value('') self.update_display_map() def setup(self, config): MenuOption.setup(self, config) def cleanup(self): self.entered_text = '' self.display_map = [] self.display_ranges = [] def update_display_map(self): """ Builds two datastructures: - display_map is an array of rows of the display, with each entry being an array of that row's options as strings. - display_ranges is similar, but each row-array contains dictionaries that are ranges of where the corresponding option renders on the display. 
""" self.display_map = [] self.display_ranges = [] options_set = self.uppercase_set if self.caps_on else self.lowercase_set if self.symbols_mode: options_set = self.symbols_set if self.confirming: options_set = self.confirm_set row_len = 0 self.display_map.append([]) self.display_ranges.append([]) for opt in options_set: if (opt == self.line_break) or ((len(opt) + row_len + 2) > (self.cols - 1)): # Start a new row self.display_map.append([]) self.display_ranges.append([]) row_len = 0 if opt == self.line_break: # We don't actually include line-breaks as options continue # Add to latest row self.display_map[-1].append(opt) opt_len = len(opt) + 1 # to account for the leading space self.display_ranges[-1].append({'start': row_len, 'len': opt_len}) row_len += opt_len def index_of_range_containing(self, row, col): """ This allows us to move the cursor spatially when going to a different row. For example, consider moving from a row with only two lengthy options, to a row with seven single-character options. If option 2 of 2 was selected on the former row, we wouldn't just want option 2 to be selected on the latter row after the move, because the cursor would seem to jump way over to the left. What we really want is to "move to whatever option is directly above/below the one I already had selected", which is what this method (and the display_ranges structure) allows. """ if row >= 0 and row < len(self.display_ranges) and col >= 0 and col < self.cols: row_ranges = self.display_ranges[row] index = len(row_ranges) - 1 for range in reversed(row_ranges): if col >= range['start']: break index -= 1 return index def move_cursor(self, direction): # Move cursor appropriately using ranges sel_row = self.selection['row'] sel_opt = self.selection['option'] sel_orig_row = sel_row sel_orig_col = self.display_ranges[sel_row][sel_opt]['start'] if direction == _UP: self.selection['row'] = (sel_row - 1) % len(self.display_map) self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col) elif direction == _DOWN: self.selection['row'] = (sel_row + 1) % len(self.display_map) self.selection['option'] = self.index_of_range_containing(self.selection['row'], sel_orig_col) elif direction == _LEFT: # We wrap back onto the previous row when appropriate self.selection['option'] = (sel_opt - 1) % len(self.display_map[sel_row]) # Check to see if we wrapped around if self.selection['option'] > sel_opt or len(self.display_map[sel_row]) == 1: # Wrap to previous row self.selection['row'] = (sel_row - 1) % len(self.display_map) self.selection['option'] = len(self.display_map[self.selection['row']]) - 1 elif direction == _RIGHT: # We wrap forward onto the next row when appropriate self.selection['option'] = (sel_opt + 1) % len(self.display_map[sel_row]) # Check to see if we wrapped around if self.selection['option'] < sel_opt or len(self.display_map[sel_row]) == 1: # Wrap to next row self.selection['row'] = (sel_row + 1) % len(self.display_map) self.selection['option'] = 0 # Sanitise new selection self.selection['option'] = max(0, self.selection['option']) self.selection['option'] = min(len(self.display_map[self.selection['row']]) - 1, self.selection['option']) # Update first_displayed_row appropriately sel_row = self.selection['row'] if sel_row < self.first_displayed_row: self.first_displayed_row = sel_row elif sel_row > self.first_displayed_row + 1: self.first_displayed_row = sel_row - 1 def render_row(self, row): # Returns the actual rendered full text of a row, with all annotations result = "" if 
row >= 0 and row < len(self.display_map): row_opts = self.display_map[row] row_selected = (self.selection['row'] == row) selected_option = self.selection['option'] for index, opt in enumerate(row_opts): # Selection markers if row_selected: if selected_option == index: result += "[" elif selected_option == (index - 1): result += "]" else: result += " " else: result += " " # Option text if opt == self.caps_command: if self.caps_on: result += "lowr" else: result += "UPPR" elif opt == self.symbols_command: if self.symbols_mode: if self.caps_on: result += "ABC1" else: result += "abc1" else: result += "#+=$" else: result += opt # Special case for end of row if index == len(row_opts) - 1: # Selection markers if row_selected and selected_option == index: result += "]" else: result += " " # Add any end-of-row padding required result += (" " * (self.cols - (len(result) + 1))) # Scroll indicators if row == self.first_displayed_row and row > 0: result += self.scroll_up_icon elif row == (self.first_displayed_row + 1) and row < (len(self.display_map) - 1): result += self.scroll_down_icon else: result += " " return result def delete(self): # Delete last character entered if (not self.confirming) and len(self.entered_text) > 0: self.entered_text = self.entered_text[:-1] def left(self): self.move_cursor(_LEFT) return True def right(self): self.move_cursor(_RIGHT) return True def up(self): self.move_cursor(_UP) return True def down(self): self.move_cursor(_DOWN) return True def cancel(self): if self.cancel_aborts: # Confirm quit if we have text if len(self.entered_text) > 0: self.confirming = True self.update_display_map() self.selection = {'row': 0, 'option': 1} self.first_displayed_row = 0 return False else: return True # Delete last character entered self.delete() return False def select(self): # Handle all the selectable options and commands opt = self.display_map[self.selection['row']][self.selection['option']] if opt == self.space_symbol: self.entered_text += " " elif opt == self.caps_command: self.caps_on = not (self.caps_on) self.symbols_mode = False self.update_display_map() self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 elif opt == self.symbols_command: self.symbols_mode = not (self.symbols_mode) self.update_display_map() self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 elif opt == self.delete_command: self.delete() elif opt == self.cancel_command: self.confirming = True self.update_display_map() self.selection = {'row': 0, 'option': 1} self.first_displayed_row = 0 elif opt == self.commit_command: self.confirming = True self.update_display_map() self.selection = {'row': 0, 'option': 1} self.first_displayed_row = 0 elif opt == self.confirm_accept: self.confirming = False self.update_display_map() self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 return True elif opt == self.confirm_cancel: self.confirming = False self.update_display_map() self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 elif opt == self.confirm_quit: self.confirming = False self.update_display_map() self.selection = {'row': 0, 'option': 0} self.first_displayed_row = 0 self.cancel_input = True return True else: self.entered_text += opt return False def redraw(self, menu): if not self.initialized: menu.lcd.create_char(0, [0, 0, 4, 14, 31, 0, 0, 0]) # scroll up icon menu.lcd.create_char(1, [0, 0, 0, 31, 14, 4, 0, 0]) # scroll down icon menu.lcd.create_char(2, [0, 0, 0, 0, 0, 0, 21, 0]) # abbreviation icon menu.lcd.create_char(3, [0, 0, 0, 0, 0, 0, 0,
28]) # placeholder icon self.initialized = True if not self.confirming: # Output the editing row text_len = len(self.entered_text) if text_len > self.cols: menu.write_row(0, self.abbreviation_icon + self.entered_text[text_len - self.cols + 1:]) else: menu.write_row(0, self.entered_text + (self.placeholder_icon * (self.cols - text_len))) # Output relevant two rows if self.first_displayed_row < len(self.display_map): menu.write_row(1, self.render_row(self.first_displayed_row)) else: menu.clear_row(1) if self.first_displayed_row + 1 < len(self.display_map): menu.write_row(2, self.render_row(self.first_displayed_row + 1)) else: menu.clear_row(2) else: # Handle the confirmation screen if len(self.entered_text) > self.cols: menu.write_option(0, self.entered_text, scroll=True, scroll_repeat=2000) else: menu.write_row(0, self.entered_text + (" " * (self.cols - len(self.entered_text)))) menu.write_row(1, 'Confirm?') menu.write_row(2, self.render_row(self.first_displayed_row))
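As a minimal, self-contained illustration of the range lookup that `index_of_range_containing` performs when the cursor changes rows (hypothetical display data, independent of the Display-O-Tron hardware):
# each row records where its options start on the 16-column display
display_ranges = [
    [{'start': 0, 'len': 8}, {'start': 8, 'len': 7}],                          # two wide options
    [{'start': 0, 'len': 2}, {'start': 2, 'len': 2}, {'start': 4, 'len': 2}],  # narrow options
]

def index_of_range_containing(row, col):
    # walk the row's ranges from the right, keep the first one that starts at or before col
    index = len(display_ranges[row]) - 1
    for rng in reversed(display_ranges[row]):
        if col >= rng['start']:
            break
        index -= 1
    return index

# moving down from the option starting at column 8 lands on the option
# directly below that column, not blindly on "option 2" of the new row
print(index_of_range_containing(1, 8))  # -> 2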
mit
-2,095,706,729,113,539,300
30.720627
559
0.658408
false
3.043337
false
false
false
saebyn/nwgui
nwgui/gui.py
1
2263
import pygame from nwgui.container import AbsoluteContainer class AbstractGUI(object): def __init__(self, game): raise NotImplementedError def getGameObject(self): raise NotImplementedError def get(self, widgetName): raise NotImplementedError def setName(self, name, widget): raise NotImplementedError def updateLayers(self): raise NotImplementedError def getLayer(self): raise NotImplementedError def addSprite(self, widget): raise NotImplementedError def setActive(self, widget): raise NotImplementedError def setInactive(self, widget): raise NotImplementedError def isControlledPosition(self, position): raise NotImplementedError class GUI(AbsoluteContainer, AbstractGUI): def __init__(self, game): self._game = game AbsoluteContainer.__init__(self, game.screen.get_width(), game.screen.get_height(), self, root=self) self.image = pygame.Surface((0, 0)) self.active = None self.names = {} def getGameObject(self): return self._game def get(self, widgetName): return self.names[widgetName] def handleEvent(self, event): AbsoluteContainer.handleEvent(self, event) def updateLayers(self): for widget in self.widgets: widget.updateLayer() def setParent(self, parent): raise NotImplementedError def isActive(self): return self.active is not None def setActive(self, widget): if self.active is not None: self.active.setInactive() self.active = widget def setInactive(self, widget=None): if self.active == widget or widget is None: self.active = None def addSprite(self, sprite): self._game.addGUISprite(sprite) def setName(self, name, widget): self.names[name] = widget def isControlledPosition(self, position): for widget in self._game.guiSprites.sprites(): if widget is self: continue if widget.rect.collidepoint(position): return True return False
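The `GUI` class only assumes a game object that exposes `screen`, `addGUISprite` and a `guiSprites` sprite group. A sketch of a minimal stub satisfying that contract (hypothetical `Game` class, not part of nwgui):
import pygame

class Game(object):
    # bare-bones stand-in for the game object GUI expects
    def __init__(self, size=(640, 480)):
        pygame.init()
        self.screen = pygame.display.set_mode(size)
        self.guiSprites = pygame.sprite.LayeredUpdates()

    def addGUISprite(self, sprite):
        self.guiSprites.add(sprite)

# gui = GUI(Game())       # widgets built on this gui register their sprites via addSprite
# gui.handleEvent(event)  # forward pygame events from the main loop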
gpl-3.0
-5,101,451,680,991,540,000
23.597826
66
0.613787
false
4.744235
false
false
false
algorhythms/LeetCode
673 Number of Longest Increasing Subsequence.py
1
1796
#!/usr/bin/python3 """ Given an unsorted array of integers, find the number of longest increasing subsequence. Example 1: Input: [1,3,5,4,7] Output: 2 Explanation: The two longest increasing subsequence are [1, 3, 4, 7] and [1, 3, 5, 7]. Example 2: Input: [2,2,2,2,2] Output: 5 Explanation: The length of longest continuous increasing subsequence is 1, and there are 5 subsequences' length is 1, so output 5. Note: Length of the given array will be not exceed 2000 and the answer is guaranteed to be fit in 32-bit signed int. """ from typing import List class LenCnt: def __init__(self, l, c): self.l = l self.c = c def __repr__(self): return repr((self.l, self.c)) class Solution: def findNumberOfLIS(self, A: List[int]) -> int: """ Two pass - 1st pass find the LIS, 2nd pass find the number Let F[i] be the length of LIS ended at A[i] """ if not A: return 0 n = len(A) F = [LenCnt(l=1, c=1) for _ in A] mx = LenCnt(l=1, c=1) for i in range(1, n): for j in range(i): if A[i] > A[j]: if F[i].l < F[j].l + 1: F[i].l = F[j].l + 1 F[i].c = F[j].c elif F[i].l == F[j].l + 1: F[i].c += F[j].c if F[i].l > mx.l: # mx = F[i] error, need deep copy mx.l = F[i].l mx.c = F[i].c elif F[i].l == mx.l: mx.c += F[i].c return mx.c if __name__ == "__main__": assert Solution().findNumberOfLIS([1,1,1,2,2,2,3,3,3]) == 27 assert Solution().findNumberOfLIS([1, 3, 5, 4, 7]) == 2 assert Solution().findNumberOfLIS([2, 2, 2, 2, 2]) == 5
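A short worked trace of the DP above for the first example (annotation only, not part of the original file):
# A = [1, 3, 5, 4, 7]
# F = [(1,1), (2,1), (3,1), (3,1), (4,2)]   # (length, count) of LIS ending at each index
# 7 extends both length-3 chains (..., 5) and (..., 4), so F[4].c == 2,
# and the answer is mx.c == 2.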
mit
3,544,294,024,520,078,000
26.630769
78
0.50167
false
3.059625
false
false
false
pearu/sympycore
sympycore/functions/algebra.py
1
2063
""" Implementes functions ring support. """ # # Author: Pearu Peterson # Created: April, 2008 # __all__ = ['FunctionRing'] from ..core import classes, objects, init_module from ..basealgebra import Verbatim, Algebra from ..ring import CommutativeRing init_module.import_heads() class FunctionRing(CommutativeRing): """ Base class to functions ring classes. Use ``Function`` function to construct instances. """ argument_algebras = None nargs = None @classmethod def get_value_algebra(cls): return CommutativeRing def get_argument_algebra(self, index): return self.get_value_algebra() @classmethod def get_function_algebra(cls): return classes.OperatorRing @classmethod def get_differential_algebra(cls): return classes.DifferentialRing @classmethod def get_predefined_symbols(cls, name): if name=='D': return D return @classmethod def convert(cls, obj, typeerror=True): tobj = type(obj) if tobj is cls: return obj if isinstance(obj, cls.get_value_algebra()): return cls(NUMBER, obj) return super(CommutativeRing, cls).convert(obj, typeerror=typeerror) def as_algebra(self, cls, typeerror=True): if cls is classes.Verbatim: return self.as_verbatim() if type(self) is cls: return self #if isinstance(self, cls): # return self.as_verbatim().as_algebra(cls) if typeerror: raise TypeError('Cannot convert %s to %s instance' % (type(self).__name__, cls.__name__)) return NotImplemented def __call__(self, *args, **options): cls = self.get_value_algebra() #cls = classes.Calculus evaluate = options.get('evaluate', True) if evaluate: result = self.head.apply(cls, self.data, self, args) if result is not NotImplemented: return result return cls(APPLY, (self, args)) classes.FunctionRing = FunctionRing
bsd-3-clause
7,460,060,148,131,743,000
25.792208
101
0.62191
false
3.944551
false
false
false
NEONScience/NEON-Data-Skills
tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.py
1
20510
#!/usr/bin/env python # coding: utf-8 # --- # syncID: e6ccf19a4b454ca594388eeaa88ebe12 # title: "Calculate Vegetation Biomass from LiDAR Data in Python" # description: "Learn to calculate the biomass of standing vegetation using a canopy height model data product." # dateCreated: 2017-06-21 # authors: Tristan Goulden # contributors: Donal O'Leary # estimatedTime: 1 hour # packagesLibraries: numpy, gdal, matplotlib, matplotlib.pyplot, os # topics: lidar,remote-sensing # languagesTool: python # dataProduct: DP1.10098.001, DP3.30015.001, # code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.ipynb # tutorialSeries: intro-lidar-py-series # urlTitle: calc-biomass-py # --- # <div id="ds-objectives" markdown="1"> # # In this tutorial, we will calculate the biomass for a section of the SJER site. We # will be using the Canopy Height Model discrete LiDAR data product as well as NEON # field data on vegetation. This tutorial will calculate Biomass for individual # trees in the forest. # # ### Objectives # After completing this tutorial, you will be able to: # # * Learn how to apply a Gaussian smoothing kernel for high-frequency spatial filtering # * Apply a watershed segmentation algorithm for delineating tree crowns # * Calculate biomass predictor variables from a CHM # * Set up training data for Biomass predictions # * Apply a Random Forest machine learning approach to calculate biomass # # # ### Install Python Packages # # * **numpy** # * **gdal** # * **matplotlib** # * **matplotlib.pyplot** # * **os** # # # ### Download Data # # If you have already downloaded the data set for the Data Institute, you have the # data for this tutorial within the SJER directory. If you would like to just # download the data for this tutorial use the following link. # # <a href="https://neondata.sharefile.com/d-s58db39240bf49ac8" class="link--button link--arrow"> # Download the Biomass Calculation teaching data subset</a> # # </div> # In this tutorial, we will calculate the biomass for a section of the SJER site. We # will be using the Canopy Height Model discrete LiDAR data product as well as NEON # field data on vegetation. This tutorial will calculate Biomass for individual # trees in the forest. # # The calculation of biomass consists of four primary steps: # # 1. Delineating individual tree crowns # 2. Calculating predictor variables for all individuals # 3. Collecting training data # 4. Applying a regression model to estimate biomass from predictors # # In this tutorial we will use a watershed segmentation algorithm for delineating # tree crowns (step 1) and a Random Forest (RF) machine learning algorithm for # relating the predictor variables to biomass (part 4). The predictor variables were # selected following suggestions by Gleason et al. (2012) and biomass estimates were # determined from DBH (diameter at breast height) measurements following relationships # given in Jenkins et al. (2003). # # ## Get Started # # First, we need to specify the directory where we will find and save the data needed for this tutorial. You will need to change this line to suit your local machine. I have decided to save my data in the following directory: # In[1]: data_path = '/Users/olearyd/Git/data/' # Next, we will import several of the typical libraries.
# In[2]: import numpy as np import os import gdal, osr import matplotlib.pyplot as plt import sys from scipy import ndimage as ndi get_ipython().run_line_magic('matplotlib', 'inline') # Next, we will add libraries from scikit-image and scikit-learn which will help with the watershed delineation, determination of predictor variables and the random forest algorithm # In[3]: #Import biomass specific libraries from skimage.morphology import watershed from skimage.feature import peak_local_max from skimage.measure import regionprops from sklearn.ensemble import RandomForestRegressor # ## Define functions # # Now we will define a few functions that allow us to more easily work with the NEON data. # # * `plot_band_array`: function to plot NEON spatial data. # In[4]: #Define plot band array function def plot_band_array(band_array,image_extent,title,cmap_title,colormap,colormap_limits): plt.imshow(band_array,extent=image_extent) cbar = plt.colorbar(); plt.set_cmap(colormap); plt.clim(colormap_limits) cbar.set_label(cmap_title,rotation=270,labelpad=20) plt.title(title); ax = plt.gca() ax.ticklabel_format(useOffset=False, style='plain') rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90) # * `array2raster`: function to output geotiff files. # In[5]: def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array,epsg): cols = array.shape[1] rows = array.shape[0] originX = rasterOrigin[0] originY = rasterOrigin[1] driver = gdal.GetDriverByName('GTiff') outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32) outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight)) outband = outRaster.GetRasterBand(1) outband.WriteArray(array) outRasterSRS = osr.SpatialReference() outRasterSRS.ImportFromEPSG(epsg) outRaster.SetProjection(outRasterSRS.ExportToWkt()) outband.FlushCache() # * `raster2array`: function to convert rasters to an array. # In[6]: def raster2array(geotif_file): metadata = {} dataset = gdal.Open(geotif_file) metadata['array_rows'] = dataset.RasterYSize metadata['array_cols'] = dataset.RasterXSize metadata['bands'] = dataset.RasterCount metadata['driver'] = dataset.GetDriver().LongName metadata['projection'] = dataset.GetProjection() metadata['geotransform'] = dataset.GetGeoTransform() mapinfo = dataset.GetGeoTransform() metadata['pixelWidth'] = mapinfo[1] metadata['pixelHeight'] = mapinfo[5] metadata['ext_dict'] = {} metadata['ext_dict']['xMin'] = mapinfo[0] metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize/mapinfo[1] metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize/mapinfo[5] metadata['ext_dict']['yMax'] = mapinfo[3] metadata['extent'] = (metadata['ext_dict']['xMin'],metadata['ext_dict']['xMax'], metadata['ext_dict']['yMin'],metadata['ext_dict']['yMax']) if metadata['bands'] == 1: raster = dataset.GetRasterBand(1) metadata['noDataValue'] = raster.GetNoDataValue() metadata['scaleFactor'] = raster.GetScale() # band statistics metadata['bandstats'] = {} # make a nested dictionary to store band stats in same stats = raster.GetStatistics(True,True) metadata['bandstats']['min'] = round(stats[0],2) metadata['bandstats']['max'] = round(stats[1],2) metadata['bandstats']['mean'] = round(stats[2],2) metadata['bandstats']['stdev'] = round(stats[3],2) array = dataset.GetRasterBand(1).ReadAsArray(0,0, metadata['array_cols'], metadata['array_rows']).astype(np.float) array[array==int(metadata['noDataValue'])]=np.nan array = array/metadata['scaleFactor'] return array, metadata elif metadata['bands'] > 1: print('More than one band ...
need to modify function for case of multiple bands') # * `crown_geometric_volume_pth`: function to get tree crown volume. # In[7]: def crown_geometric_volume_pth(tree_data,min_tree_height,pth): p = np.percentile(tree_data, pth) tree_data_pth = [v if v < p else p for v in tree_data] crown_geometric_volume_pth = np.sum(tree_data_pth - min_tree_height) return crown_geometric_volume_pth, p # * `get_predictors`: function to get the trees from the biomass data. # In[8]: def get_predictors(tree,chm_array, labels): indexes_of_tree = np.asarray(np.where(labels==tree.label)).T tree_crown_heights = chm_array[indexes_of_tree[:,0],indexes_of_tree[:,1]] full_crown = np.sum(tree_crown_heights - np.min(tree_crown_heights)) crown50, p50 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,50) crown60, p60 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,60) crown70, p70 = crown_geometric_volume_pth(tree_crown_heights,tree.min_intensity,70) return [tree.label, np.float(tree.area), tree.major_axis_length, tree.max_intensity, tree.min_intensity, p50, p60, p70, full_crown, crown50, crown60, crown70] # ## Canopy Height Data # # With everything set up, we can now start working with our data by defining the file path to our CHM file. Note that you will need to change this and subsequent filepaths according to your local machine. # In[9]: chm_file = data_path+'NEON_D17_SJER_DP3_256000_4106000_CHM.tif' # When we output the results, we will want to include the same file information as the input, so we will gather the file name information. # In[10]: #Get info from chm file for outputting results just_chm_file = os.path.basename(chm_file) just_chm_file_split = just_chm_file.split(sep="_") # Now we will get the CHM data... # In[11]: chm_array, chm_array_metadata = raster2array(chm_file) # ..., plot it, and save the figure. # In[12]: #Plot the original CHM plt.figure(1) #Plot the CHM figure plot_band_array(chm_array,chm_array_metadata['extent'], 'Canopy height Model', 'Canopy height (m)', 'Greens',[0, 9]) plt.savefig(data_path+just_chm_file[0:-4]+'_CHM.png',dpi=300,orientation='landscape', bbox_inches='tight', pad_inches=0.1) # It looks like SJER primarily has low vegetation with scattered taller trees. # # ## Create Filtered CHM # # Now we will use a Gaussian smoothing kernel (convolution) across the data set to remove spurious high vegetation points. This will help ensure we are finding the treetops properly before running the watershed segmentation algorithm. # # For different forest types it may be necessary to change the input parameters. Information on the function can be found in the <a href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html" target="_blank">SciPy documentation</a>. # # Of most importance are the second and fifth inputs. The second input defines the standard deviation of the Gaussian smoothing kernel. Too large a value will apply too much smoothing, too small and some spurious high points may be left behind. The fifth, the truncate value, controls after how many standard deviations the Gaussian kernel will get cut off (since it theoretically goes to infinity). # In[13]: #Smooth the CHM using a gaussian filter to remove spurious points chm_array_smooth = ndi.gaussian_filter(chm_array,2, mode='constant',cval=0,truncate=2.0) chm_array_smooth[chm_array==0] = 0 # Now save a copy of the filtered CHM. We will later use this in our code, so we'll output it into our data directory.
# In[14]: #Save the smoothed CHM array2raster(data_path+'chm_filter.tif', (chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']), 1,-1, np.array(chm_array_smooth,dtype=float), 32611) # ## Determine local maximums # # Now we will run an algorithm to determine local maximums within the image. Setting indices to 'False' returns a raster of the maximum points, as opposed to a list of coordinates. The footprint parameter is an area where only a single peak can be found. This should be approximately the size of the smallest tree. Information on more sophisticated methods to define the window can be found in Chen (2006). # In[15]: #Calculate local maximum points in the smoothed CHM local_maxi = peak_local_max(chm_array_smooth,indices=False, footprint=np.ones((5, 5))) # Our new object `local_maxi` is an array of boolean values where each pixel is identified as either being the local maximum (`True`) or not being the local maximum (`False`). # In[16]: local_maxi # This is very helpful, but it can be difficult to visualize boolean values using our typical numeric plotting procedures as defined in the `plot_band_array` function above. Therefore, we will need to convert this boolean array to a numeric format to use this function. Booleans convert easily to integers with values of `False=0` and `True=1` using the `.astype(int)` method. # In[17]: local_maxi.astype(int) # Next, we can plot the raster of local maximums by coercing the boolean array into an array of integers inline. The following figure shows the difference in finding local maximums for a filtered vs. non-filtered CHM. # # We will save the graphics (.png) in an outputs folder alongside our working directory and data outputs (.tif) to our data directory. # In[18]: #Plot the local maximums plt.figure(2) plot_band_array(local_maxi.astype(int),chm_array_metadata['extent'], 'Maximum', 'Maxi', 'Greys', [0, 1]) plt.savefig(data_path+just_chm_file[0:-4]+ '_Maximums.png', dpi=300,orientation='landscape', bbox_inches='tight',pad_inches=0.1) array2raster(data_path+'maximum.tif', (chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']), 1,-1,np.array(local_maxi,dtype=np.float32),32611) # If we were to look at the overlap between the tree crowns and the local maxima from each method, it would appear a bit like this raster. # # <figure> # <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"> # <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"></a> # <figcaption> The difference in finding local maximums for a filtered vs. # non-filtered CHM. # Source: National Ecological Observatory Network (NEON) # </figcaption> # </figure> # # # Apply labels to all of the local maximum points # In[19]: #Identify all the maximum points markers = ndi.label(local_maxi)[0] # Next we will create a mask layer of all of the vegetation points so that the watershed segmentation will only occur on the trees and not extend into the surrounding ground points. Since 0 represents ground points in the CHM, setting the mask to 1 where the CHM is not zero will define the mask. # In[20]: #Create a CHM mask so the segmentation will only occur on the trees chm_mask = chm_array_smooth chm_mask[chm_array_smooth != 0] = 1 # ## Watershed segmentation # # As in a river system, a watershed is divided by a ridge that divides areas.
Here our watersheds are the individual tree canopies and the ridge is the delineation between each one. # # <figure> # <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"> # <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"></a> # <figcaption> A raster classified based on watershed segmentation. # Source: National Ecological Observatory Network (NEON) # </figcaption> # </figure> # # Next, we will perform the watershed segmentation which produces a raster of labels. # In[21]: #Perform watershed segmentation labels = watershed(chm_array_smooth, markers, mask=chm_mask) labels_for_plot = labels.copy() labels_for_plot = np.array(labels_for_plot,dtype = np.float32) labels_for_plot[labels_for_plot==0] = np.nan max_labels = np.max(labels) # In[22]: #Plot the segments plot_band_array(labels_for_plot,chm_array_metadata['extent'], 'Crown Segmentation','Tree Crown Number', 'Spectral',[0, max_labels]) plt.savefig(data_path+just_chm_file[0:-4]+'_Segmentation.png', dpi=300,orientation='landscape', bbox_inches='tight',pad_inches=0.1) array2raster(data_path+'labels.tif', (chm_array_metadata['ext_dict']['xMin'], chm_array_metadata['ext_dict']['yMax']), 1,-1,np.array(labels,dtype=float),32611) # Now we will get several properties of the individual trees that will be used as predictor variables. # In[23]: #Get the properties of each segment tree_properties = regionprops(labels,chm_array) # Now we will get the predictor variables to match the (soon to be loaded) training data using the function defined above. The first column will be segment IDs, the rest will be the predictor variables. # In[24]: predictors_chm = np.array([get_predictors(tree, chm_array, labels) for tree in tree_properties]) X = predictors_chm[:,1:] tree_ids = predictors_chm[:,0] # ## Training data # # We now bring in the training data file which is a simple CSV file with no header. The first column is biomass, and the remaining columns are the same predictor variables defined above. The tree diameter and max height are defined in the NEON vegetation structure data along with the tree DBH. The field validated values are used for training, while the others were determined from the CHM and camera images by manually delineating the tree crowns and pulling out the relevant information from the CHM. # # Biomass was calculated from DBH according to the formulas in Jenkins et al. (2003). # # If you didn't download this training dataset above, you can <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fobd4959-4cf0-44ab-acc6-0695a04a1afc" target="_blank">Download the training dataset CSV here</a>. # In[25]: #Define the file of training data training_data_file = data_path+'SJER_Biomass_Training.csv' #Read in the training data from a CSV file training_data = np.genfromtxt(training_data_file,delimiter=',') #Grab the biomass (Y) from the first column biomass = training_data[:,0] #Grab the biomass predictors from the remaining columns biomass_predictors = training_data[:,1:12] # ## Random Forest classifiers # # We can then define parameters of the Random Forest classifier and fit the predictor variables from the training data to the Biomass estimates.
# In[26]: #Define parameters for Random Forest regressor max_depth = 30 #Define regressor rules regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2) #Fit the biomass to regressor variables regr_rf.fit(biomass_predictors,biomass) # We now apply the Random Forest model to the predictor variables to retrieve biomass. # In[27]: #Apply the model to the predictors estimated_biomass = regr_rf.predict(X) # For outputting a raster, copy the labels raster to a biomass raster, then cycle through the segments and assign the biomass estimate to each individual tree segment. # In[28]: #Set an out raster with the same size as the labels biomass_map = np.array((labels),dtype=float) #Assign the appropriate biomass to the labels biomass_map[biomass_map==0] = np.nan for tree_id, biomass_of_tree_id in zip(tree_ids, estimated_biomass): biomass_map[biomass_map == tree_id] = biomass_of_tree_id # ## Calc Biomass # Collect some of the biomass statistics and then plot the results and save an output geotiff. # In[29]: #Get biomass stats for plotting mean_biomass = np.mean(estimated_biomass) std_biomass = np.std(estimated_biomass) min_biomass = np.min(estimated_biomass) sum_biomass = np.sum(estimated_biomass) print('Sum of biomass is ',sum_biomass,' kg') #Plot the biomass! plt.figure(5) plot_band_array(biomass_map,chm_array_metadata['extent'], 'Biomass (kg)','Biomass (kg)', 'winter', [min_biomass+std_biomass, mean_biomass+std_biomass*3]) plt.savefig(data_path+just_chm_file_split[0]+'_'+just_chm_file_split[1]+'_'+just_chm_file_split[2]+'_'+just_chm_file_split[3]+'_'+just_chm_file_split[4]+'_'+just_chm_file_split[5]+'_'+'Biomass.png', dpi=300,orientation='landscape', bbox_inches='tight', pad_inches=0.1) array2raster(data_path+'biomass.tif', (chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']), 1,-1,np.array(biomass_map,dtype=float),32611) # In[ ]:
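The notebook fits the regressor on all of the training rows; a quick hold-out check is one way to sanity-check the model before trusting the mapped biomass. A sketch using standard scikit-learn calls (not part of the original tutorial):
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

X_train, X_test, y_train, y_test = train_test_split(
    biomass_predictors, biomass, test_size=0.3, random_state=2)
check_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
check_rf.fit(X_train, y_train)
print('hold-out R^2:', r2_score(y_test, check_rf.predict(X_test)))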
agpl-3.0
-6,897,848,424,968,655,000
35.17284
503
0.709654
false
3.292135
false
false
false
abstractfactory/openmetadata-mk1
transaction.py
1
5900
"""Convenience module for the end-user The goal of this module is to provide as high-level utilities as possible for users who wish to have as little knowledge as possible about Open Folder. Target audience leans towards Technical Directors or fellow scripters in any DCC. """ from __future__ import absolute_import import os # import sys import errno import logging import shutil import collections from openmetadata import domain log = logging.getLogger('openmetadata.transaction') def write(path, channel=None, key=None, data=None): """Convenience method for writing metadata""" container = domain.Folder(path) if key and not channel: raise ValueError("Argument `key` must be specified in " "conjunction with `channel`") if channel and not key: if not isinstance(data, dict): raise ValueError("Data passed to object of type " "<Channel> must be of type <dict>") container = domain.Channel(channel, container) if channel and key: channel = domain.Channel(channel, container) key = domain.Key(key, channel) container = key container.data = data # container.write() print "%r = %r" % (container.path, container.data) def update(path, channel=None, key=None, data=None): """Convenience method for updating metadata""" raise NotImplementedError def read(path, channel=None, key=None): """Convenience method for reading metadata Parameters path (str) : Path to meta folder channel (str) : (optional) Name of individual channel key (str) : (optional) Name of individual file Returns dict() : {'obj.name': content} Calling this method with only `path` specified is identical to calling Folder.read().data directly. """ if key and not channel: raise ValueError("Must supply `channel` with `key` argument") if not os.path.exists(path): return {} try: obj = domain.Factory.create(path) except WindowsError as e: # Temporary fix. An error occurs when trying to # read junctions pointing to invalid targets. if e.errno == errno.ENOENT: print e return {} raise e assert isinstance(obj, domain.Folder) if channel: obj = obj.child(channel) if not obj: return {} if key: obj = obj.child(key) if not obj: return None return obj.read().data def exists(path, channel=None, key=None): pass def cascade(path, channel, key=None): """Merge metadata of each channel matching `term` up-wards through hierarchy""" folder = domain.Folder(path) hierarchy = _findchannels(folder, channel) hierarchy.reverse() # An implementation of the Property-Pattern as discussed here: # http://steve-yegge.blogspot.co.uk/2008/10/universal-design-pattern.html metadata_hierarchy = [] for _channel in hierarchy: _channel.read() _data = _channel.data or {} metadata_hierarchy.append(_data) # The following algorithm is based on this answer: # http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth def update(d, u): for k, v in u.iteritems(): if isinstance(v, collections.Mapping): r = update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d metadata = {} for _metadata in metadata_hierarchy: update(metadata, _metadata) return metadata def delete(path, channel=None, key=None, max_retries=10): assert os.path.exists(path) retries = 0 while True: try: if os.path.isdir(path): shutil.rmtree(path) else: os.remove(path) break except WindowsError as e: # Sometimes, Dropbox can bother this operation; # creating files in the midst of deleting a folder. # # If this happens, try again in a short while. 
retries += 1 if retries > max_retries: log.error(e) break import time time.sleep(0.1) log.info("Retried %i time(s) for %s" % (retries, path)) log.info("Removed %s" % path) def _findchannels(folder, term, result=None): """Return channels matching `term` up-wards through hierarchy""" assert isinstance(folder, domain.Folder) result = result or [] # Note: We can only cascade channels of type .kvs current_channel = None # Look for `term` within folder for _channel in folder: if _channel.name == term and _channel.extension == '.kvs': result.append(_channel) current_channel = _channel # Recurse parent = folder.parent if parent: # Before we recurse, ensure this is not a path. isroot = False # TODO # Find a way to optimize this. Channel is being read here # to find the isRoot property which is used solely to # determine whether or not to continue searching. # This is an expensive operation, and what's worse, # the channel is being re-read in `cascade`. if current_channel: data = current_channel.read().data or {} if data.get('isRoot') is True: isroot = True if not isroot: return _findchannels(parent, term, result) return result # def cascade(folder, term): if __name__ == '__main__': import openmetadata as om package = os.getcwd() path = os.path.join(package, 'test', 'persist') path = om.Folder(r's:\content\jobs\test\content\shots') # print cascade(path, 'properties')
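For reference, the nested `update` defined inside `cascade` merges deeper levels instead of overwriting them; a tiny standalone illustration (plain dicts instead of collections.Mapping, hypothetical data):
def merge(d, u):
    # same recursive merge as the local update() inside cascade()
    for k, v in u.items():
        if isinstance(v, dict):
            d[k] = merge(d.get(k, {}), v)
        else:
            d[k] = v
    return d

print(merge({'render': {'fps': 24, 'codec': 'h264'}},
            {'render': {'fps': 25}, 'artist': 'mb'}))
# -> {'render': {'fps': 25, 'codec': 'h264'}, 'artist': 'mb'}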
mit
4,959,071,983,263,927,000
25.818182
101
0.605593
false
4.184397
false
false
false
mchels/FolderBrowser
plotcontrols.py
1
6028
from PyQt5 import QtWidgets from PyQt5.QtWidgets import QSizePolicy class PlotControls(QtWidgets.QWidget): """ Control bar for controlling how plots are shown. Parameters ---------- cmap_names : list List of colormap names to show in the colormap dropdown menu. plot_2D_types : list List of plot_2D_type names. """ def __init__(self, cmap_names, plot_2D_types): super().__init__() self.layout = QtWidgets.QHBoxLayout() self.num_col_boxes = 3 self.num_lim_boxes = 3 self.cmap_names = cmap_names self.plot_2D_types = plot_2D_types self.init_col_sel_boxes() self.init_cmap_sel() self.init_plot_2D_type_sel() self.init_lim_boxes() self.init_aspect_box() self.setLayout(self.layout) def reset_col_boxes(self, array_of_text_items): """ Reset column selector boxes. """ assert len(array_of_text_items) == self.num_col_boxes for i, box in enumerate(self.col_boxes): box.list_of_text_items = array_of_text_items[i] prev_text = box.currentText() box.clear() box.addItems(array_of_text_items[i]) idx = box.findText(prev_text) box.setCurrentIndex(idx) min_width = len(max(box.list_of_text_items, key=len)) * 8 box.view().setMinimumWidth(min_width) # All indices must be set in the loop above before we can start # assigning lowest unoccupied texts. Otherwise we don't know which # texts are unoccupied. for box in self.col_boxes: if box.currentIndex() == -1: self.select_lowest_unoccupied(box) def init_col_sel_boxes(self): """ Initialize column selector boxes. """ self.col_boxes = [None] * self.num_col_boxes for i in range(self.num_col_boxes): box = QtWidgets.QComboBox() box.setMaxVisibleItems(80) policy_horiz = QSizePolicy.MinimumExpanding policy_vert = QSizePolicy.Maximum box.setSizePolicy(policy_horiz, policy_vert) box.setMinimumWidth(40) self.layout.addWidget(box) self.col_boxes[i] = box def init_cmap_sel(self): """ Initialize colormap selector. """ cmap_sel = QtWidgets.QComboBox() cmap_sel.addItems(self.cmap_names) policy_horiz = QSizePolicy.MinimumExpanding policy_vert = QSizePolicy.Maximum cmap_sel.setSizePolicy(policy_horiz, policy_vert) cmap_sel.setMinimumWidth(40) min_width = len(max(self.cmap_names, key=len)) * 8 cmap_sel.view().setMinimumWidth(min_width) self.layout.addWidget(cmap_sel) self.cmap_sel = cmap_sel def init_plot_2D_type_sel(self): plot_2D_type_sel = QtWidgets.QComboBox() plot_2D_type_sel.addItems(self.plot_2D_types) policy_horiz = QSizePolicy.MinimumExpanding policy_vert = QSizePolicy.Maximum plot_2D_type_sel.setSizePolicy(policy_horiz, policy_vert) plot_2D_type_sel.setMinimumWidth(40) min_width = len(max(self.plot_2D_types, key=len)) * 8 plot_2D_type_sel.view().setMinimumWidth(min_width) self.layout.addWidget(plot_2D_type_sel) self.plot_2D_type_sel = plot_2D_type_sel def init_lim_boxes(self): self.lim_boxes = [None] * self.num_lim_boxes dim_names = ['x', 'y', 'z'] for i in range(self.num_lim_boxes): lim_box = QtWidgets.QLineEdit() tooltip = ('Limit for {}. 
Use <number>:<number> where both numbers ' 'can be empty').format(dim_names[i]) lim_box.setToolTip(tooltip) self.layout.addWidget(lim_box) self.lim_boxes[i] = lim_box def init_aspect_box(self): aspect_box = QtWidgets.QLineEdit() aspect_box.setToolTip('Aspect ratio, use <number> or <number:number>') self.layout.addWidget(aspect_box) self.aspect_box = aspect_box def get_sel_cols(self): sel_texts = [box.currentText() for box in self.col_boxes] return sel_texts def get_sel_2D_type(self): sel_str = self.plot_2D_type_sel.currentText() return sel_str def get_lims(self): lims = [None] * self.num_lim_boxes for i, lim_box in enumerate(self.lim_boxes): lims[i] = self.parse_lims(lim_box.text()) return lims def get_aspect(self): text = self.aspect_box.text() return self.parse_aspect(text) def select_lowest_unoccupied(self, box): """ Sets the text on box to the text with the lowest index in box.list_of_text_items which is not already selected in another box in self.col_boxes. """ sel_texts = self.get_sel_cols() for i, text in enumerate(box.list_of_text_items): if text not in sel_texts: box.setCurrentIndex(i) return def set_text_on_box(self, box_idx, text): """ Potential infinite loop if sel_col_func calls this function. """ box = self.col_boxes[box_idx] idx = box.findText(text) box.setCurrentIndex(idx) def parse_lims(self, text): lims = text.split(':') if len(lims) != 2: return (None, None) lower_lim = self.conv_to_float_or_None(lims[0]) upper_lim = self.conv_to_float_or_None(lims[1]) return (lower_lim, upper_lim) def parse_aspect(self, text): try: return float(text) except ValueError: pass parts = text.split(':') try: num = float(parts[0]) den = float(parts[1]) except (ValueError, IndexError): return 'auto' return num / den @staticmethod def conv_to_float_or_None(str): try: return float(str) except ValueError: return None
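The limit and aspect boxes above accept a small mini-syntax; a few inputs and the values the parsers return (derived from the code above):
# parse_lims('1:5')    -> (1.0, 5.0)
# parse_lims(':10')    -> (None, 10.0)
# parse_lims('5')      -> (None, None)   # no colon, so no limits are applied
# parse_aspect('2')    -> 2.0
# parse_aspect('16:9') -> 1.7777...
# parse_aspect('')     -> 'auto'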
mit
-5,390,035,938,519,185,000
34.251462
80
0.584439
false
3.613909
false
false
false
AlexProfi/django-cms
cms/cms_menus.py
1
16191
# -*- coding: utf-8 -*- from django.utils.translation import get_language from cms import constants from cms.apphook_pool import apphook_pool from cms.utils.permissions import load_view_restrictions, has_global_page_permission from cms.utils import get_language_from_request from cms.utils.conf import get_cms_setting from cms.utils.helpers import current_site from cms.utils.i18n import get_fallback_languages, hide_untranslated from cms.utils.page_resolver import get_page_queryset from cms.utils.moderator import get_title_queryset, use_draft from menus.base import Menu, NavigationNode, Modifier from menus.menu_pool import menu_pool def get_visible_page_objects(request, pages, site=None): """ This code is basically a many-pages-at-once version of Page.has_view_permission. pages contains all published pages check if there is ANY restriction that needs a permission page visibility calculation """ public_for = get_cms_setting('PUBLIC_FOR') can_see_unrestricted = public_for == 'all' or ( public_for == 'staff' and request.user.is_staff) is_auth_user = request.user.is_authenticated() restricted_pages = load_view_restrictions(request, pages) if not restricted_pages: if can_see_unrestricted: return pages elif not is_auth_user: return [] # Unauth user can't acquire global or user perm to see pages if get_cms_setting('PERMISSION') and not site: site = current_site(request) # avoid one extra query when possible if has_global_page_permission(request, site, can_view=True): return pages def has_global_perm(): if has_global_perm.cache < 0: if request.user.has_perm('cms.view_page'): has_global_perm.cache = 1 else: has_global_perm.cache = 0 return bool(has_global_perm.cache) has_global_perm.cache = -1 def has_permission_membership(page_id): """ PagePermission user group membership tests """ user_pk = request.user.pk for perm in restricted_pages[page_id]: if perm.user_id == user_pk: return True if not perm.group_id: continue if has_permission_membership.user_groups is None: has_permission_membership.user_groups = request.user.groups.all().values_list( 'pk', flat=True) if perm.group_id in has_permission_membership.user_groups: return True return False has_permission_membership.user_groups = None visible_pages = [] for page in pages: to_add = False page_id = page.pk is_restricted = page_id in restricted_pages # restricted_pages contains as key any page.pk that is # affected by a permission grant_on if not is_restricted and can_see_unrestricted: to_add = True elif is_auth_user: # setting based handling of unrestricted pages # check group and user memberships to restricted pages if is_restricted and has_permission_membership(page_id): to_add = True elif has_global_perm(): to_add = True if to_add: visible_pages.append(page) return visible_pages def get_visible_pages(request, pages, site=None): """Returns the IDs of all visible pages""" pages = get_visible_page_objects(request, pages, site) return [page.pk for page in pages] def page_to_node(page, home, cut): """ Transform a CMS page into a navigation node. :param page: the page you wish to transform :param home: a reference to the "home" page (the page with path="0001") :param cut: Should we cut page from its parent pages? This means the node will not have a parent anymore. """ # Theses are simple to port over, since they are not calculated. # Other attributes will be added conditionnally later. 
attr = { 'soft_root': page.soft_root, 'auth_required': page.login_required, 'reverse_id': page.reverse_id, } parent_id = page.parent_id # Should we cut the Node from its parents? if home and page.parent_id == home.pk and cut: parent_id = None # possible fix for a possible problem # if parent_id and not page.parent.get_calculated_status(): # parent_id = None # ???? if page.limit_visibility_in_menu is constants.VISIBILITY_ALL: attr['visible_for_authenticated'] = True attr['visible_for_anonymous'] = True else: attr['visible_for_authenticated'] = page.limit_visibility_in_menu == constants.VISIBILITY_USERS attr['visible_for_anonymous'] = page.limit_visibility_in_menu == constants.VISIBILITY_ANONYMOUS attr['is_home'] = page.is_home # Extenders can be either navigation extenders or from apphooks. extenders = [] if page.navigation_extenders: if page.navigation_extenders in menu_pool.menus: extenders.append(page.navigation_extenders) elif "{0}:{1}".format(page.navigation_extenders, page.pk) in menu_pool.menus: extenders.append("{0}:{1}".format(page.navigation_extenders, page.pk)) # Is this page an apphook? If so, we need to handle the apphooks's nodes lang = get_language() # Only run this if we have a translation in the requested language for this # object. The title cache should have been prepopulated in CMSMenu.get_nodes # but otherwise, just request the title normally if not hasattr(page, 'title_cache') or lang in page.title_cache: app_name = page.get_application_urls(fallback=False) if app_name: # it means it is an apphook app = apphook_pool.get_apphook(app_name) extenders += app.menus exts = [] for ext in extenders: if hasattr(ext, "get_instances"): # CMSAttachMenus are treated a bit differently to allow them to be # able to be attached to multiple points in the navigation. exts.append("{0}:{1}".format(ext.__name__, page.pk)) elif hasattr(ext, '__name__'): exts.append(ext.__name__) else: exts.append(ext) if exts: attr['navigation_extenders'] = exts # Do we have a redirectURL? attr['redirect_url'] = page.get_redirect() # save redirect URL if any attr['slug'] = page.get_slug() #save page slug # Now finally, build the NavigationNode object and return it. 
ret_node = NavigationNode( page.get_menu_title(), page.get_absolute_url(), page.pk, parent_id, attr=attr, visible=page.in_navigation, ) return ret_node class CMSMenu(Menu): def get_nodes(self, request): page_queryset = get_page_queryset(request) site = current_site(request) lang = get_language_from_request(request) filters = { 'site': site, } if hide_untranslated(lang, site.pk): filters['title_set__language'] = lang if not use_draft(request): filters['title_set__published'] = True if not use_draft(request): page_queryset = page_queryset.published() pages = page_queryset.filter(**filters).order_by("path") ids = {} nodes = [] first = True home_cut = False home_children = [] home = None actual_pages = [] # cache view perms visible_pages = get_visible_pages(request, pages, site) for page in pages: # Pages are ordered by path, therefore the first page is the root # of the page tree (a.k.a "home") if page.pk not in visible_pages: # Don't include pages the user doesn't have access to continue if not home: home = page if first and page.pk != home.pk: home_cut = True if (home_cut and (page.parent_id == home.pk or page.parent_id in home_children)): home_children.append(page.pk) if ((page.pk == home.pk and home.in_navigation) or page.pk != home.pk): first = False ids[page.id] = page actual_pages.append(page) page.title_cache = {} langs = [lang] if not hide_untranslated(lang): langs.extend(get_fallback_languages(lang)) titles = list(get_title_queryset(request).filter( page__in=ids, language__in=langs)) for title in titles: # add the title and slugs and some meta data page = ids[title.page_id] page.title_cache[title.language] = title for page in actual_pages: if page.title_cache: nodes.append(page_to_node(page, home, home_cut)) return nodes menu_pool.register_menu(CMSMenu) class NavExtender(Modifier): def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb): if post_cut: return nodes # rearrange the parent relations # Find home home = next((n for n in nodes if n.attr.get("is_home", False)), None) # Find nodes with NavExtenders exts = [] for node in nodes: extenders = node.attr.get("navigation_extenders", None) if extenders: for ext in extenders: if ext not in exts: exts.append(ext) # Link the nodes for extnode in nodes: if extnode.namespace == ext and not extnode.parent_id: # if home has nav extenders but home is not visible if node == home and not node.visible: # extnode.parent_id = None extnode.parent_namespace = None extnode.parent = None else: extnode.parent_id = node.id extnode.parent_namespace = node.namespace extnode.parent = node node.children.append(extnode) removed = [] # find all not assigned nodes for menu in menu_pool.menus.items(): if (hasattr(menu[1], 'cms_enabled') and menu[1].cms_enabled and not menu[0] in exts): for node in nodes: if node.namespace == menu[0]: removed.append(node) if breadcrumb: # if breadcrumb and home not in navigation add node if breadcrumb and home and not home.visible: home.visible = True if request.path_info == home.get_absolute_url(): home.selected = True else: home.selected = False # remove all nodes that are nav_extenders and not assigned for node in removed: nodes.remove(node) return nodes menu_pool.register_modifier(NavExtender) class SoftRootCutter(Modifier): """ Ask evildmp/superdmp if you don't understand softroots! Softroot description from the docs: A soft root is a page that acts as the root for a menu navigation tree. Typically, this will be a page that is the root of a significant new section on your site. 
When the soft root feature is enabled, the navigation menu for any page will start at the nearest soft root, rather than at the real root of the site’s page hierarchy. This feature is useful when your site has deep page hierarchies (and therefore multiple levels in its navigation trees). In such a case, you usually don’t want to present site visitors with deep menus of nested items. For example, you’re on the page “Introduction to Bleeding”, so the menu might look like this: School of Medicine Medical Education Departments Department of Lorem Ipsum Department of Donec Imperdiet Department of Cras Eros Department of Mediaeval Surgery Theory Cures Bleeding Introduction to Bleeding <this is the current page> Bleeding - the scientific evidence Cleaning up the mess Cupping Leaches Maggots Techniques Instruments Department of Curabitur a Purus Department of Sed Accumsan Department of Etiam Research Administration Contact us Impressum which is frankly overwhelming. By making “Department of Mediaeval Surgery” a soft root, the menu becomes much more manageable: Department of Mediaeval Surgery Theory Cures Bleeding Introduction to Bleeding <current page> Bleeding - the scientific evidence Cleaning up the mess Cupping Leaches Maggots Techniques Instruments """ def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb): # only apply this modifier if we're pre-cut (since what we do is cut) # or if no id argument is provided, indicating {% show_menu_below_id %} if post_cut or root_id: return nodes selected = None root_nodes = [] # find the selected node as well as all the root nodes for node in nodes: if node.selected: selected = node if not node.parent: root_nodes.append(node) # if we found a selected ... if selected: # and the selected is a softroot if selected.attr.get("soft_root", False): # get its descendants nodes = selected.get_descendants() # remove the link to parent selected.parent = None # make the selected page the root in the menu nodes = [selected] + nodes else: # if it's not a soft root, walk ancestors (upwards!) nodes = self.find_ancestors_and_remove_children(selected, nodes) return nodes def find_and_remove_children(self, node, nodes): for child in node.children: if child.attr.get("soft_root", False): self.remove_children(child, nodes) return nodes def remove_children(self, node, nodes): for child in node.children: nodes.remove(child) self.remove_children(child, nodes) node.children = [] def find_ancestors_and_remove_children(self, node, nodes): """ Check ancestors of node for soft roots """ if node.parent: if node.parent.attr.get("soft_root", False): nodes = node.parent.get_descendants() node.parent.parent = None nodes = [node.parent] + nodes else: nodes = self.find_ancestors_and_remove_children( node.parent, nodes) else: for newnode in nodes: if newnode != node and not newnode.parent: self.find_and_remove_children(newnode, nodes) for child in node.children: if child != node: self.find_and_remove_children(child, nodes) return nodes menu_pool.register_modifier(SoftRootCutter)
bsd-3-clause
7,715,891,116,846,316,000
36.992958
103
0.571517
false
4.357835
false
false
false
hadware/lexicographer
epub-parser/src/epub_to_json/epub_to_json.py
1
5101
import sys

from epub import open_epub
import simplejson as json
from bs4 import BeautifulSoup, Tag


class SimpleChapter(object):
    def __init__(self, name, text):
        self.name = name
        self.text = text


class Parser(object):
    def __init__(self, epub_path):
        self.epub_file = open_epub(epub_path, 'r')
        # current item used for navigation
        self.current_item = None
        # soup for the current item
        self.item_data_soup = None

    def _get_metadata_(self, metadata):
        dict = {}
        # get metadata
        dict['titles'] = [x for x in metadata.titles[0] if x]
        dict['creators'] = [x for x in metadata.creators[0] if x]
        dict['subjects'] = [x for x in metadata.subjects if x]
        dict['identifiers'] = [x for x in metadata.identifiers[0] if x]
        dict['dates'] = [x for x in metadata.dates[0] if x]
        dict['right'] = metadata.right
        # return filled dict
        return dict

    def _get_text_chapter_(self, current_tag, next_tag=None, first_item=False):
        if first_item:
            chapter_text = current_tag.get_text()
        else:
            chapter_text = ''
        for elem in current_tag.next_siblings:
            # if next tag
            if next_tag is not None and isinstance(elem, Tag) and elem == next_tag:
                break
            # else, append text
            elif isinstance(elem, Tag):
                text = elem.get_text()
                # if end of ebook
                if "Project Gutenberg" in text:
                    break
                else:
                    chapter_text += text
        # sanitize text
        chapter_text = chapter_text.replace('\n', ' ').replace('*', '').replace('"', ' ')
        chapter_text = chapter_text.strip()
        return chapter_text

    def _switch_item_(self, item):
        # if new file or first read
        if self.current_item != item or self.item_data_soup is None:
            # we change the current item
            self.current_item = item
            # we read the file
            self.item_data_soup = BeautifulSoup(self.epub_file.read_item(item), 'lxml')

    def _iterate_chapter_(self, chapters, current_nav, next_nav):
        # get chapter name
        chapter_name = current_nav.labels[0][0]
        # get chapter id & file
        split_src = current_nav.src.rsplit('#', 1)
        item = self.epub_file.get_item_by_href(split_src[0])
        self._switch_item_(item)
        # get tag by id
        current_tag = self.item_data_soup.find(id=split_src[1])
        # determine which tag is next
        if current_nav.nav_point:
            direct_next = current_nav.nav_point[0]
        else:
            if next_nav is not None:
                direct_next = next_nav
            else:
                direct_next = None
        if direct_next is not None:
            next_split = direct_next.src.rsplit('#', 1)
            # if next is on same file
            if split_src[0] == next_split[0]:
                next_tag = self.item_data_soup.find(id=next_split[1])
                chapter_text = self._get_text_chapter_(current_tag, next_tag)
            else:
                # get text remaining on current page
                chapter_text = self._get_text_chapter_(current_tag)
                # get next item
                item = self.epub_file.get_item_by_href(next_split[0])
                self._switch_item_(item)
                current_tag = self.item_data_soup.body.contents[0]
                next_tag = self.item_data_soup.find(id=next_split[1])
                chapter_text += self._get_text_chapter_(current_tag, next_tag, True)
        else:
            chapter_text = self._get_text_chapter_(current_tag)
        # add chapter to array if not empty
        if chapter_text != '' and "CONTENT" not in chapter_name.upper() and "CHAPTERS" not in chapter_name.upper():
            chapters.append(SimpleChapter(chapter_name, chapter_text).__dict__)
        # if nav point has subchild
        if current_nav.nav_point:
            it = iter(current_nav.nav_point)
            current_nav = next(it)
            for child in it:
                self._iterate_chapter_(chapters, current_nav, child)
                current_nav = child
            self._iterate_chapter_(chapters, current_nav, next_nav)

    def epub_to_json(self):
        epub = {}
        chapters = []
        it = iter(self.epub_file.toc.nav_map.nav_point)
        current_nav = next(it)
        for next_nav in it:
            self._iterate_chapter_(chapters, current_nav, next_nav)
            current_nav = next_nav
        self._iterate_chapter_(chapters, current_nav, None)
        # assemble parts
        epub['metadatas'] = self._get_metadata_(self.epub_file.opf.metadata)
        epub['chapters'] = chapters
        # create json object
        json_obj = json.dumps(epub, separators=(',', ':'), ensure_ascii=False)
        self.epub_file.close()
        return json_obj


if __name__ == '__main__':
    # need one argument
    parser = Parser(sys.argv[1])
    parser.epub_to_json()
gpl-2.0
7,686,731,611,143,497,000
32.781457
115
0.554401
false
3.867324
false
false
false
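A hedged usage sketch for the Parser class in the record above; it is not part of the dataset entry. The module import path and the file names are assumptions taken from the record's path, and the epub, simplejson, bs4 and lxml packages are assumed to be installed.

# Hypothetical example: convert an EPUB into the JSON structure produced by the record above.
from epub_to_json.epub_to_json import Parser  # module path assumed from the record

parser = Parser('alice.epub')           # 'alice.epub' is an arbitrary example file
json_text = parser.epub_to_json()       # JSON string with 'metadatas' and 'chapters' keys
with open('alice.json', 'w') as out:
    out.write(json_text)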
datamade/yournextmp-popit
candidates/tests/test_person_view.py
1
2933
# Smoke tests for viewing a candidate's page

from datetime import date, timedelta
import re

from django.conf import settings
from django.test.utils import override_settings
from django_webtest import WebTest

from .factories import (
    AreaTypeFactory, ElectionFactory, CandidacyExtraFactory,
    ParliamentaryChamberFactory, PartyFactory, PartyExtraFactory,
    PersonExtraFactory, PostExtraFactory
)

election_date_before = lambda r: {'DATE_TODAY': date.today()}
election_date_after = lambda r: {'DATE_TODAY': date.today() + timedelta(days=28)}
processors = settings.TEMPLATE_CONTEXT_PROCESSORS
processors_before = processors + ("candidates.tests.test_person_view.election_date_before",)
processors_after = processors + ("candidates.tests.test_person_view.election_date_after",)


class TestPersonView(WebTest):

    def setUp(self):
        wmc_area_type = AreaTypeFactory.create()
        election = ElectionFactory.create(
            slug='2015',
            name='2015 General Election',
            area_types=(wmc_area_type,)
        )
        commons = ParliamentaryChamberFactory.create()
        post_extra = PostExtraFactory.create(
            elections=(election,),
            base__organization=commons,
            slug='65808',
            base__label='Member of Parliament for Dulwich and West Norwood'
        )
        person_extra = PersonExtraFactory.create(
            base__id='2009',
            base__name='Tessa Jowell'
        )
        PartyFactory.reset_sequence()
        party_extra = PartyExtraFactory.create()
        CandidacyExtraFactory.create(
            election=election,
            base__person=person_extra.base,
            base__post=post_extra.base,
            base__on_behalf_of=party_extra.base
        )

    def test_get_tessa_jowell(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertTrue(
            re.search(
                r'''(?msx)
                <h1>Tessa\s+Jowell</h1>\s*
                <p>Candidate\s+for\s+
                <a\s+href="/election/2015/post/65808/dulwich-and-west-norwood">Dulwich\s+
                and\s+West\s+Norwood</a>\s+in\ 2015\s+General\s+Election\s*</p>''',
                unicode(response)
            )
        )

    @override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_before)
    def test_get_tessa_jowell_before_election(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertContains(response, 'Contesting in the 2015 General Election')

    @override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_after)
    def test_get_tessa_jowell_after_election(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertContains(response, 'Contested in the 2015 General Election')

    def test_get_non_existent(self):
        response = self.app.get(
            '/person/987654/imaginary-person',
            expect_errors=True
        )
        self.assertEqual(response.status_code, 404)
agpl-3.0
2,693,899,126,005,377,500
35.6625
92
0.656325
false
3.594363
true
false
false
antoinecarme/pyaf
setup.py
1
1126
from setuptools import setup
from setuptools import find_packages

with open("README.md", "r") as fh:
    pyaf_long_description = fh.read()

setup(name='pyaf',
      version='3.0-RC1',
      description='Python Automatic Forecasting',
      long_description=pyaf_long_description,
      long_description_content_type="text/markdown",
      author='Antoine CARME',
      author_email='antoine.carme@laposte.net',
      url='https://github.com/antoinecarme/pyaf',
      license='BSD 3-clause',
      packages=find_packages(include=['pyaf', 'pyaf.*']),
      python_requires='>=3',
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Programming Language :: Python :: 3'],
      keywords='arx automatic-forecasting autoregressive benchmark cycle decomposition exogenous forecasting heroku hierarchical-forecasting horizon jupyter pandas python scikit-learn seasonal time-series transformation trend web-service',
      install_requires=[
          'scipy',
          'pandas',
          'sklearn',
          'matplotlib',
          'pydot',
          'dill',
          'sqlalchemy'
      ])
bsd-3-clause
8,461,214,734,102,750,000
37.827586
239
0.64476
false
4.035842
false
true
false
miguelalonso/pywws
src/doc/conf.py
1
9030
# -*- coding: utf-8 -*- # # pywws - Python software for USB Wireless Weather Stations # http://github.com/jim-easterbrook/pywws # Copyright (C) 2008-15 pywws contributors # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # pywws documentation build configuration file, created by # sphinx-quickstart on Fri Sep 30 08:05:58 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # cludge to allow documentation to be compiled without installing dependencies class Dummy(object): def __getattr__(self, name): if name in ('__file__',): return None return Dummy for mod_name in ('hid', 'oauth2', 'twitter', 'usb', 'usb.core', 'usb.util', 'libusb1', 'usb1', 'daemon', 'daemon.runner'): sys.modules[mod_name] = Dummy() # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.viewcode'] autosummary_generate = True autoclass_content = 'both' autodoc_member_order = 'bysource' autodoc_default_flags = ['members', 'undoc-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] rst_epilog = """ ---- Comments or questions? Please subscribe to the pywws mailing list http://groups.google.com/group/pywws and let us know. """ # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pywws' copyright = u'2008-15, pywws contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = # The full version, including alpha/beta/rc tags. #release = from pywws import __version__ as release version = release[:release.rfind('.')] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None if not on_rtd and 'LANG' in os.environ: language = os.environ['LANG'].split('_')[0] locale_dirs = ['../pywws/lang'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if on_rtd: html_theme = 'default' else: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None html_logo = 'pywws_logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None html_favicon = 'pywws_logo.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pywwsdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pywws.tex', u'pywws Documentation', u'Jim Easterbrook', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pywws', u'pywws Documentation', [u'Jim Easterbrook'], 1) ]
gpl-2.0
7,874,652,060,301,545,000
31.956204
81
0.706645
false
3.673718
false
false
false
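The Sphinx conf.py in the record above stubs out hardware-dependent imports so autodoc can run on a docs-only host. A minimal self-contained sketch of that sys.modules stubbing pattern follows; the module names 'usb', 'usb.core' and 'hid' stand in for any dependency that is unavailable at build time.

# Hedged sketch of the import-stubbing trick used in the pywws conf.py above.
import sys

class Dummy(object):
    def __getattr__(self, name):
        if name in ('__file__',):
            return None
        return Dummy

for mod_name in ('usb', 'usb.core', 'hid'):
    sys.modules[mod_name] = Dummy()

import usb.core  # resolves to the Dummy stub, so autodoc-style imports no longer fail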
PaddlePaddle/Paddle
python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py
1
50054
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from functools import reduce import collections import math import os import warnings import logging import six import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.core import CommContext import paddle.fluid.framework as framework from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher from paddle.fluid.transpiler.details.program_utils import delete_ops OP_NAME_SCOPE = "op_namescope" CLIP_OP_NAME_SCOPE = "gradient_clip" STEP_COUNTER = "@PS_STEP_COUNTER@" LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@" OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName() RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName() RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"] SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"} def _get_lr_ops(program): lr_ops = [] for index, op in enumerate(program.global_block().ops): role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME)) if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \ role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \ int(OPT_OP_ROLE_ATTR_VALUE): lr_ops.append(op) return lr_ops def _has_global_step(lr_ops): if len(lr_ops) > 0: for idx, op in enumerate(lr_ops): if op.type != 'increment': continue counter = op.input("X")[0] if counter == LEARNING_RATE_DECAY_COUNTER: return True return False def is_sparse_op(op): if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr( 'is_distributed') is False: return True if op.type == "distributed_lookup_table" and op.attr( 'is_distributed') is False: return True return False def is_distributed_sparse_op(op): if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True: return True if op.type == "distributed_lookup_table" and op.attr( 'is_distributed') is True: return True return False def get_sparse_tablename(op): return op.input("W")[0] def get_sparse_tablenames(program, is_distributed): tablenames = set() if is_distributed: for op in program.global_block().ops: if is_distributed_sparse_op(op): tablenames.add(get_sparse_tablename(op)) else: for op in program.global_block().ops: if is_sparse_op(op): tablenames.add(get_sparse_tablename(op)) return list(tablenames) class MergedVariable: def __init__(self, merged, ordered, offsets): self.merged_var = merged self.ordered_vars = ordered self.offsets = offsets def Singleton(cls): _instance = {} def _singleton(*args, **kargs): if cls not in _instance: _instance[cls] = 
cls(*args, **kargs) return _instance[cls] return _singleton @Singleton class CompileTimeStrategy(object): def __init__(self, main_program, startup_program, strategy, role_maker): self.min_block_size = 81920 self.origin_main_program = main_program self.origin_startup_program = startup_program self.origin_ps_main_program = main_program self.origin_ps_startup_program = startup_program self.strategy = strategy self.role_maker = role_maker self.use_ps_gpu = False try: self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode except: warnings.warn( "Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet" ) self.is_heter_ps_mode = False self.origin_sparse_pairs = [] self.origin_dense_pairs = [] self.merged_variables_pairs = [] self.merged_dense_pairs = [] self.merged_sparse_pairs = [] self.merged_variable_map = {} self.param_name_to_grad_name = {} self.grad_name_to_param_name = {} self.param_grad_ep_mapping = collections.OrderedDict() self.grad_param_mapping = collections.OrderedDict() self._build_var_distributed() self.tensor_table_dict = {} # for heter-ps save variables self.origin_merged_variables_pairs = list(self.merged_variables_pairs) self.origin_merged_dense_pairs = list(self.merged_dense_pairs) self.origin_merged_sparse_pairs = list(self.merged_sparse_pairs) def get_distributed_mode(self): trainer = self.strategy.get_trainer_runtime_config() return trainer.mode def is_sync_mode(self): trainer = self.strategy.get_trainer_runtime_config() return trainer.mode == DistributedMode.SYNC def is_geo_mode(self): trainer = self.strategy.get_trainer_runtime_config() return trainer.mode == DistributedMode.GEO def is_async_mode(self): trainer = self.strategy.get_trainer_runtime_config() return trainer.mode == DistributedMode.ASYNC def get_role_id(self): try: return self.role_maker._role_id() except Exception: return self.role_maker.role_id() def get_trainers(self): try: return self.role_maker._worker_num() except Exception: return self.role_maker.worker_num() def get_ps_endpoint(self): try: return self.role_maker._get_pserver_endpoints()[self.get_role_id()] except Exception: return self.role_maker.get_pserver_endpoints()[self.get_role_id()] def get_ps_endpoints(self): try: return self.role_maker._get_pserver_endpoints() except Exception: return self.role_maker.get_pserver_endpoints() def get_heter_worker_endpoints(self): try: return self.role_maker._get_heter_worker_endpoints() except Exception: return self.role_maker.get_heter_worker_endpoints() def get_heter_worker_endpoint(self): try: return self.role_maker._get_heter_worker_endpoint() except Exception: return self.role_maker.get_heter_worker_endpoint() def get_origin_programs(self): return self.origin_main_program, self.origin_startup_program def get_origin_main_program(self): return self.origin_main_program def get_origin_startup_program(self): return self.origin_startup_program def set_origin_ps_main_program(self, program): self.origin_ps_main_program = program def set_origin_ps_startup_program(self, program): self.origin_ps_startup_program = program def get_origin_ps_main_program(self): return self.origin_ps_main_program def get_origin_ps_startup_program(self): return self.origin_ps_startup_program def add_tensor_table(self, feed_var_name, fetch_var_name="", startup_program=None, main_program=None, tensor_table_class=""): self.tensor_table_dict[feed_var_name] = {} self.tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name self.tensor_table_dict[feed_var_name]["fetch_var_name"] = fetch_var_name 
self.tensor_table_dict[feed_var_name][ "startup_program"] = startup_program self.tensor_table_dict[feed_var_name]["main_program"] = main_program self.tensor_table_dict[feed_var_name][ "tensor_table_class"] = tensor_table_class def get_tensor_table_dict(self): return self.tensor_table_dict def get_sparse_varname_on_ps(self, is_distributed, endpoint=None): if not endpoint: endpoint = self.get_ps_endpoint() varnames = get_sparse_tablenames(self.get_origin_main_program(), is_distributed) ps_sparse_varnames = [] for varname in varnames: tables = self.get_var_distributed(varname, True) for i in range(len(tables)): table, ep, _ = tables[i] if ep == endpoint: ps_sparse_varnames.append(table) return ps_sparse_varnames def get_optimize_varname_on_ps(self, param_name): origin_param_name, _, _ = _get_varname_parts(param_name) optimize_var_names = [] for op in self.get_origin_main_program().global_block().ops: # check all optimizer op if int(op.all_attrs()["op_role"]) == 2: # check param name if op.input("Param")[0] != origin_param_name: continue # check all input for key in op.input_names: if key in [ "Param", "Grad", "LearningRate", "Beta1Tensor", "Beta2Tensor" ]: continue # check varibale shape related param, e.g: Moment1 optimize_var_names += self._get_optimizer_param_related_var_name( op, op.type, key) return optimize_var_names def _get_optimizer_param_related_var_name(self, op, op_type, varkey): """ Returns the names for optimizer inputs that need to be load """ related_var_names = [] if op_type == "adam": if varkey in ["Moment1", "Moment2"]: related_var_names.append(op.input(varkey)[0]) elif op_type == "adagrad": if varkey == "Moment": related_var_names.append(op.input(varkey)[0]) elif op_type in ["momentum", "lars_momentum"]: if varkey == "Velocity": related_var_names.append(op.input(varkey)[0]) elif op_type == "rmsprop": if varkey in ["Moment", "MeanSquare"]: related_var_names.append(op.input(varkey)[0]) elif op_type == "ftrl": if varkey in ["SquaredAccumulator", "LinearAccumulator"]: related_var_names.append(op.input(varkey)[0]) elif op_type == "sgd": pass else: raise ValueError( "Not supported optimizer for distributed training: %s" % op_type) return related_var_names def build_ctx(self, vars, mapping, is_grad, is_sparse, is_send, is_distributed=False): def get_grad_var_ep(slices): names = [] eps = [] sections = [] for slice in slices: if self.is_geo_mode(): if is_send: names.append("{}.delta".format(slice.name)) else: names.append(slice.name) elif is_grad and self.is_sync_mode() and self.get_trainers( ) > 1: names.append("{}.trainer_{}".format(slice.name, self.get_role_id())) else: names.append(slice.name) sections.append(slice.shape[0]) for ep, pairs in self.param_grad_ep_mapping.items(): params, grads = pairs["params"], pairs["grads"] for var in params + grads: if slice.name == var.name: eps.append(ep) break return names, eps, sections if isinstance(vars, MergedVariable): name = vars.merged_var.name slices = mapping[name] names, eps, sections = get_grad_var_ep(slices) origin_varnames = [var.name for var in vars.ordered_vars] else: name = vars.name slices = mapping[name] names, eps, sections = get_grad_var_ep(slices) origin_varnames = [vars.name] trainer_id = self.get_role_id() aggregate = True ctx = CommContext(name, names, eps, sections, origin_varnames, trainer_id, aggregate, is_sparse, is_distributed) return ctx def get_trainer_send_context(self): send_ctx = {} distibuted_varnames = get_sparse_tablenames(self.origin_main_program, True) if not self.is_geo_mode(): for merged in 
self.merged_dense_pairs: grad = merged[1] ctx = self.build_ctx(grad, self.grad_var_mapping, True, False, True) send_ctx[ctx.var_name()] = ctx for merged in self.merged_sparse_pairs: param = merged[0] grad = merged[1] param_name = param.merged_var.name is_distributed = True if param_name in distibuted_varnames else False ctx = self.build_ctx(grad, self.grad_var_mapping, True, True, True, is_distributed) send_ctx[ctx.var_name()] = ctx if self.is_async_mode(): name, ctx = self._step_ctx() send_ctx[name] = ctx else: for pairs in self.origin_sparse_pairs: param, grad = pairs param_name = param.name is_distributed = True if param_name in distibuted_varnames else False param_ctx = self.build_ctx(param, self.param_var_mapping, False, True, True, is_distributed) grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True, True, True, is_distributed) ctx = CommContext(param_ctx.var_name(), param_ctx.split_varnames(), param_ctx.split_endpoints(), param_ctx.sections(), grad_ctx.origin_varnames(), param_ctx.trainer_id(), param_ctx.aggregate(), param_ctx.is_sparse(), param_ctx.is_distributed()) send_ctx[ctx.var_name()] = ctx name, ctx = self._step_ctx() send_ctx[name] = ctx return send_ctx def get_communicator_send_context(self): send_ctx = {} distibuted_varnames = get_sparse_tablenames(self.origin_main_program, True) if self.is_geo_mode(): for pairs in self.merged_dense_pairs: param = pairs[0] ctx = self.build_ctx(param, self.param_var_mapping, False, False, True) send_ctx[ctx.var_name()] = ctx for pairs in self.merged_sparse_pairs: param = pairs[0] param_name = param.merged_var.name is_distributed = True if param_name in distibuted_varnames else False ctx = self.build_ctx(param, self.param_var_mapping, False, True, True, is_distributed) send_ctx[ctx.var_name()] = ctx name, ctx = self._step_ctx() send_ctx[name] = ctx else: for merged in self.merged_dense_pairs: grad = merged[1] ctx = self.build_ctx(grad, self.grad_var_mapping, True, False, True) send_ctx[ctx.var_name()] = ctx for merged in self.merged_sparse_pairs: param, grad = merged param_name = param.merged_var.name is_distributed = True if param_name in distibuted_varnames else False ctx = self.build_ctx(grad, self.grad_var_mapping, True, True, True, is_distributed) send_ctx[ctx.var_name()] = ctx name, ctx = self._step_ctx() send_ctx[name] = ctx return send_ctx def get_communicator_recv_context(self, recv_type=1, use_origin_program=False): # recv_type # 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. 
ALL distibuted_varnames = get_sparse_tablenames(self.origin_main_program, True) sparse_varnames = [] for pairs in self.origin_sparse_pairs: param, grad = pairs sparse_varnames.append(param.name) dense_recv_ctx = {} sparse_recv_ctx = {} distributed_recv_ctx = {} variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs for merged in variables_pairs: params = merged[0] if params.merged_var.name in sparse_varnames: continue ctx = self.build_ctx(params, self.param_var_mapping, False, False, False, False) dense_recv_ctx[ctx.var_name()] = ctx for pairs in self.origin_sparse_pairs: param, grad = pairs if param.name in distibuted_varnames: ctx = self.build_ctx(param, self.param_var_mapping, False, True, False, True) distributed_recv_ctx[ctx.var_name()] = ctx else: ctx = self.build_ctx(param, self.param_var_mapping, False, True, False, False) sparse_recv_ctx[ctx.var_name()] = ctx if recv_type == 1: return dense_recv_ctx if recv_type == 2: return sparse_recv_ctx if recv_type == 3: return distributed_recv_ctx if recv_type == 4: dense_recv_ctx.update(sparse_recv_ctx) dense_recv_ctx.update(distributed_recv_ctx) return dense_recv_ctx assert ValueError( "recv_type can only be 1/2/3/4, 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL" ) def get_the_one_trainer_send_context(self, split_dense_table): if self.is_geo_mode(): send_ctx = {} trainer_id = self.get_role_id() idx = 0 distibuted_varnames = get_sparse_tablenames( self.origin_main_program, True) for merged in self.merged_sparse_pairs: param, grad = merged grad_name = grad.merged_var.name param_name = param.merged_var.name is_distributed = True if param_name in distibuted_varnames else False var = self.origin_main_program.global_block().vars[ grad.merged_var.name] var_numel = reduce(lambda x, y: x * y, var.shape[1:]) sparse_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], [var_numel], [grad_name], trainer_id, True, True, is_distributed, idx, False) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx if len(send_ctx) == 0: raise ValueError( "GeoSGD require sparse parameters in your net.") if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker(): name, ctx = self._step_ctx(idx) send_ctx[name] = ctx return send_ctx else: return self.get_the_one_send_context(split_dense_table) def get_dense_send_context(self, send_ctx, idx, merged_dense_pairs, trainer_id, split_dense_table=False): if len(merged_dense_pairs) < 1: return idx if not split_dense_table: origin_varnames = [] var_numel = 0 for merged in merged_dense_pairs: grad = merged[1] origin_varnames.append(grad.merged_var.name) var = self.origin_main_program.global_block().vars[ grad.merged_var.name] var_numel += reduce(lambda x, y: x * y, var.shape) grad_name = "Dense@Grad" trainer_id = self.get_role_id() aggregate = True dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], [var_numel], origin_varnames, trainer_id, aggregate, False, False, idx, False) send_ctx[grad_name] = dense_ctx idx += 1 else: for merged in merged_dense_pairs: grad = merged[1] origin_varname = grad.merged_var.name var = self.origin_main_program.global_block().vars[ origin_varname] var_numel = reduce(lambda x, y: x * y, var.shape) grad_name = origin_varname aggregate = True dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], [var_numel], [origin_varname], trainer_id, aggregate, False, False, idx, False) send_ctx[grad_name] = dense_ctx idx += 1 return idx def get_the_one_send_context(self, split_dense_table=False, 
use_origin_program=False, ep_list=None): if ep_list is None: ep_list = ["127.0.0.1:6071"] send_ctx = {} trainer_id = self.get_role_id() idx = 0 merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs, trainer_id, split_dense_table) distibuted_varnames = get_sparse_tablenames(self.origin_main_program, True) for merged in merged_sparse_pairs: param, grad = merged grad_name = grad.merged_var.name param_name = param.merged_var.name splited_varname = [] for i in range(len(ep_list)): splited_varname.append("{}.block{}".format(param_name, i)) is_distributed = True if param_name in distibuted_varnames else False var = self.origin_main_program.global_block().vars[ grad.merged_var.name] shape = list(var.shape) shape[0] = 0 if is_distributed else shape[0] sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape, [grad_name], trainer_id, True, True, is_distributed, idx, False) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker(): name, ctx = self._step_ctx(idx) send_ctx[name] = ctx return send_ctx def get_the_one_recv_context(self, is_dense=True, split_dense_table=False, use_origin_program=False): recv_id_maps = {} if is_dense: send_ctx = self.get_the_one_send_context( split_dense_table=split_dense_table, use_origin_program=use_origin_program) for idx, (name, ctx) in enumerate(send_ctx.items()): if ctx.is_sparse(): continue if ctx.is_tensor_table(): continue origin_grad_varnames = ctx.origin_varnames() param_names = [] for grad_varname in origin_grad_varnames: param_name = self.grad_name_to_param_name[grad_varname] param_names.append(param_name) recv_id_maps[ctx.table_id()] = param_names else: send_ctx = self.get_the_one_send_context() for idx, (name, ctx) in enumerate(send_ctx.items()): if not ctx.is_sparse(): continue origin_grad_varnames = ctx.origin_varnames() param_names = [] for grad_varname in origin_grad_varnames: param_name = self.grad_name_to_param_name[grad_varname] param_names.append(param_name) recv_id_maps[ctx.table_id()] = param_names return recv_id_maps def get_server_runtime_config(self): return self.strategy.get_server_runtime_config() def get_var_distributed(self, varname, is_param): var_distributed = [] offset = 0 if is_param: params = self.param_var_mapping[varname] param_varnames = [var.name for var in params] for ep, pairs in self.param_grad_ep_mapping.items(): for p in pairs["params"]: if p.name in param_varnames: offset += p.shape[0] var_distributed.append((p.name, ep, p.shape[0])) else: grads = self.grad_var_mapping[varname] grad_varnames = [var.name for var in grads] for ep, pairs in self.param_grad_ep_mapping.items(): for g in pairs["grads"]: if g.name in grad_varnames: var_distributed.append((g.name, ep, g.shape[0])) return var_distributed def _step_ctx(self, idx): name = STEP_COUNTER trainer_id = self.get_role_id() endpoints = self.get_ps_endpoints() sections = [1] * len(endpoints) names = [name] * len(endpoints) ctx = CommContext(name, names, endpoints, sections, [name], trainer_id, True, False, False, idx, True) return name, ctx def _create_vars_from_blocklist(self, block_list): """ Create vars for each split. NOTE: only grads need to be named for different trainers, use add_trainer_suffix to rename the grad vars. 
Args: block_list (list[(varname, block_id, block_size)]): List of gradient blocks. add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True. Returns: var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping from original var name to each var split. """ # varname->[(block_id, current_block_size)] block_map = collections.OrderedDict() var_mapping = collections.OrderedDict() for block_str in block_list: varname, offset, size = block_str.split(":") if varname not in block_map: block_map[varname] = [] block_map[varname].append((int(offset), int(size))) for varname, split in six.iteritems(block_map): orig_var = self.merged_variable_map[varname] if len(split) == 1: var_mapping[varname] = [orig_var] self.var_distributed.add_distributed_var( origin_var=orig_var, slice_var=orig_var, block_id=0, offset=0, is_slice=False, vtype="Param") else: var_mapping[varname] = [] orig_shape = orig_var.shape orig_dim1_flatten = 1 if len(orig_shape) >= 2: orig_dim1_flatten = reduce(lambda x, y: x * y, orig_shape[1:]) for i, block in enumerate(split): size = block[1] rows = size // orig_dim1_flatten splited_shape = [rows] if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) new_var_name = "%s.block%d" % (varname, i) slice_var = vars_metatools.VarStruct( name=new_var_name, shape=splited_shape, dtype=orig_var.dtype, type=orig_var.type, lod_level=orig_var.lod_level, persistable=False) var_mapping[varname].append(slice_var) self.var_distributed.add_distributed_var( origin_var=orig_var, slice_var=slice_var, block_id=i, offset=-1, is_slice=False, vtype="Param") return var_mapping def _dispatcher(self): ps_dispatcher = RoundRobin(self.get_ps_endpoints()) ps_dispatcher.reset() grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping)) sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs] for grad_varname, splited_vars in grad_var_mapping_items: if grad_varname in sparse_gradnames: continue send_vars = [] for _, var in enumerate(splited_vars): send_vars.append(var) recv_vars = [] for _, var in enumerate(send_vars): recv_vars.append(self.grad_param_mapping[var]) eps = ps_dispatcher.dispatch(recv_vars) for i, ep in enumerate(eps): self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) for grad_varname, splited_vars in grad_var_mapping_items: if grad_varname not in sparse_gradnames: continue ps_dispatcher.reset() send_vars = [] for _, var in enumerate(splited_vars): send_vars.append(var) recv_vars = [] for _, var in enumerate(send_vars): recv_vars.append(self.grad_param_mapping[var]) eps = ps_dispatcher.dispatch(recv_vars) for i, ep in enumerate(eps): self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) def _slice_variable(self, var_list, slice_count, min_block_size, uniform=False): """ We may need to split dense tensor to one or more blocks and put them equally onto parameter server. One block is a sub-tensor aligned by dim[0] of the tensor. We need to have a minimal block size so that the calculations in the parameter server side can gain better performance. By default minimum block size 8K elements (maybe 16bit or 32bit or 64bit). Args: var_list (list): List of variables. slice_count (int): Numel of count that variables will be sliced, which could be the pserver services' count. min_block_size (int): Minimum split block size. 
Returns: blocks (list[(varname, block_id, current_block_size)]): A list of VarBlocks. Each VarBlock specifies a shard of the var. """ blocks = [] for var in var_list: if not uniform: var_numel = reduce(lambda x, y: x * y, var.shape) split_count = 1 if min_block_size == -1: split_count = 1 else: split_count = slice_count max_pserver_count = int( math.floor(var_numel / float(min_block_size))) if max_pserver_count == 0: max_pserver_count = 1 if max_pserver_count < slice_count: split_count = max_pserver_count block_size = int(math.ceil(var_numel / float(split_count))) if len(var.shape) >= 2: # align by dim1(width) dim1 = reduce(lambda x, y: x * y, var.shape[1:]) remains = block_size % dim1 if remains != 0: block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) for block_id in range(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) block = vars_metatools.VarBlock(var.name, block_id, curr_block_size) blocks.append(str(block)) else: block_size = var.shape[0] / slice_count remainder = var.shape[0] % slice_count if block_size == 0: dim0s = [block_size] * remainder else: dim0s = [block_size] * slice_count for i in range(remainder): dim0s[i] = dim0s[i] + 1 dim1 = reduce(lambda x, y: x * y, var.shape[1:]) for block_id in range(len(dim0s)): numel = dim0s[block_id] * dim1 block = vars_metatools.VarBlock(var.name, block_id, numel) blocks.append(str(block)) return blocks def _get_param_grad_blocks(self, pairs, min_block_size, uniform=False): param_list = [] grad_list = [] param_grad_set = set() for p, g in pairs: # todo(tangwei12) skip parameter marked not trainable # if type(p) == Parameter and p.trainable == False: # continue p = p.merged_var g = g.merged_var if p.name not in param_grad_set: param_list.append(p) param_grad_set.add(p.name) if g.name not in param_grad_set: grad_list.append(g) param_grad_set.add(g.name) # when we slice var up into blocks, we will slice the var according to # pserver services' count. A pserver may have two or more listening ports. grad_blocks = self._slice_variable(grad_list, len(self.get_ps_endpoints()), min_block_size, uniform) param_blocks = self._slice_variable(param_list, len(self.get_ps_endpoints()), min_block_size, uniform) return param_blocks, grad_blocks def _var_slice_and_distribute(self): # update these mappings for further transpile: # 1. param_var_mapping : param var name->[split params vars] # 2. grad_var_mapping : grad var name->[split grads vars] # 3. grad_param_mapping : grad.blockx->param.blockx # 4. 
param_grad_ep_mapping : ep->{"params" : [], "grads" : [] } dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs, self.min_block_size, False) sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs, self.min_block_size, True) param_blocks = dps + sps grad_blocks = dgs + sgs assert (len(grad_blocks) == len(param_blocks)) # origin_param_name->[splited_param_vars] self.param_var_mapping = self._create_vars_from_blocklist(param_blocks) self.grad_var_mapping = self._create_vars_from_blocklist(grad_blocks) # dict(grad_splited_var->param_splited_var) self.grad_param_mapping = collections.OrderedDict() for g, p in zip(grad_blocks, param_blocks): g_name, g_bid, _ = g.split(":") p_name, p_bid, _ = p.split(":") self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ self.param_var_mapping[p_name][int(p_bid)] print_maps = {} for k, v in self.grad_param_mapping.items(): print_maps[str(k)] = str(v) # create mapping of endpoint->split var to create pserver side program self.param_grad_ep_mapping = collections.OrderedDict() [ self.param_grad_ep_mapping.update({ ep: { "params": [], "grads": [] } }) for ep in self.get_ps_endpoints() ] def _build_var_distributed(self): self.var_distributed = vars_metatools.VarsDistributed() sparse_pairs, dense_pairs = self.get_param_grads() origin_for_sparse = [] origin_for_dense = [] param_name_grad_name = dict() grad_name_to_param_name = dict() for param, grad in sparse_pairs: param = vars_metatools.create_var_struct(param) grad = vars_metatools.create_var_struct(grad) origin_for_sparse.append((param, grad)) for param, grad in dense_pairs: param = vars_metatools.create_var_struct(param) grad = vars_metatools.create_var_struct(grad) origin_for_dense.append((param, grad)) for dense_pair in origin_for_dense: param, grad = dense_pair m_param = MergedVariable(param, [param], [0]) m_grad = MergedVariable(grad, [grad], [0]) self.merged_variables_pairs.append((m_param, m_grad)) self.merged_dense_pairs.append((m_param, m_grad)) for sparse_pair in origin_for_sparse: param, grad = sparse_pair m_param = MergedVariable(param, [param], [0]) m_grad = MergedVariable(grad, [grad], [0]) self.merged_variables_pairs.append((m_param, m_grad)) self.merged_sparse_pairs.append((m_param, m_grad)) for merged in self.merged_variables_pairs: m_param, m_grad = merged self.merged_variable_map[ m_param.merged_var.name] = m_param.merged_var self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var param_merges = [] param_merges.extend(origin_for_sparse) param_merges.extend(origin_for_dense) for param, grad in param_merges: param_name_grad_name[param.name] = grad.name grad_name_to_param_name[grad.name] = param.name self.origin_sparse_pairs = origin_for_sparse self.origin_dense_pairs = origin_for_dense self.param_name_to_grad_name = param_name_grad_name self.grad_name_to_param_name = grad_name_to_param_name sparse_pair_map = collections.OrderedDict() for pair in self.origin_sparse_pairs + self.origin_dense_pairs: param, grad = pair sparse_pair_map[param.name] = str(param) sparse_pair_map[grad.name] = str(grad) self._var_slice_and_distribute() self._dispatcher() def get_param_grads(self): origin_program = self.origin_main_program def _get_params_grads(sparse_varnames): block = origin_program.global_block() dense_param_grads = [] sparse_param_grads = [] optimize_params = set() origin_var_dict = origin_program.global_block().vars role_id = int(core.op_proto_and_checker_maker.OpRole.Backward) for op in block.ops: if _is_opt_role_op(op): # delete clip op from opt_ops 
when run in Parameter Server mode if OP_NAME_SCOPE in op.all_attrs() \ and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): op._set_attr("op_role", role_id) continue if op.attr(OP_ROLE_VAR_ATTR_NAME): param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0] grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] if param_name not in optimize_params: optimize_params.add(param_name) param_grad = (origin_var_dict[param_name], origin_var_dict[grad_name]) if param_name in sparse_varnames: sparse_param_grads.append(param_grad) else: dense_param_grads.append(param_grad) return sparse_param_grads, dense_param_grads def _get_sparse_varnames(): varnames = [] for op in origin_program.global_block().ops: if op.type in SPARSE_OP_TYPE_DICT.keys() \ and op.attr('remote_prefetch') is True: param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] varnames.append(param_name) return list(set(varnames)) sparse_varnames = _get_sparse_varnames() sparse_param_grads, dense_param_grads = _get_params_grads( sparse_varnames) return sparse_param_grads, dense_param_grads def remove_var_pair_by_grad(self, var_name): for index, pair in enumerate(self.merged_variables_pairs): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del self.merged_variables_pairs[index] for index, pair in enumerate(self.merged_dense_pairs): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del self.merged_dense_pairs[index] return for index, pair in enumerate(self.merged_sparse_pairs): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del self.merged_sparse_pairs[index] return print("Not find {} in self.merge_pairs".format(var_name)) def _is_opt_role_op(op): # NOTE : depend on oprole to find out whether this op is for # optimize op_maker = core.op_proto_and_checker_maker optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize if op_maker.kOpRoleAttrName() in op.attr_names and \ int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role): return True return False def _get_optimize_ops(_program): block = _program.global_block() opt_ops = [] for op in block.ops: if _is_opt_role_op(op): # delete clip op from opt_ops when run in Parameter Server mode if OP_NAME_SCOPE in op.all_attrs() \ and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): op._set_attr( "op_role", int(core.op_proto_and_checker_maker.OpRole.Backward)) continue opt_ops.append(op) return opt_ops def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps): if hasattr(compiled_config.origin_main_program, 'lr_sheduler'): from paddle.optimizer.lr import LRScheduler assert isinstance(compiled_config.origin_main_program.lr_sheduler, LRScheduler), "must be LRScheduler" ops = _get_optimize_ops(compiled_config.origin_main_program) lr_param_dict = _get_lr_param_dict(ops) lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program( compiled_config.origin_main_program.lr_sheduler, lr_param_dict, lr_decay_steps) compiled_config.add_tensor_table( "@LR_DECAY_COUNTER@", lr_name, lr_decay_startup_program, lr_decay_main_program, "GlobalStepTable") def _get_lr_param_dict(opt_ops): lr_param_dict = {} for op in opt_ops: lr_name = op.input("LearningRate")[0] param_name = op.input("Param")[0] if lr_name not in lr_param_dict: lr_param_dict[lr_name] = [] lr_param_dict[lr_name].append(param_name) return lr_param_dict def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps): schedler_decay = [ 'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay' ] from paddle.optimizer.lr import 
ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay decay_main_program = fluid.framework.Program() decay_startup_program = fluid.framework.Program() lr_name = "" if isinstance(lr_sheduler, ExponentialDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True) lr_name = lr.name logging.warn( "ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" % lr_decay_steps) elif isinstance(lr_sheduler, NoamDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0) lr_name = lr.name logging.warn("NoamDecay is set, warmup steps is [ %d ]" % lr_sheduler.warmup_steps) elif isinstance(lr_sheduler, NaturalExpDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True) lr_name = lr.name logging.warn( "NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" % lr_decay_steps) elif isinstance(lr_sheduler, InverseTimeDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True) lr_name = lr.name logging.warn( "InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" % lr_decay_steps) else: raise ValueError( "Not supported current LearningRate strategy, please use follow decay strategy: {}". format(schedler_decay)) return decay_main_program, decay_startup_program, lr_name def _get_varname_parts(varname): # returns origin, blockid, trainerid orig_var_name = "" trainer_part = "" block_part = "" trainer_idx = varname.find(".trainer_") if trainer_idx >= 0: trainer_part = varname[trainer_idx + 1:] else: trainer_idx = len(varname) block_index = varname.find(".block") if block_index >= 0: block_part = varname[block_index + 1:trainer_idx] else: block_index = len(varname) orig_var_name = varname[0:min(block_index, trainer_idx)] return orig_var_name, block_part, trainer_part def _orig_varname(varname): orig, _, _ = _get_varname_parts(varname) return orig
apache-2.0
-7,821,824,388,392,121,000
38.75695
145
0.537979
false
3.973801
false
false
false
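The CompileTimeStrategy class in the record above is wrapped in a hand-rolled Singleton decorator. A small self-contained sketch of that decorator pattern follows; the Config class is purely illustrative and not part of PaddlePaddle.

# Minimal sketch of the Singleton decorator pattern used in the record above.
def Singleton(cls):
    _instance = {}

    def _singleton(*args, **kwargs):
        # construct the wrapped class at most once, then keep returning the same object
        if cls not in _instance:
            _instance[cls] = cls(*args, **kwargs)
        return _instance[cls]

    return _singleton


@Singleton
class Config(object):  # hypothetical class, used only to demonstrate the decorator
    def __init__(self, value):
        self.value = value


a = Config(1)
b = Config(2)  # constructor arguments are ignored after the first instantiation
assert a is b and a.value == 1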
animekita/selvbetjening
selvbetjening/frontend/userportal/views.py
1
7865
# coding=UTF-8 from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib.auth.models import AnonymousUser from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse from django.utils.translation import ugettext as _ from django.shortcuts import get_object_or_404 from django.contrib.formtools.preview import FormPreview from django.contrib import messages from django.contrib.auth import login, authenticate from selvbetjening.core.user.models import SUser from selvbetjening.businesslogic.members.forms import UserRegistrationForm, ProfileEditForm, UserWebsiteFormSet from selvbetjening.frontend.userportal.forms import ChangePasswordForm, ChangePictureForm, \ PrivacyForm, ChangeUsernameForm from selvbetjening.frontend.userportal.processor_handlers import profile_page_processors from selvbetjening.frontend.userportal.models import UserPrivacy def profile_redirect(request): if isinstance(request.user, AnonymousUser): return HttpResponseRedirect(reverse('members_login')) else: return HttpResponseRedirect(reverse('members_profile')) @login_required def public_profile_page(request, username, template_name='userportal/public_profile.html', template_no_access_name='userportal/profile_no_access.html'): user = get_object_or_404(SUser, username=username) privacy, created = UserPrivacy.objects.get_or_create(user=user) own_profile = False if privacy.public_profile: handler = profile_page_processors.get_handler(request, user) add_to_profile = handler.view(own_profile) return render(request, template_name, { 'viewed_user': user, 'privacy': privacy, 'add_to_profile': add_to_profile }) else: return render(request, template_no_access_name, { 'username': user.username }) @login_required def profile(request, template_name='userportal/profile.html'): user = request.user privacy = UserPrivacy.full_access() own_profile = True own_privacy, created = UserPrivacy.objects.get_or_create(user=user) handler = profile_page_processors.get_handler(request, user) add_to_profile = handler.view(own_profile) return render(request, template_name, { 'viewed_user': user, 'privacy': privacy, 'own_privacy': own_privacy, 'add_to_profile': add_to_profile }) @login_required def edit_profile(request, template_name='userportal/edit_profile.html', success_page='userportal_profile', form_class=ProfileEditForm): user = request.user if request.method == 'POST': form = form_class(request.POST, instance=user) website_form = UserWebsiteFormSet(request.POST, instance=user) if form.is_valid() and website_form.is_valid(): form.save() website_form.save() messages.success(request, _(u'Personal information updated')) return HttpResponseRedirect(reverse(success_page)) else: form = form_class(instance=user) website_form = UserWebsiteFormSet(instance=user) return render(request, template_name, { 'form': form, 'website_form': website_form }) @login_required def edit_privacy(request, form_class=PrivacyForm, template_name='userportal/edit_privacy.html', success_page='userportal_profile'): privacy, created = UserPrivacy.objects.get_or_create(user=request.user) if request.method == 'POST': form = form_class(request.POST, instance=privacy) if form.is_valid: form.save() messages.success(request, _(u'Privacy settings updated')) return HttpResponseRedirect(reverse(success_page)) else: form = form_class(instance=privacy) return render(request, template_name, { 'form': form }) @login_required def edit_picture(request, form_class=ChangePictureForm, success_page='userportal_profile', 
template_name='userportal/edit_picture.html'): profile = request.user if request.method == 'POST': form = form_class(data=request.POST, files=request.FILES) if form.is_valid(): profile.picture = form.cleaned_data['picture'] profile.save() messages.success(request, _(u'Profile picture changed')) return HttpResponseRedirect(reverse(success_page)) else: form = form_class() return render(request, template_name, { 'form': form, 'user': profile }) @login_required def edit_password(request, template_name='userportal/edit_password.html', post_change_redirect='userportal_profile', change_password_form=ChangePasswordForm): if request.method == 'POST': form = change_password_form(request.user, request.POST) if form.is_valid(): form.save() messages.success(request, _(u'Password changed')) return HttpResponseRedirect(reverse(post_change_redirect)) else: form = change_password_form(request.user) return render(request, template_name, { 'form': form, }) class UsernameChangeView(FormPreview): preview_template = 'userportal/edit_username_confirmed.html' form_template = 'userportal/edit_username.html' def __call__(self, request, *args, **kwargs): return super(UsernameChangeView, self).__call__(request, *args, **kwargs) def process_preview(self, request, form, context): context['new_username'] = form.cleaned_data['new_username'] def done(self, request, cleaned_data): request.user.username = cleaned_data['new_username'] request.user.save() messages.success(request, _(u'Username changed')) return HttpResponseRedirect(reverse('userportal_profile')) edit_username = login_required(UsernameChangeView(ChangeUsernameForm)) def register(request, success_page, form_class=UserRegistrationForm, login_on_success=False, template_name='userportal/registration.html'): """ Allows a new user to register an account. success_page -- a reversable view name or a function returning an url. The function takes a request and a user object as input. """ if request.method == 'POST': form = form_class(request.POST) if form.is_valid(): user = form.save() if login_on_success: user = authenticate(username=user.username, password=request.POST['password']) login(request, user) if callable(success_page): return HttpResponseRedirect(success_page(request, user)) else: return HttpResponseRedirect(reverse(success_page)) else: form = form_class() return render(request, template_name, { 'form': form })
mit
2,199,686,237,095,671,600
30.464
111
0.595168
false
4.507163
false
false
false
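A hedged sketch of how the view functions in the record above might be wired into a urls.py. The URL patterns are assumptions; only the names such as 'userportal_profile' are taken from the reverse() calls visible in the record, and nothing here is verified against the real selvbetjening project.

# Hypothetical urls.py wiring for the views in the record above (Django 1.x-era syntax).
from django.conf.urls import url
from selvbetjening.frontend.userportal import views

urlpatterns = [
    url(r'^$', views.profile_redirect),
    url(r'^profile/$', views.profile, name='userportal_profile'),
    url(r'^profile/edit/$', views.edit_profile, name='userportal_edit_profile'),
    url(r'^profile/privacy/$', views.edit_privacy, name='userportal_edit_privacy'),
    url(r'^profile/password/$', views.edit_password, name='userportal_edit_password'),
    url(r'^(?P<username>[\w.@+-]+)/$', views.public_profile_page, name='userportal_public_profile'),
]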
choderalab/ensembler
ensembler/pdb.py
1
4246
import sys
if sys.version_info > (3, 0):
    from urllib.request import urlopen
    from urllib.error import URLError
    from io import StringIO
else:
    from urllib2 import urlopen, URLError
    from StringIO import StringIO
import gzip
import re
import six


def extract_residues_by_resnum(output_file, pdb_input_file, template):
    """
    Parameters
    ----------
    output_file: string or gzip.file_like
    pdb_input_file: string or gzip.file_like
    """
    if isinstance(pdb_input_file, six.string_types):
        with gzip.open(pdb_input_file, 'r') as pdb_file:
            pdbtext = pdb_file.readlines()
    else:
        pdbtext = pdb_input_file.readlines()

    # list of resnum strings e.g. ['9', '29', '30B'] must be converted as follows to match the PDB format:
    # ['   9 ', '  29 ', '  30B']
    desired_resnums = ['%4s ' % r if re.match('[0-9]', r[-1]) else '%5s' % r for r in template.resolved_pdbresnums]

    if isinstance(output_file, six.string_types):
        ofile = open(output_file, 'w')
    else:
        ofile = output_file

    try:
        resnums_extracted = {}
        model_index = 0
        for bytesline in pdbtext:
            line = bytesline.decode('UTF-8')
            # For PDBs containing multiple MODELs (e.g. NMR structures), extract data only from the first model, ignore others.
            if line[0:6] == 'MODEL ':
                model_index += 1
                if model_index == 2:
                    break
            if line[0:6] in ['ATOM  ', 'HETATM']:
                resnum = line[22:27]
                chainid = line[21]
                if chainid == template.chainid:
                    if resnum in desired_resnums:
                        ofile.write(line)
                        resnums_extracted[resnum] = 1
    except Exception as e:
        print('Exception detected while extracting ATOM/HETATM records:')
        print(e)
    finally:
        if isinstance(output_file, six.string_types):
            ofile.close()

    if len(resnums_extracted) != len(desired_resnums):
        raise Exception(
            'Number of residues (%d) extracted from PDB (%s) for template (%s) does not match desired number of residues (%d).' % (
                len(resnums_extracted),
                template.pdbid,
                template.templateid,
                len(desired_resnums)
            )
        )


def retrieve_sifts(pdb_id):
    """Retrieves a SIFTS .xml file, given a PDB ID. Works by modifying the PDBe download URL.
    Also removes annoying namespace stuff.
    """
    sifts_download_base_url='ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
    url = sifts_download_base_url + pdb_id.lower() + '.xml.gz'
    try:
        response = urlopen(url)
    except URLError:
        print('ERROR downloading SIFTS file with PDB ID: %s' % pdb_id)
        raise
    sifts_page = response.read(100000000)  # Max 100MB
    # Decompress string
    sifts_page = gzip.GzipFile(fileobj=StringIO(sifts_page)).read()

    # Removing all attribs from the entry tag, and the rdf tag and contents
    sifts_page_processed = ''
    skip_rdf_tag_flag = False
    for line in sifts_page.splitlines():
        if line[0:6] == '<entry':
            sifts_page_processed += '<entry>' + '\n'
        elif line[0:7] == '  <rdf:':
            skip_rdf_tag_flag = True
            pass
        elif line[0:8] == '  </rdf:':
            skip_rdf_tag_flag = False
            pass
        else:
            if skip_rdf_tag_flag:
                continue
            sifts_page_processed += line + '\n'
    return sifts_page_processed


def retrieve_pdb(pdb_id,compressed='no'):
    """Retrieves a PDB file, given a PDB ID. Works by modifying the PDB download URL.
    """
    pdb_download_base_url='http://www.rcsb.org/pdb/files/'
    url = pdb_download_base_url + pdb_id + '.pdb'
    if compressed == 'yes':
        url += '.gz'
    response = urlopen(url)
    pdb_file = response.read(10000000)  # Max 10MB
    return pdb_file


def extract_uniprot_acs_from_sifts_xml(siftsxml):
    uniprot_crossrefs = siftsxml.findall('entity/segment/listResidue/residue/crossRefDb[@dbSource="UniProt"]')
    uniprot_acs = list(set([uniprot_crossref.get('dbAccessionId') for uniprot_crossref in uniprot_crossrefs]))
    return uniprot_acs
gpl-2.0
5,171,825,560,777,637,000
35.921739
135
0.5935
false
3.562081
false
false
false
sdbondi/Arduino-Talk
Comet/python/ArduinoServer.py
1
6552
#!/usr/bin/python import human_curl as requests import serial import platform import sys import getopt import socket import json import time _WINDOWS = (platform.system() == 'Windows') _AJAXURL = 'http://localhost/arduino/comet-router.php?action=%(action)s' #_AJAXURL = 'http://themousepotatowebsite.co.za/experiments/arduino/comet-router.php?action=%(action)s' #_AUTH = ('stanb', 'arduino1') _AUTH=None _CHAROFFSET = 32 _CMDMAP = { 'ping' : chr(_CHAROFFSET + 0), 'pinMode' : chr(_CHAROFFSET + 1), 'digitalWrite': chr(_CHAROFFSET + 2), 'digitalRead' : chr(_CHAROFFSET + 3), 'analogWrite' : chr(_CHAROFFSET + 4), 'analogRead' : chr(_CHAROFFSET + 5), 'beep' : chr(_CHAROFFSET + 11) } class ArduinoCommandServer(object): def __init__(self, sc, opts): if not sc: raise ValueError('Serial connection required') self.serial = sc self.options = opts or {} def getIncomingCommands(self): global _AJAXURL, _AUTH opts = self.options url = _AJAXURL % { 'action': 'get_web_data'} while True: while True: try: resp = requests.get(url, timeout=70, auth=_AUTH) break; except requests.exceptions.CurlError as ex: print 'ERROR ', ex.message, ' Retrying...' #except requests.exceptions.Timeout: # print 'Get request timed out. Retrying...' if resp.status_code != 200 or resp.content == False: print 'ERROR: status_code %d or no content' % resp.status_code continue obj = json.loads(resp.content); if obj == False: print 'ERROR: content parse error' print resp.content continue if obj['state'] != 'OK': print 'ERROR: ', obj['message'] continue; if obj['result'] == 'TMOUT': continue return obj['result'] def toArduinoCommand(self, command): global _CMDMAP, _CHAROFFSET if not command['command'] in _CMDMAP: print 'Unrecognised command: ', command['command'] return False op_chr = _CMDMAP[command['command']] if 'pin' in command: pin = str(command['pin']) if pin[0] == 'A': pin = 14 + int(pin[1]) pin = int(pin) result = op_chr+chr(pin + _CHAROFFSET) if 'mode' in command: result += 'i' if command['mode'] == 'input' else 'o' if 'args' in command and isinstance(command['args'], list): command['args'] = [str(c) for c in command['args']] result += '-'.join(command['args']) return result+'\n' def toWeb(self, ar_cmd): op_chr = ar_cmd[0] if op_chr == 'A': return 'ACK' if op_chr == 'R': return int(ar_cmd[1:]) if op_chr == 'F': return { 'error': ar_cmd[1:] } return False def processCommands(self, commands): results = [] for command in commands: cmd_str = self.toArduinoCommand(command) if not cmd_str: results.append(False) continue ar_reply = '' i = 0 while len(ar_reply) == 0: if i % 10 == 0: self.serial.write(cmd_str) time.sleep(0.1) ar_reply = self.serial.readline() i += 1 functionStr = command['command']+'(' if 'pin' in command: functionStr += str(command['pin']) if 'args' in command and isinstance(command['args'], list): if 'pin' in command: functionStr += ', ' functionStr += ', '.join(command['args']) print functionStr + ') -> ' + ar_reply.strip() results.append(self.toWeb(ar_reply)) return results def sendResponse(self, batch_id, results): global _AJAXURL, _AUTH opts = self.options url = _AJAXURL % { 'action': 'put_ar_data'} data = { 'object' : json.dumps({ 'id': batch_id, 'object': results })} while True: try: resp = requests.post(url, data, timeout=10, auth=_AUTH) break; except requests.exceptions.CurlError as ex: print 'ERROR ', ex.message, ' Retrying...' #except requests.exceptions.Timeout: # print 'Send request timed out. Retrying...' 
if resp.status_code != 200 or resp.content == False: print 'ERROR: status_code %d or no content' % resp.status_code return False obj = json.loads(resp.content); if obj == False: print 'ERROR: content parse error' print resp.content return False if obj['state'] != 'OK': print 'ERROR: ', obj['message'] return False if obj['result'] == 'TMOUT': return False if obj['result'] == 'PASS': return True print 'Got unknown result: ', obj return False def start(self): opts = self.options while True: print 'Waiting for incoming commands...' results = self.getIncomingCommands() print '================================' print 'Got command(s).' for _object in results: batch_id = _object['id'] commands = _object['object'] print 'Batch ID: %d. Processing...' % batch_id results = self.processCommands(commands) print 'Sending reply...' self.sendResponse(batch_id, results) print 'Done' print '================================\n\n' def get_opts(args): global _WINDOWS try: opts, args = getopt.getopt(args, '', ['baud=', 'serialPort=']) except getopt.GetoptError, err: print str(err) sys.exit(2) optsmap = { 'baud': 9600, 'serialPort': not _WINDOWS and '/dev/ttyACM0' } for o, a in opts: if o == "--baud": optsmap['baud'] = int(a) elif o == "--serialPort": optsmap['serialPort'] = a else: assert False, "unhandled option" if optsmap['serialPort'] == False: raise ValueError('Argument --serialPort= is mandatory') return optsmap def main(args): opts = get_opts(args) # Check for arduino serial port try: sc = serial.Serial(opts['serialPort'], opts['baud'], timeout=0) except serial.SerialException, err: print str(err) print 'Please ensure your Arduino is connected and the port is correct.' sys.exit(2) if not sc.isOpen(): print 'Unable to open serial connection to Arduino.' sys.exit(1) print 'Connected to serial on', opts['serialPort'] try: # Start relay server while 1: server = ArduinoCommandServer(sc, opts) server.start() finally: if sc and sc.isOpen(): sc.close() if __name__ == '__main__': main(sys.argv[1:])
mit
1,833,642,196,712,729,600
24.297297
103
0.573107
false
3.6139
false
false
false
thorwhalen/ut
ml/sk/transformers.py
1
4610
__author__ = 'thor'

from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.neighbors import KNeighborsRegressor
from pandas import DataFrame
import numpy as np
from nltk import word_tokenize
from functools import reduce


class HourOfDayTransformer(TransformerMixin):
    def __init__(self, date_field='datetime'):
        self.date_field = date_field

    def transform(self, X, **transform_params):
        hours = DataFrame(X[self.date_field].apply(lambda x: x.hour))
        return hours

    def fit(self, X, y=None, **fit_params):
        return self


class ModelTransformer(TransformerMixin):
    """
    Sometimes transformers do need to be fitted.
    ModelTransformer is used to wrap a scikit-learn model and make it behave like a transformer.
    This is useful when you want to use something like a KMeans clustering model to generate features for another model.
    It needs to be fitted in order to train the model it wraps.
    """
    def __init__(self, model):
        self.model = model

    def fit(self, *args, **kwargs):
        self.model.fit(*args, **kwargs)
        return self

    def transform(self, X, **transform_params):
        return DataFrame(self.model.predict(X))


class KVExtractor(TransformerMixin):
    """
    Transform multiple key/value columns in a scikit-learn pipeline.

    >>> import pandas as pd
    >>> D = pd.DataFrame([ ['a', 1, 'b', 2], ['b', 2, 'c', 3]], columns = ['k1', 'v1', 'k2', 'v2'])
    >>> kvpairs = [ ['k1', 'v1'], ['k2', 'v2'] ]
    >>> KVExtractor( kvpairs ).transform(D)
    [{'a': 1, 'b': 2}, {'c': 3, 'b': 2}]
    """
    def __init__(self, kvpairs):
        self.kpairs = kvpairs

    def transform(self, X, *_):
        result = []
        for index, rowdata in X.iterrows():
            rowdict = {}
            for kvp in self.kpairs:
                rowdict.update({rowdata[kvp[0]]: rowdata[kvp[1]]})
            result.append(rowdict)
        return result

    def fit(self, *_):
        return self


class ColumnSelectTransformer(BaseEstimator, TransformerMixin):
    def __init__(self, keys):
        self.keys = keys

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return X[self.keys]


class CategoryTransformer(BaseEstimator, TransformerMixin):
    def __init__(self):
        pass

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        D = []
        for record in X.values:
            D.append({k: 1 for k in record[0]})
        return D


class AttributeTransformer(BaseEstimator, TransformerMixin):
    def __init__(self):
        pass

    def _flatten(self, d, parent_key='', sep='_'):
        """
        Flatten dictonary
        """
        import collections
        items = []
        for k, v in list(d.items()):
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.MutableMapping):
                items.extend(list(self._flatten(v, new_key, sep=sep).items()))
            else:
                new_v = 1 if v == True else 0
                items.append((new_key, new_v))
        return dict(items)

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        D = []
        for record in X.values:
            D.append(self._flatten(record[0]))
        return D


class KNNImputer(TransformerMixin):
    """
    Fill missing values using KNN Regressor
    """
    def __init__(self, k):
        self.k = k

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        """
        :param X: multidimensional numpy array like.
        """
        rows, features = X.shape
        mask = list([reduce(lambda h, t: h or t, x) for x in np.isnan(X)])
        criteria_for_bad = np.where(mask)[0]
        criteria_for_good = np.where(mask == np.zeros(len(mask)))[0]

        X_bad = X[criteria_for_bad]
        X_good = X[criteria_for_good]

        knn = KNeighborsRegressor(n_neighbors=self.k)

        for idx, x_bad in zip(criteria_for_bad.tolist(), X_bad):
            missing = np.isnan(x_bad)
            bad_dim = np.where(missing)[0]
            good_dim = np.where(missing == False)[0]

            for d in bad_dim:
                x = X_good[:, good_dim]
                y = X_good[:, d]
                knn.fit(x, y)

                X[idx, d] = knn.predict(x_bad[good_dim])
        return X


class NLTKBOW(TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return [{word: True for word in word_tokenize(document)}
                for document in X]
mit
1,656,264,518,869,154,300
25.964912
120
0.569848
false
3.682109
false
false
false
LucasFerreiraDaSilva/ScrapingINMET
geraBase.py
1
3704
""" Autor: Lucas Ferreira da Silva Email: lferreira@inf.ufsm.br Descricao: Script para download dos dados referentes a cada estacao metereologica e criacao de uma pequena "base de dados" em formato JSON referente a todas as estacoes Execucao (comando): python3 geraBase.py Saida: Arquivo JSON (estacoes.json) contendo dados de todas as estacoes metereologicas do INMET """ import requests import json import bs4 import re # URL base para Scraping das estacoes url_map = "http://www.inmet.gov.br/sonabra/maps/pg_mapa.php" res = requests.get (url_map) res.raise_for_status() # Separacao das estacoes list_markers = (res.text).split("//************* ESTACÃO ") del list_markers[0] # Inicializacao da lista de dados das estacoes para posterior tratamento list_stations = [] # Scraping dos dados mais brutos de cada estacao for i in list_markers: st = (i.split("var imagem",maxsplit=1))[0].split("var ") # Capturar id da estação station_id = str((st[0].split(maxsplit=1))[0]) # Capturar label da estacao station_label = re.search(r"(?<=')[^']+(?=')", str(st[-1])).group(0) # Capturar html da estacao station_html = str(st[2].split("html = ", maxsplit=1)[1]) # Criacao de dicionario auxiliar de dados de cada estacao station_info = {} station_info['id'] = station_id station_info['label'] = station_label station_info['html'] = station_html list_stations.append(station_info) # Inicializacao do dicionario de estacoes stations = {} # Scraping refinado dos dados de cada estacao for x in list_stations: soup = bs4.BeautifulSoup(x['html'], 'html.parser') # Captura o link da tabela de dados link = "" for a in soup.find_all('a'): l = a.get('href') if (l.find("pg_dspDadosCodigo_sim.php?", 32) != -1): link = l break aux = (x['html'].split("<b><b>", maxsplit=1))[1].split("<table ", maxsplit=1) # Captura lista dos dados geograficos localization = ((aux[1].split("</table>", maxsplit=1))[1].split("</font>", maxsplit=1)[0]).split("<br>") # Captura demais dados da estacao data_aux = ((aux[0].replace("<b>", "")).replace("</b>","")).split("<br>") data = [] for d in data_aux: if (d.find("<a ", 0, 4) == -1) and (d.find("</a>", 0, 4) == -1) and (len(d) > 0): data.append(d) # Criacao do objeto estacao para o JSON station_data = {} details = {} details['estacao'] = data[0].split(": ")[1] details['codigo_omm'] = data[1].split(": ")[1] if (len(data) > 2): details['registro'] = data[2].split(": ")[1] details['temp_max'] = (data[3].split(": ")[1]).replace("º","") details['temp_min'] = (data[4].split(": ")[1]).replace("º","") details['umidade'] = data[5].split(": ")[1] details['pressao'] = data[6].split(": ")[1] details['precipitacao'] = data[7].split(": ")[1] details['vento_dir'] = (data[8].split(": ")[1]).replace("º","graus") details['vento_vel'] = data[9].split(": ")[1] station_data['label'] = x['label'] station_data['url'] = link station_data['latitude'] = (localization[1].split(": ")[1]).replace("º","") station_data['longitude'] = (localization[2].split(": ")[1]).replace("º","") station_data['altitude'] = localization[3].split(": ")[1] station_data['abertura'] = localization[0].split(": ")[1] station_data['detalhes'] = details stations[str(x['id'])] = station_data # Escrita dos dados em arquivo JSON with open('estacoes.json', 'w') as fp: json.dump(stations, fp, indent=4, ensure_ascii=False, sort_keys=True) print("Database successfully generated!")
mit
8,083,539,564,384,112,000
31.707965
108
0.606061
false
2.865116
false
false
false
BlackHole/enigma2-1
lib/python/Components/Converter/TemplatedMultiContent.py
2
2918
from Components.Converter.StringList import StringList


class TemplatedMultiContent(StringList):
    """Turns a python tuple list into a multi-content list which can be used in a listbox renderer."""
    def __init__(self, args):
        StringList.__init__(self, args)
        from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_TOP, RT_VALIGN_CENTER, RT_VALIGN_BOTTOM, RT_WRAP, BT_SCALE
        from skin import parseFont
        from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmap, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend, MultiContentTemplateColor, MultiContentEntryProgress
        l = locals()
        del l["self"] # cleanup locals a bit
        del l["args"]

        self.active_style = None
        self.template = eval(args, {}, l)
        assert "fonts" in self.template
        assert "itemHeight" in self.template
        assert "template" in self.template or "templates" in self.template
        assert "template" in self.template or "default" in self.template["templates"] # we need to have a default template

        if not "template" in self.template: # default template can be ["template"] or ["templates"]["default"]
            self.template["template"] = self.template["templates"]["default"][1]
            self.template["itemHeight"] = self.template["template"][0]

    def changed(self, what):
        if not self.content:
            from enigma import eListboxPythonMultiContent
            self.content = eListboxPythonMultiContent()

            # also setup fonts (also given by source)
            index = 0
            for f in self.template["fonts"]:
                self.content.setFont(index, f)
                index += 1

        # if only template changed, don't reload list
        if what[0] == self.CHANGED_SPECIFIC and what[1] == "style":
            pass
        elif self.source:
            self.content.setList(self.source.list)

        self.setTemplate()
        self.downstream_elements.changed(what)

    def setTemplate(self):
        if self.source:
            style = self.source.style
            if style == self.active_style:
                return

            # if skin defined "templates", that means that it defines multiple styles in a dict. template should still be a default
            templates = self.template.get("templates")
            template = self.template.get("template")
            itemheight = self.template["itemHeight"]
            selectionEnabled = self.template.get("selectionEnabled", True)
            scrollbarMode = self.template.get("scrollbarMode", "showOnDemand")

            if templates and style and style in templates: # if we have a custom style defined in the source, and different templates in the skin, look it up
                template = templates[style][1]
                itemheight = templates[style][0]
                if len(templates[style]) > 2:
                    selectionEnabled = templates[style][2]
                if len(templates[style]) > 3:
                    scrollbarMode = templates[style][3]

            self.content.setTemplate(template)
            self.content.setItemHeight(itemheight)
            self.selectionEnabled = selectionEnabled
            self.scrollbarMode = scrollbarMode
            self.active_style = style
gpl-2.0
1,706,497,243,091,871,200
40.098592
207
0.735778
false
3.580368
false
false
false
Grumblesaur/quickgen
quickgen.py
1
3966
#!/usr/local/bin/python -tt
# -*- coding: utf-8 -*-
import os, sys, random

#supply input as raw_input if running Python 3 or higher
if sys.version_info >= (3,0):
    raw_input = input

def parse(structure, part, phonemes):
    #grab a random phoneme from the relevant category and return it
    #structure can be O, N, or C, passed as 0, 1, or 2, respectively

    #initialize the segment string as empty
    seg = ""

    #focus in on relevant O, N, or C possibilities
    pattern = part[structure]

    #ensure that values fall within the bounds of list
    listrange = len(pattern)

    #pick an O, N, or C to construct
    index = random.randrange(0, listrange)
    onc = pattern[index] #obtain an onset, nucleus, or coda pattern

    if "," in onc:
        onc = onc.split(",") #if it is a cluster, split on commas
        #this creates a list of indices to be accessed

    #loop to construct O, N, or C
    for i in range(0, len(onc)):
        pclass = int(onc[i]) #obtain an index for a class of phoneme
        phone = random.randrange(0, len(phonemes[pclass])) #obtain an index for a specific phone
        seg += phonemes[pclass][phone] #add phone to segment

    return seg #return the segment to the main script
#end parse function definition

#ask for name of input file (default = "input.txt")
inn = raw_input("What is the name of your input file? (Leave blank for 'input.txt') ")
if inn == "":
    inn = "input.txt"

#ask for name of output file (default = "output.txt")
out = raw_input("What is the name of your output file? (Leave blank for 'output.txt') ")
if out == "":
    out = "output.txt"

seed = raw_input("Insert seed for RNG (leave blank for system time) ")
if seed == "":
    seed = None
else:
    seed = int(seed)

#use system time for seed
random.seed(seed)

#prepare lists
consonants = []
vowels = []
parts = []
structures = []

#prepare the output file
fout = open(out, 'w')

#extract from input file
with open(inn) as fin:
    #get consonants
    for line in fin:
        if line.strip() == "":
            continue
        list = line.split()
        if list[0][0] == '#':
            break
        elif list[0][0] != '/':
            consonants.append(list)

    #get vowels
    for line in fin:
        if line.strip() == "":
            continue
        list = line.split()
        if list[0][0] == '#':
            break
        elif list[0][0] != '/':
            vowels.append(list)

    #get parts
    for line in fin:
        if line.strip() == "":
            continue
        list = line.split()
        if list[0][0] == '#':
            break
        elif list[0][0] != '/':
            parts.append(list)

    #get structures
    for line in fin:
        if line.strip() == "":
            continue
        list = line.split()
        if list[0][0] == '#':
            break
        elif list[0][0] != '/':
            structures.append(list)

#un-nest the syllable patterns
structures = structures[0]

#ask for number of words (default = 100)
i = raw_input("How many words would you like to build? (Leave blank for 50) ")
if i == "":
    i = 50
else:
    i = int(i)

low = raw_input("Enter minimum number of syllables per word (Defaults to 1) ")
if low == "":
    low = 1
else:
    low = int(low)

high = raw_input("Enter maximum number of syllables per word (Defaults to 5) ")
if high == "":
    high = 5
else:
    high = int(high)

while i > 0:
    #working word variable
    word = ""

    #create word in this loop
    for j in range(0, int(random.triangular(low, high + 1, low + 1))):
        #working syllable variable
        syll = ""

        #choose a random syllable pattern to follow
        form = structures[random.randrange(0, len(structures))]
        for k in range(0, len(form)):
            if form[k] == "O":
                #retrieve a string that is a valid onset
                syll += parse(0, parts, consonants)
            elif form[k] == "C":
                #retrieve a string that is a valid coda
                syll += parse(2, parts, consonants)
            elif form[k] == "N":
                #retrieve a string that is a valid nucleus
                syll += parse(1, parts, vowels)

        #add new syllable to the word
        word += syll

    #print out the word followed by a newline
    fout.write(word)
    fout.write('\n')

    #decrement loop iterator
    i -= 1

#close files
fin.close()
fout.close()

sys.stdout.write("Program finished. \n")
#end program
gpl-2.0
-7,959,536,635,195,563,000
21.793103
88
0.648512
false
2.926937
false
false
false
irblsensitivity/irblsensitivity
scripts/features/MethodFeatures.py
1
6286
#-*- coding: utf-8 -*- ''' Created on 2016. 11. 19 Updated on 2016. 01. 09 ''' from __future__ import print_function import os import re from utils import PrettyStringBuilder from utils import Progress import javalang class Resource(object): Stopwords = None EngDictionary = None @staticmethod def init(): if Resource.Stopwords is None: Resource.Stopwords = Resource.load_base(u'stopwords') if Resource.EngDictionary is None: Resource.EngDictionary = Resource.load_base(u'en.dict') @staticmethod def load_base(_filename): listDic = {} f = open(_filename, 'r') while True: word = f.readline() if word is None or len(word)==0: break if len(word) <= 2: continue word = word[:-2] listDic[word] = 1 return listDic class MethodWorker(object): __name__ = u'MethodWithComments' basepath = u'/var/experiments/BugLocalization/dist/features/' def run(self, _group, _project, _versionName, _srcBase): print(u'preparing resources...', end=u'') Resource.init() print(u'Done') workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods') filename = os.path.join(workingPath, u'%s.txt' % _versionName) if os.path.exists(workingPath) is False: os.makedirs(workingPath) if os.path.exists(filename) is True: return methods={} files = self.listing_files(_srcBase) progress = Progress(u'Calculating method', 2, 10, True) progress.set_upperbound(len(files)) progress.start() for fname in files: text = open(fname, 'r').read() key = fname[len(_srcBase) + 1:] names = [] try: ADT = javalang.parse.parse(text) cntConstructors, cntConstComments, cntConstInDic = self.count(ADT, javalang.tree.ConstructorDeclaration) cntMethods, cntComments, cntMethodInDic = self.count(ADT, javalang.tree.MethodDeclaration) methods[key] = {'methods':cntMethods+ cntConstructors, 'withComments':cntComments + cntConstComments, 'InDicMethods':cntMethodInDic + cntConstInDic} except javalang.parser.JavaSyntaxError as e: methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0, 'error':'SyntaxError'} except javalang.tokenizer.LexerError as e: methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'LexerError'} except Exception as e: methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'Exception'} progress.check() progress.done() self.storeData(filename, methods) pass def listing_files(self, _path): results = [] for root, dirs, files in os.walk(_path): for fname in files: if fname.endswith('.java') is False:continue results.append(os.path.join(root, fname)) return results def count(self, _ADT, _filter): cntMethods = 0 cntComments = 0 names = set([]) methodDecls = _ADT.filter(_filter) for path, node in methodDecls: cntMethods += 1 names.add(node.name) if node.documentation is None or len(node.documentation) == 0: continue doc = javalang.javadoc.parse(node.documentation) if doc.description is None or len(doc.description) == 0: continue cntComments += 1 cntInDic = 0 for name in names: tokens = self.splitCamel(name) tokens = self.removingStopwords(tokens) if self.checkingEngDic(tokens) > 0: cntInDic += 1 return cntMethods, cntComments, cntInDic #, list(names) def splitCamel(self, token): corpus = [] token = re.sub(r'([A-Z]+)(in|to|for|at|with|on|off|over)([A-Z]+\w+)', r'\1 \2 \3', token) # Lower case between Upper Cases (ex. 
XMLtoTEXT) token = re.sub(r'([a-z0-9])([A-Z]\w+)', r'\1 \2', token) # UpperCase after LowerCase items = token.split(' ') for item in items: item = item.strip() if item == '': continue if re.sub(r'[A-Z]+', '', item) != '': item = re.sub(r'([A-Z]+)([A-Z]+\w+)', r'\1 \2', item) # ALLFiles ==> ALL Files items2 = item.split(' ') for item2 in items2: if item.strip() == '': continue corpus.append(item2) else: corpus.append(item) return corpus def removingStopwords(self, _tokens): newer = set([]) for token in _tokens: if len(token) <= 2: continue if token.lower() in Resource.Stopwords: continue newer.add(token) return list(newer) def checkingEngDic(self, _tokens): count = 0 for token in _tokens: if token in Resource.EngDictionary: count += 1 continue if token.lower() in Resource.EngDictionary: count += 1 continue nword = token[0].upper() + token[1:].lower() if nword in Resource.EngDictionary: count += 1 return count ##################################### # managing cache ##################################### def storeData(self, _filename, _data): pretty = PrettyStringBuilder(_indent_depth=1) text = pretty.toString(_data) f = open(_filename, 'w') f.write(text) f.close() def clear(self, _group, _project): workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods') try: shutil.rmtree(workingPath) print(u'Removed : %s' % workingPath) except Exception as e: print(u'No Path : %s' % workingPath) ############################################################################################################### ############################################################################################################### ############################################################################################################### import shutil from commons import Subjects def clear(): S = Subjects() for group in S.groups: # ['JBoss']: # for project in S.projects[group]: obj = MethodWorker() obj.clear(group, project) def work(): S = Subjects() for group in ['JBoss', 'Wildfly']:#S.groups: # ['JBoss']: # for project in S.projects[group]: for versionName in S.bugs[project].keys(): if versionName == 'all' : continue print(u'MethodWithComments for %s / %s / %s' % (group, project, versionName)) obj = MethodWorker() obj.run(group, project, versionName, S.getPath_source(group, project, versionName)) if __name__ == "__main__": #clear() work() pass
apache-2.0
4,348,889,472,794,515,500
30.081633
141
0.600223
false
3.216991
false
false
false
aplicatii-romanesti/allinclusive-kodi-pi
.kodi/addons/plugin.video.kidsplace/brightcovePlayer.py
1
1587
import httplib

from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService

height = 1080

def build_amf_request(const, playerID, videoPlayer, publisherID):
    env = remoting.Envelope(amfVersion=3)
    env.bodies.append(
        (
            "/1",
            remoting.Request(
                target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
                body=[const, playerID, videoPlayer, publisherID],
                envelope=env
            )
        )
    )
    return env

def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
    conn = httplib.HTTPConnection("c.brightcove.com")
    envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
    conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
    response = conn.getresponse().read()
    response = remoting.decode(response).bodies[0][1].body
    return response

def play(const, playerID, videoPlayer, publisherID, playerKey):
    rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
    streamName = ""
    streamUrl = rtmpdata['FLVFullLengthURL'];

    for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
        streamHeight = item['frameHeight']
        if streamHeight <= height:
            streamUrl = item['defaultURL']

    streamName = streamName + rtmpdata['displayName']
    return [streamName, streamUrl];
apache-2.0
-1,561,841,478,157,068,000
35.068182
156
0.669187
false
3.742925
false
false
false
iamantony/PythonNotes
src/objects/matrix.py
1
5188
__author__ = 'Antony Cherepanov'

from exceptions import Exception


class MatrixException(Exception):
    pass


class Matrix(object):
    def __init__(self, t_rowNum=0, t_colNum=0, t_values=None):
        if not self.__checkDimensionType(t_rowNum) or\
                not self.__checkDimensionType(t_colNum):
            raise MatrixException("Invalid number of matrix size")

        self.__rows = max(t_rowNum, 0)
        self.__cols = max(t_colNum, 0)

        numOfElements = self.__rows * self.__cols
        if t_values is None or \
                not isinstance(t_values, list) or \
                len(t_values) != numOfElements:
            self.__matrix = [0 for i in range(numOfElements)]
        else:
            self.__matrix = t_values

    def __checkDimensionType(self, t_dim):
        if isinstance(t_dim, int):
            return True
        return False

    def __str__(self):
        return "Matrix of " + str(self.__rows) + " rows and " +\
               str(self.__cols) + " cols: " + str(self.__matrix)

    def __add__(self, other):
        if not isinstance(other, Matrix) or \
                (self.__rows != other.rows() and self.__cols != other.cols()):
            raise MatrixException("Failed to add matrix")

        sumData = list()
        for i in range(self.__rows):
            for j in range(self.__cols):
                value = self.GetValue(i, j) + other.GetValue(i, j)
                sumData.append(value)

        result = Matrix(self.__rows, self.__cols, sumData)
        return result

    def __sub__(self, other):
        if not isinstance(other, Matrix) or \
                (self.__rows != other.rows() and self.__cols != other.cols()):
            raise MatrixException("Failed to subtract matrix")

        subData = list()
        for i in range(self.__rows):
            for j in range(self.__cols):
                value = self.GetValue(i, j) - other.GetValue(i, j)
                subData.append(value)

        result = Matrix(self.__rows, self.__cols, subData)
        return result

    def __mul__(self, other):
        if not isinstance(other, Matrix) or \
                self.__cols != other.rows():
            raise MatrixException("Failed to multiply matrix")

        mulData = list()
        # Iterate by elements of result matrix
        for i in range(self.__rows):
            for j in range(other.cols()):
                sumValue = 0
                for iter in range(self.__cols):
                    sumValue += self.GetValue(i, iter) * other.GetValue(iter, j)
                mulData.append(sumValue)

        result = Matrix(self.__rows, other.cols(), mulData)
        return result

    def rows(self):
        return self.__rows

    def cols(self):
        return self.__cols

    def IsSquare(self):
        if self.__cols == self.__rows:
            return True
        return False

    def __getIndex(self, t_row, t_col):
        if not self.__checkDimensionType(t_row) or\
                not self.__checkDimensionType(t_col):
            raise MatrixException("Invalid coordinates type")

        index = self.__cols * t_row + t_col
        if index < 0 or len(self.__matrix) <= index:
            return None
        return index

    def GetValue(self, t_row, t_col):
        index = self.__getIndex(t_row, t_col)
        if index is None:
            raise MatrixException("Invalid index")
        return self.__matrix[index]

    def SetValue(self, t_row, t_col, t_value):
        index = self.__getIndex(t_row, t_col)
        if index is None:
            raise MatrixException("Invalid index")
        self.__matrix[index] = t_value

    def GetSlice(self, t_topLeft, t_bottomRight):
        # TODO: Definitely there could be a better approach
        if 2 != len(t_topLeft) or 2 != len(t_bottomRight):
            raise MatrixException("Invalid slice coordinates")

        data = list()
        startI = t_topLeft[0]
        endI = t_bottomRight[0] + 1
        startJ = t_topLeft[1]
        endJ = t_bottomRight[1] + 1

        for i in range(startI, endI):
            for j in range(startJ, endJ):
                value = self.GetValue(i, j)
                data.append(value)

        result = Matrix(endI - startI, endJ - startJ, data)
        return result

    def SetSlice(self, t_topLeft, t_bottomRight, t_slice):
        if 2 != len(t_topLeft) or 2 != len(t_bottomRight) or \
                not isinstance(t_slice, Matrix):
            raise MatrixException("Invalid slice coordinates or slice matrix")

        startI = t_topLeft[0]
        endI = t_bottomRight[0] + 1
        startJ = t_topLeft[1]
        endJ = t_bottomRight[1] + 1

        if (endI - startI) != t_slice.cols() or\
                (endJ - startJ) != t_slice.rows():
            return False

        for i, slI in zip(range(startI, endI), range(t_slice.rows())):
            for j, slJ in zip(range(startJ, endJ), range(t_slice.cols())):
                value = t_slice.GetValue(slI, slJ)
                self.SetValue(i, j, value)
        return True
mit
-3,138,855,623,441,978,400
31.701299
80
0.528142
false
3.963331
false
false
false
jhermann/kunstkopf
src/kunstkopf/__init__.py
1
1258
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace
""" kunstkopf [ˈkʊnstkɔp͜f] is a set of tools that handle audio (meta-)data and control hi-fi gear.

    Copyright © 2015 Jürgen Hermann <jh@web.de>

    Licensed under the GNU General Public License, Version 3.0
"""
# Copyright © 2015 Jürgen Hermann <jh@web.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see {http://www.gnu.org/licenses/}.

__url__ = "https://github.com/jhermann/kunstkopf"
__version__ = "0.1.0"
__license__ = "GPLv3"
__author__ = "Jürgen Hermann"
__author_email__ = "jh@web.de"
__keywords__ = "python audio tool tagging indexing searching syncing"

__all__ = []
gpl-3.0
-7,133,038,733,929,701,000
38.03125
99
0.68775
false
3.384824
false
false
false
HBNLdev/DataStore
db/sas_tools.py
1
2566
''' tools for working with .sas7bdat files '''

import os
from collections import OrderedDict

import pandas as pd
from sas7bdat import SAS7BDAT

from .knowledge.questionnaires import map_ph4, map_ph4_ssaga

map_subject = {'core': {'file_pfixes': []}}

parent_dir = '/processed_data/zork/zork-phase4-69/session/'
n_header_lines = 30


def extract_descriptions(path):
    ''' given path to .sas7bdat file, returns dictionary mapping column labels
        to their verbose descriptions in the SAS header.
        dictionary will only contain an entry if there was new information present
        (if there was a description, and it was different from the label) '''
    f = SAS7BDAT(path)
    kmap = OrderedDict()
    for line in str(f.header).splitlines()[n_header_lines + 1:]:
        line_parts = line.split(maxsplit=4)
        label = line_parts[1]
        try:
            description = line_parts[4].rstrip()
            if description == label or description[0] == '$':
                continue
            else:
                kmap[label] = description
        except IndexError:
            pass
    return kmap


def exemplary_files(kdict):
    ''' given a questionnaire knowledge map, return a new dictionary mapping
        questionnaire names to the filepath of an exemplary .sas7bdat file
        for each file prefix '''
    exemplars = {}
    for test, tdict in kdict.items():
        for fpx in tdict['file_pfixes']:
            fd = parent_dir + test
            fn = fpx + '.sas7bdat'
            fp = os.path.join(fd, fn)
            if os.path.exists(fp):
                exemplars[test] = fp
            else:
                print(fp, 'did not exist')
    return exemplars


def build_labelmaps():
    ''' return a dict in which keys are questionnaires names and values are
        dictionaries mapping column labels to descriptions '''
    comb_dict = map_ph4.copy()
    comb_dict.update(map_ph4_ssaga)
    exemplars = exemplary_files(comb_dict)
    big_kmap = {}
    for test, fp in exemplars.items():
        kmap = extract_descriptions(fp)
        big_kmap[test] = kmap
    return big_kmap


def df_fromsas(fullpath, id_lbl='ind_id'):
    ''' convert .sas7bdat to dataframe.
        unused because fails on incorrectly formatted files. '''
    # read csv in as dataframe
    df = pd.read_sas(fullpath, format='sas7bdat')
    # convert id to str and save as new column
    df[id_lbl] = df[id_lbl].apply(int).apply(str)
    df['ID'] = df[id_lbl]
    return df
gpl-3.0
-7,685,914,298,742,378,000
30.481013
82
0.606002
false
3.767988
false
false
false
OpenTreeOfLife/gcmdr
run_synth_studies_mono.py
1
1437
import load_synth_extract

from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi import studytreelist as fungilist
from microbes import studytreelist as microbelist

studytreelist = []
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
studytreelist.extend(plantslist)

if __name__ == "__main__":
    from wopr_conf_TEMP import *

    synthottolid="93302" # cellular organisms

    # studytreelist = ["420_522"]
    # studytreelist = ["2460_5285"] # Pyron Squamata study
    # studytreelist = ["2573_5959"] # Sauria
    # studytreelist = ["2573_5959"]
    # from metazoa import studytreelist as metalist
    # studytreelist = []
    # studytreelist.extend(metalist)
    # studytreelist = [
    #     "1634_3303", # Chiroptera. Agnarsson et al. 2011. PLoS Currents Tree of Life
    # ]

    print "loading synthottolid:",synthottolid
    print "loading studytreelist:",studytreelist

    for i in studytreelist:
        tstudy_list = [i]
        generallogfileloc = "synth_studies_submission/"+i+".log"
        ttfntreefn = "synth_studies_submission/"+i+".tre"
        infmonofn = "synth_studies_submission/"+i+".inf_mono"
        load_synth_extract.run_load_single_ttfn_inf_mono(dott,dload,studyloc,tstudy_list,javapre,
            treemloc,generallogfileloc,dsynth,synthottolid,treefn,ttfntreefn,infmonofn)
bsd-2-clause
-8,043,959,451,811,851,000
33.214286
123
0.695198
false
3.057447
false
false
false
n3wb13/OpenNfrGui-5.0-1
lib/python/Plugins/Extensions/MediaPortal/additions/porn/adultbay.py
1
12372
# -*- coding: utf-8 -*- ############################################################################################### # # MediaPortal for Dreambox OS # # Coded by MediaPortal Team (c) 2013-2015 # # This plugin is open source but it is NOT free software. # # This plugin may only be distributed to and executed on hardware which # is licensed by Dream Property GmbH. This includes commercial distribution. # In other words: # It's NOT allowed to distribute any parts of this plugin or its source code in ANY way # to hardware which is NOT licensed by Dream Property GmbH. # It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way # on hardware which is NOT licensed by Dream Property GmbH. # # This applies to the source code as a whole as well as to parts of it, unless # explicitely stated otherwise. # # If you want to use or modify the code or parts of it, # you have to keep OUR license and inform us about the modifications, but it may NOT be # commercially distributed other than under the conditions noted above. # # As an exception regarding modifcations, you are NOT permitted to remove # any copy protections implemented in this plugin or change them for means of disabling # or working around the copy protections, unless the change has been explicitly permitted # by the original authors. Also decompiling and modification of the closed source # parts is NOT permitted. # # Advertising with this plugin is NOT allowed. # For other uses, permission from the authors is necessary. # ############################################################################################### from Plugins.Extensions.MediaPortal.plugin import _ from Plugins.Extensions.MediaPortal.resources.imports import * class adultbayGenreScreen(MPScreen): def __init__(self, session): self.plugin_path = mp_globals.pluginPath self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value) if not fileExists(path): path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml" with open(path, "r") as f: self.skin = f.read() f.close() MPScreen.__init__(self, session) self["actions"] = ActionMap(["MP_Actions"], { "ok" : self.keyOK, "0" : self.closeAll, "cancel": self.keyCancel }, -1) self['title'] = Label("The Adult Bay") self['ContentTitle'] = Label("Genre:") self.keyLocked = True self.suchString = '' self.filmliste = [] self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent) self['liste'] = self.ml self.onLayoutFinish.append(self.genreData) def genreData(self): self.filmliste.append(("--- Search ---", None)) self.filmliste.append(("Newest (Clips)", "http://adultbay.org/category/clips/")) self.filmliste.append(("Newest (Movies)", "http://adultbay.org/category/movies/")) self.filmliste.append(("Clips", None)) self.filmliste.append(("Movies", None)) self.filmliste.append(("HDTV", None)) self.filmliste.append(("DVD-R", "http://adultbay.org/category/dvd-r/")) self.filmliste.append(("Hentai", "http://adultbay.org/category/hentai/")) self.ml.setList(map(self._defaultlistcenter, self.filmliste)) self.keyLocked = False def SuchenCallback(self, callback = None, entry = None): if callback is not None and len(callback): self.suchString = callback.replace(' ', '+') Link = self.suchString Name = "--- Search ---" self.session.open(adultbayListScreen, Link, Name) def keyOK(self): if self.keyLocked: return if not config.mediaportal.premiumize_use.value: message = self.session.open(MessageBoxExt, 
_("The Adult Bay only works with enabled MP premiumize.me option (MP Setup)!"), MessageBoxExt.TYPE_INFO, timeout=10) return Name = self['liste'].getCurrent()[0][0] Link = self['liste'].getCurrent()[0][1] if Name == "--- Search ---": self.suchen() elif Link != None: self.session.open(adultbayListScreen, Link, Name) else: self.session.open(adultbaySubGenreScreen, Name) class adultbaySubGenreScreen(MPScreen): def __init__(self, session, Name): self.Name = Name self.plugin_path = mp_globals.pluginPath self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value) if not fileExists(path): path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml" with open(path, "r") as f: self.skin = f.read() f.close() MPScreen.__init__(self, session) self["actions"] = ActionMap(["MP_Actions"], { "ok" : self.keyOK, "0" : self.closeAll, "cancel": self.keyCancel }, -1) self['title'] = Label("The Adult Bay") self['ContentTitle'] = Label("Genre:") self.keyLocked = True self.filmliste = [] self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent) self['liste'] = self.ml self.onLayoutFinish.append(self.loadPage) def loadPage(self): url = "http://adultbay.org/" getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError) def parseData(self, data): parse = re.search('class="cat-item.*?>'+self.Name+'</a>(.*?)</ul>', data, re.S) raw = re.findall('<li\sclass="cat-item.*?a\shref="(.*?)".*?>(.*?)</a>', parse.group(1), re.S) if raw: self.filmliste = [] for (Url, Title) in raw: self.filmliste.append((decodeHtml(Title), Url)) self.filmliste.sort() self.ml.setList(map(self._defaultlistcenter, self.filmliste)) self.keyLocked = False def keyOK(self): if self.keyLocked: return Name = self['liste'].getCurrent()[0][0] Link = self['liste'].getCurrent()[0][1] self.session.open(adultbayListScreen, Link, Name) class adultbayListScreen(MPScreen, ThumbsHelper): def __init__(self, session, Link, Name): self.Link = Link self.Name = Name self.plugin_path = mp_globals.pluginPath self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value) if not fileExists(path): path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml" with open(path, "r") as f: self.skin = f.read() f.close() MPScreen.__init__(self, session) ThumbsHelper.__init__(self) self["actions"] = ActionMap(["MP_Actions"], { "ok" : self.keyOK, "0" : self.closeAll, "cancel": self.keyCancel, "5" : self.keyShowThumb, "up" : self.keyUp, "down" : self.keyDown, "right" : self.keyRight, "left" : self.keyLeft, "nextBouquet" : self.keyPageUp, "prevBouquet" : self.keyPageDown, "green" : self.keyPageNumber }, -1) self['title'] = Label("The Adult Bay") self['ContentTitle'] = Label("Genre: %s" % self.Name) self['F2'] = Label(_("Page")) self['Page'] = Label(_("Page:")) self.keyLocked = True self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent) self['liste'] = self.ml self.page = 1 self.onLayoutFinish.append(self.loadPage) def loadPage(self): self.keyLocked = True self.filmliste = [] if re.match(".*?Search", self.Name): url = "http://adultbay.org/search/%s/page/%s/" % (self.Link, str(self.page)) else: if self.page == 1: url = self.Link else: url = self.Link + "page/" + str(self.page) + "/" getPage(url, 
headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError) def parseData(self, data): if re.match('.*?<h2>Not Found</h2>', data, re.S): self.filmliste.append((_('No movies found!'), None, None, None)) self.ml.setList(map(self._defaultlistleft, self.filmliste)) elif re.match('.*?<h2>Sorry: No Results</h2>', data, re.S): self.filmliste.append((_('No movies found!'), None, None, None)) self.ml.setList(map(self._defaultlistleft, self.filmliste)) elif re.match('.*?Search is temporarily disabled', data, re.S): self.filmliste.append(("Search is temporarily disabled...", None, None, None)) self.ml.setList(map(self._defaultlistleft, self.filmliste)) else: parse = re.search('class="wp-pagenavi">(.*?)</div>', data, re.S) if parse: lastpage = re.findall('\d{0,1},{0,1}\d+', parse.group(1), re.S) lastpage = [x.replace(',', '') for x in lastpage] lastpage = [int(x) for x in lastpage] lastpage.sort(key=int) self.lastpage = int(lastpage[-1]) self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage))) else: parse = re.search('class="navigation">.*?/page/(.*?)/.*?Older Entries', data, re.S) if parse: self.lastpage = int(parse.group(1)) else: self.lastpage = 1 self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage))) raw = re.findall('class="post".*?<a\shref="(.*?)".*?img\ssrc="(.*?)".*?(<strong>|<p>)(.*?)(</strong>|<br\s/>|</p>).*?<p>(.*?)(Read\smore|\(more...\))', data, re.S) if raw: for (link, image, trash, title, trash, desc, trash) in raw: title = stripAllTags(title).strip() desc = stripAllTags(desc).strip() self.filmliste.append((decodeHtml(title), link, image, desc)) self.ml.setList(map(self._defaultlistleft, self.filmliste)) self.ml.moveToIndex(0) self.keyLocked = False self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1) self.showInfos() def showInfos(self): title = self['liste'].getCurrent()[0][0] self['name'].setText(title) desc = self['liste'].getCurrent()[0][3] self['handlung'].setText(desc) coverUrl = self['liste'].getCurrent()[0][2] CoverHelper(self['coverArt']).getCover(coverUrl) def keyOK(self): if self.keyLocked: return Link = self['liste'].getCurrent()[0][0] if Link == None: return Title = self['liste'].getCurrent()[0][1] Cover = self['liste'].getCurrent()[0][2] self.session.open(StreamAuswahl, Link, Title, Cover) class StreamAuswahl(MPScreen): def __init__(self, session, Title, Link, Cover): self.Link = Link self.Title = Title self.Cover = Cover self.plugin_path = mp_globals.pluginPath self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value) if not fileExists(path): path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml" with open(path, "r") as f: self.skin = f.read() f.close() MPScreen.__init__(self, session) self["actions"] = ActionMap(["MP_Actions"], { "ok" : self.keyOK, "0" : self.closeAll, "cancel": self.keyCancel }, -1) self['title'] = Label("The Adult Bay") self['ContentTitle'] = Label("%s" %self.Title) self.filmliste = [] self.keyLocked = True self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent) self['liste'] = self.ml self.onLayoutFinish.append(self.loadPage) def loadPage(self): CoverHelper(self['coverArt']).getCover(self.Cover) self.keyLocked = True url = self.Link getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadPageData).addErrback(self.dataError) def 
loadPageData(self, data): parse = re.search('class="post_header">(.*?)Recommends:</h2>', data, re.S) streams = re.findall('(http://(?!adultbay.org)(.*?)\/.*?)[\'|"|\&|<]', parse.group(1), re.S) if streams: for (stream, hostername) in streams: if isSupportedHoster(hostername, True): hostername = hostername.replace('www.','') self.filmliste.append((hostername, stream)) # remove duplicates self.filmliste = list(set(self.filmliste)) if len(self.filmliste) == 0: self.filmliste.append((_('No supported streams found!'), None)) self.ml.setList(map(self._defaultlisthoster, self.filmliste)) self.keyLocked = False def keyOK(self): if self.keyLocked: return url = self['liste'].getCurrent()[0][1] if url == None: return get_stream_link(self.session).check_link(url, self.got_link) def got_link(self, stream_url): if stream_url == None: message = self.session.open(MessageBoxExt, _("Stream not found, try another Stream Hoster."), MessageBoxExt.TYPE_INFO, timeout=3) else: title = self.Title self.session.open(SimplePlayer, [(self.Title, stream_url, self.Cover)], showPlaylist=False, ltype='adultbay', cover=True)
gpl-2.0
2,939,069,052,170,796,000
36.374622
166
0.66637
false
2.935453
false
false
false
kll334477/NewsScrapy
thepaper/thepaper/spiders/wshang_spider.py
1
5240
#!/usr/bin/env python # -*- coding:utf-8 -*- __author__ = 'yinzishao' """ 手机版没有cookie,更方便 但是pc版的首页是所有分类混在一起的 手机版则是新闻在各个分类,所以爬取的时候需要爬各个分类。 """ import re import scrapy from bs4 import BeautifulSoup import logging from thepaper.items import NewsItem import json logger = logging.getLogger("WshangSpider") from thepaper.settings import * from thepaper.util import judge_news_crawl #TODO: class NbdSpider(scrapy.spiders.Spider): domain = "http://m.iwshang.com/" name = "wshang" # allowed_domains = ["i.wshang.com",] flag = {} start_urls = [ "http://m.iwshang.com/", ] #pc端新闻页面url pc_news_url = "http://i.wshang.com/Post/Default/Index/pid/%s.html" def parse(self, response): """ :param response: :return:抛出每个类别的post请求 post参数: inslider page pagesize Content-Type:application/x-www-form-urlencoded """ soup = BeautifulSoup(response.body) menu = soup.find_all("a",class_="ui-more") #所有的类别的链接 if menu: for topic in menu: topic_name = topic.text.replace(u"查看","") topic_url = topic.get("href") self.flag.setdefault(topic_url,0) page="1" #post_data需要字符串 post_data = { "inslider":"0", "page":page, "pagesize":"10" } # yield scrapy.Request(topic_url, # callback=self.parse_topic, # method="POST", # headers={"Content-Type":"application/x-www-form-urlencoded"}, # body=json.dumps(post_data) # ) yield scrapy.FormRequest( url=topic_url, formdata=post_data, callback=self.parse_topic, meta={"page":page,"topic_name":topic_name} ) def parse_topic(self,response): topic_url = response.url # print topic_url body = json.loads(response.body) news_list = body["data"] page = response.meta.get("page","1") topic_name = response.meta.get("topic_name",None) #http://m.iwshang.com/category/20 没有新闻 if not news_list: self.flag[topic_url]=page for news in news_list: news_date_timestamp = news.get("published",None) struct_date = datetime.datetime.fromtimestamp(int(news_date_timestamp)) news_date = struct_date.strftime("%Y-%m-%d %H:%M:%S") title = news.get("title",None) news_no = news.get("contentid",None) abstract = news.get("description",None) pic = news.get("thumb",None) news_url = news.get("url",None) #手机端新闻页面链接 referenceid = news.get("referenceid",None) #pc端的id,手机端的id跟pc端的id不一样 pc_news_url = self.pc_news_url % referenceid #pc端新闻页面链接 item = NewsItem( news_date=news_date, title=title, news_no=news_no, abstract=abstract, pic=pic, news_url=pc_news_url, topic=topic_name ) item = judge_news_crawl(item) if item: # yield item yield scrapy.Request(pc_news_url,callback=self.parse_news,meta={"item":item}) else: self.flag[topic_url]=page if not self.flag[topic_url]: page = str(int(page)+1) post_data = { "inslider":"0", "page":page, "pagesize":"10" } yield scrapy.FormRequest( url=topic_url, formdata=post_data, callback=self.parse_topic, meta={"page":page} ) def parse_news(self,response): item = response.meta.get("item",NewsItem()) soup = BeautifulSoup(response.body) #手机 # content = soup.find("div",id="content-show").get_text(strip=True) if soup.find("div",id="content-show") else None #pc content = soup.find("div",class_="article-cont").get_text(strip=True) if soup.find("div",class_="article-cont") else None article_head = soup.find("div",class_="article-head") author=None if article_head: author = article_head.p.text.split(u"/")[1] article_tag_list = soup.find("div",class_="article-tag")("a") if soup.find("div",class_="article-tag") else [] tags = [tag.text for tag in article_tag_list] item["tags"] = tags item["author"] = author item["content"] = content item["crawl_date"] = NOW yield item
lgpl-3.0
526,355,286,195,562,100
34.7
129
0.503601
false
3.532155
false
false
false
archesproject/arches
arches/management/commands/card_component.py
1
3937
""" ARCHES - a program developed to inventory and manage immovable cultural heritage. Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import os import uuid from arches.management.commands import utils from arches.app.models import models from django.core.management.base import BaseCommand, CommandError from django.db.utils import IntegrityError class Command(BaseCommand): """ Commands for managing Arches functions """ def add_arguments(self, parser): parser.add_argument("operation", nargs="?") parser.add_argument("-s", "--source", action="store", dest="source", default="", help="Widget json file to be loaded") parser.add_argument("-n", "--name", action="store", dest="name", default="", help="The name of the widget to unregister") def handle(self, *args, **options): if options["operation"] == "register": self.register(source=options["source"]) if options["operation"] == "unregister": self.unregister(name=options["name"]) if options["operation"] == "list": self.list() if options["operation"] == "update": self.update(source=options["source"]) def register(self, source): """ Inserts a card component into the arches db """ import json details = {} with open(source) as f: details = json.load(f) try: uuid.UUID(details["componentid"]) except: details["componentid"] = str(uuid.uuid4()) print("Registering card component with componentid: {}".format(details["componentid"])) instance = models.CardComponent( componentid=details["componentid"], name=details["name"], description=details["description"], component=details["component"], componentname=details["componentname"], defaultconfig=details["defaultconfig"], ) instance.save() def update(self, source): """ Updates an existing card component in the arches db """ import json details = {} with open(source) as f: details = json.load(f) instance = models.CardComponent.objects.get(name=details["name"]) instance.description = details["description"] instance.component = details["component"] instance.componentname = details["componentname"] instance.defaultconfig = details["defaultconfig"] instance.save() def unregister(self, name): """ Removes a function from the system """ try: instances = models.CardComponent.objects.filter(name=name) if str(instances[0].componentid) != "f05e4d3a-53c1-11e8-b0ea-784f435179ea": instances[0].delete() else: print("You cannot unregister the default card component.") except Exception as e: print(e) def list(self): """ Lists registered card components """ try: instances = models.CardComponent.objects.all() for instance in instances: print(instance.name) except Exception as e: print(e)
agpl-3.0
-963,692,031,037,702,900
30.496
129
0.626365
false
4.494292
false
false
false
wroersma/volatility
volatility/plugins/overlays/windows/win10.py
1
21936
# Volatility # Copyright (c) 2008-2015 Volatility Foundation # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License Version 2 as # published by the Free Software Foundation. You may not use, modify or # distribute this program under any other version of the GNU General # Public License. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # """ @author: The Volatility Foundation @license: GNU General Public License 2.0 @contact: awalters@4tphi.net This file provides support for Windows 10. """ import volatility.plugins.overlays.windows.windows as windows import volatility.obj as obj import volatility.win32.tasks as tasks import volatility.debug as debug import volatility.plugins.overlays.windows.win8 as win8 try: import distorm3 has_distorm = True except ImportError: has_distorm = False class _HMAP_ENTRY(obj.CType): @property def BlockAddress(self): return self.PermanentBinAddress & 0xFFFFFFFFFFF0 class Win10Registry(obj.ProfileModification): """The Windows 10 registry HMAP""" conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4} def modification(self, profile): profile.object_classes.update({"_HMAP_ENTRY": _HMAP_ENTRY}) class Win10x64DTB(obj.ProfileModification): """The Windows 10 64-bit DTB signature""" before = ['WindowsOverlay', 'Windows64Overlay', 'Win8x64DTB'] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4, 'memory_model': lambda x: x == '64bit', } def modification(self, profile): profile.merge_overlay({ 'VOLATILITY_MAGIC': [ None, { 'DTBSignature' : [ None, ['VolatilityMagic', dict(value = "\x03\x00\xb6\x00")]], }]}) class Win10x86DTB(obj.ProfileModification): """The Windows 10 32-bit DTB signature""" before = ['WindowsOverlay', 'Win8x86DTB'] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4, 'memory_model': lambda x: x == '32bit', } def modification(self, profile): build = profile.metadata.get("build", 0) if build >= 15063: signature = "\x03\x00\x2C\x00" else: signature = "\x03\x00\x2A\x00" profile.merge_overlay({ 'VOLATILITY_MAGIC': [ None, { 'DTBSignature' : [ None, ['VolatilityMagic', dict(value = signature)]], }]}) class Win10KDBG(windows.AbstractKDBGMod): """The Windows 10 KDBG signatures""" before = ['Win8KDBG'] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4, 'build': lambda x: x >= 14393} kdbgsize = 0x368 class ObHeaderCookieStore(object): """A class for finding and storing the nt!ObHeaderCookie value""" _instance = None def __init__(self): self._cookie = None def cookie(self): return self._cookie def findcookie(self, kernel_space): """Find and read the nt!ObHeaderCookie value. On success, return True and save the cookie value in self._cookie. On Failure, return False. This method must be called before performing any tasks that require object header validation including handles, psxview (due to pspcid) and the object scanning plugins (psscan, etc). 
NOTE: this cannot be implemented as a volatility "magic" class, because it must be persistent across various classes and sources. We don't want to recalculate the cookie value multiple times. """ meta = kernel_space.profile.metadata vers = (meta.get("major", 0), meta.get("minor", 0)) # this algorithm only applies to Windows 10 or greater if vers < (6, 4): return True # prevent subsequent attempts from recalculating the existing value if self._cookie: return True if not has_distorm: debug.warning("distorm3 module is not installed") return False kdbg = tasks.get_kdbg(kernel_space) if not kdbg: debug.warning("Cannot find KDBG") return False nt_mod = None for mod in kdbg.modules(): nt_mod = mod break if nt_mod == None: debug.warning("Cannot find NT module") return False addr = nt_mod.getprocaddress("ObGetObjectType") if addr == None: debug.warning("Cannot find nt!ObGetObjectType") return False # produce an absolute address by adding the DLL base to the RVA addr += nt_mod.DllBase if not nt_mod.obj_vm.is_valid_address(addr): debug.warning("nt!ObGetObjectType at {0} is invalid".format(addr)) return False # in theory...but so far we haven't tested 32-bits model = meta.get("memory_model") if model == "32bit": mode = distorm3.Decode32Bits else: mode = distorm3.Decode64Bits data = nt_mod.obj_vm.read(addr, 100) ops = distorm3.Decompose(addr, data, mode, distorm3.DF_STOP_ON_RET) addr = None # search backwards from the RET and find the MOVZX if model == "32bit": # movzx ecx, byte ptr ds:_ObHeaderCookie for op in reversed(ops): if (op.size == 7 and 'FLAG_DST_WR' in op.flags and len(op.operands) == 2 and op.operands[0].type == 'Register' and op.operands[1].type == 'AbsoluteMemoryAddress' and op.operands[1].size == 8): addr = op.operands[1].disp & 0xFFFFFFFF break else: # movzx ecx, byte ptr cs:ObHeaderCookie for op in reversed(ops): if (op.size == 7 and 'FLAG_RIP_RELATIVE' in op.flags and len(op.operands) == 2 and op.operands[0].type == 'Register' and op.operands[1].type == 'AbsoluteMemory' and op.operands[1].size == 8): addr = op.address + op.size + op.operands[1].disp break if not addr: debug.warning("Cannot find nt!ObHeaderCookie") return False if not nt_mod.obj_vm.is_valid_address(addr): debug.warning("nt!ObHeaderCookie at {0} is not valid".format(addr)) return False cookie = obj.Object("unsigned int", offset = addr, vm = nt_mod.obj_vm) self._cookie = int(cookie) return True @staticmethod def instance(): if not ObHeaderCookieStore._instance: ObHeaderCookieStore._instance = ObHeaderCookieStore() return ObHeaderCookieStore._instance class VolatilityCookie(obj.VolatilityMagic): """The Windows 10 Cookie Finder""" def v(self): if self.value is None: return self.get_best_suggestion() else: return self.value def get_suggestions(self): if self.value: yield self.value for x in self.generate_suggestions(): yield x def generate_suggestions(self): store = ObHeaderCookieStore.instance() store.findcookie(self.obj_vm) yield store.cookie() class Win10Cookie(obj.ProfileModification): """The Windows 10 Cookie Finder""" before = ['WindowsOverlay'] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4, } def modification(self, profile): profile.merge_overlay({ 'VOLATILITY_MAGIC': [ None, { 'ObHeaderCookie' : [ 0x0, ['VolatilityCookie', dict(configname = "COOKIE")]], }]}) profile.object_classes.update({'VolatilityCookie': VolatilityCookie}) class _OBJECT_HEADER_10(win8._OBJECT_HEADER): @property def TypeIndex(self): """Wrap the TypeIndex member with a property that decodes it 
with the nt!ObHeaderCookie value.""" cook = obj.VolMagic(self.obj_vm).ObHeaderCookie.v() addr = self.obj_offset indx = int(self.m("TypeIndex")) return ((addr >> 8) ^ cook ^ indx) & 0xFF def is_valid(self): """Determine if a given object header is valid""" if not obj.CType.is_valid(self): return False if self.InfoMask > 0x88: return False if self.PointerCount > 0x1000000 or self.PointerCount < 0: return False return True type_map = { 2: 'Type', 3: 'Directory', 4: 'SymbolicLink', 5: 'Token', 6: 'Job', 7: 'Process', 8: 'Thread', 9: 'UserApcReserve', 10: 'IoCompletionReserve', 11: 'Silo', 12: 'DebugObject', 13: 'Event', 14: 'Mutant', 15: 'Callback', 16: 'Semaphore', 17: 'Timer', 18: 'IRTimer', 19: 'Profile', 20: 'KeyedEvent', 21: 'WindowStation', 22: 'Desktop', 23: 'Composition', 24: 'RawInputManager', 25: 'TpWorkerFactory', 26: 'Adapter', 27: 'Controller', 28: 'Device', 29: 'Driver', 30: 'IoCompletion', 31: 'WaitCompletionPacket', 32: 'File', 33: 'TmTm', 34: 'TmTx', 35: 'TmRm', 36: 'TmEn', 37: 'Section', 38: 'Session', 39: 'Partition', 40: 'Key', 41: 'ALPC Port', 42: 'PowerRequest', 43: 'WmiGuid', 44: 'EtwRegistration', 45: 'EtwConsumer', 46: 'DmaAdapter', 47: 'DmaDomain', 48: 'PcwObject', 49: 'FilterConnectionPort', 50: 'FilterCommunicationPort', 51: 'NetworkNamespace', 52: 'DxgkSharedResource', 53: 'DxgkSharedSyncObject', 54: 'DxgkSharedSwapChainObject', } class _OBJECT_HEADER_10_1AC738FB(_OBJECT_HEADER_10): type_map = { 2: 'Type', 3: 'Directory', 4: 'SymbolicLink', 5: 'Token', 6: 'Job', 7: 'Process', 8: 'Thread', 9: 'UserApcReserve', 10: 'IoCompletionReserve', 11: 'DebugObject', 12: 'Event', 13: 'Mutant', 14: 'Callback', 15: 'Semaphore', 16: 'Timer', 17: 'IRTimer', 18: 'Profile', 19: 'KeyedEvent', 20: 'WindowStation', 21: 'Desktop', 22: 'Composition', 23: 'RawInputManager', 24: 'TpWorkerFactory', 25: 'Adapter', 26: 'Controller', 27: 'Device', 28: 'Driver', 29: 'IoCompletion', 30: 'WaitCompletionPacket', 31: 'File', 32: 'TmTm', 33: 'TmTx', 34: 'TmRm', 35: 'TmEn', 36: 'Section', 37: 'Session', 38: 'Partition', 39: 'Key', 40: 'ALPC Port', 41: 'PowerRequest', 42: 'WmiGuid', 43: 'EtwRegistration', 44: 'EtwConsumer', 45: 'DmaAdapter', 46: 'DmaDomain', 47: 'PcwObject', 48: 'FilterConnectionPort', 49: 'FilterCommunicationPort', 50: 'NetworkNamespace', 51: 'DxgkSharedResource', 52: 'DxgkSharedSyncObject', 53: 'DxgkSharedSwapChainObject', } class _OBJECT_HEADER_10_DD08DD42(_OBJECT_HEADER_10): type_map = { 2: 'Type', 3: 'Directory', 4: 'SymbolicLink', 5: 'Token', 6: 'Job', 7: 'Process', 8: 'Thread', 9: 'UserApcReserve', 10: 'IoCompletionReserve', 11: 'PsSiloContextPaged', 12: 'PsSiloContextNonPaged', 13: 'DebugObject', 14: 'Event', 15: 'Mutant', 16: 'Callback', 17: 'Semaphore', 18: 'Timer', 19: 'IRTimer', 20: 'Profile', 21: 'KeyedEvent', 22: 'WindowStation', 23: 'Desktop', 24: 'Composition', 25: 'RawInputManager', 26: 'CoreMessaging', 27: 'TpWorkerFactory', 28: 'Adapter', 29: 'Controller', 30: 'Device', 31: 'Driver', 32: 'IoCompletion', 33: 'WaitCompletionPacket', 34: 'File', 35: 'TmTm', 36: 'TmTx', 37: 'TmRm', 38: 'TmEn', 39: 'Section', 40: 'Session', 41: 'Partition', 42: 'Key', 43: 'RegistryTransaction', 44: 'ALPC', 45: 'PowerRequest', 46: 'WmiGuid', 47: 'EtwRegistration', 48: 'EtwConsumer', 49: 'DmaAdapter', 50: 'DmaDomain', 51: 'PcwObject', 52: 'FilterConnectionPort', 53: 'FilterCommunicationPort', 54: 'NdisCmState', 55: 'DxgkSharedResource', 56: 'DxgkSharedSyncObject', 57: 'DxgkSharedSwapChainObject', 58: 'VRegConfigurationContext', 59: 'VirtualKey', } class 
_OBJECT_HEADER_10_15063(_OBJECT_HEADER_10): type_map = { 2: 'Type', 3: 'Directory', 4: 'SymbolicLink', 5: 'Token', 6: 'Job', 7: 'Process', 8: 'Thread', 9: 'UserApcReserve', 10: 'IoCompletionReserve', 11: 'ActivityReference', 12: 'PsSiloContextPaged', 13: 'PsSiloContextNonPaged', 14: 'DebugObject', 15: 'Event', 16: 'Mutant', 17: 'Callback', 18: 'Semaphore', 19: 'Timer', 20: 'IRTimer', 21: 'Profile', 22: 'KeyedEvent', 23: 'WindowStation', 24: 'Desktop', 25: 'Composition', 26: 'RawInputManager', 27: 'CoreMessaging', 28: 'TpWorkerFactory', 29: 'Adapter', 30: 'Controller', 31: 'Device', 32: 'Driver', 33: 'IoCompletion', 34: 'WaitCompletionPacket', 35: 'File', 36: 'TmTm', 37: 'TmTx', 38: 'TmRm', 39: 'TmEn', 40: 'Section', 41: 'Session', 42: 'Partition', 43: 'Key', 44: 'RegistryTransaction', 45: 'ALPC Port', 46: 'PowerRequest', 47: 'WmiGuid', 48: 'EtwRegistration', 49: 'EtwSessionDemuxEntry', 50: 'EtwConsumer', 51: 'DmaAdapter', 52: 'DmaDomain', 53: 'PcwObject', 54: 'FilterConnectionPort', 55: 'FilterCommunicationPort', 56: 'NdisCmState', 57: 'DxgkSharedResource', 58: 'DxgkSharedSyncObject', 59: 'DxgkSharedSwapChainObject', 60: 'DxgkCurrentDxgProcessObject', 61: 'VRegConfigurationContext' } class _HANDLE_TABLE_10_DD08DD42(win8._HANDLE_TABLE_81R264): def decode_pointer(self, value): value = value & 0xFFFFFFFFFFFFFFF8 value = value >> self.DECODE_MAGIC if (value & (1 << 47)): value = value | 0xFFFF000000000000 return value class Win10ObjectHeader(obj.ProfileModification): before = ["Win8ObjectClasses"] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4} def modification(self, profile): metadata = profile.metadata build = metadata.get("build", 0) if build >= 15063: header = _OBJECT_HEADER_10_15063 ## update the handle table here as well if metadata.get("memory_model") == "64bit": profile.object_classes.update({ "_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42}) elif build >= 14393: header = _OBJECT_HEADER_10_DD08DD42 ## update the handle table here as well if metadata.get("memory_model") == "64bit": profile.object_classes.update({ "_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42}) elif build >= 10240: header = _OBJECT_HEADER_10_1AC738FB else: header = _OBJECT_HEADER_10 profile.object_classes.update({"_OBJECT_HEADER": header}) class Win10PoolHeader(obj.ProfileModification): before = ['WindowsOverlay'] conditions = {'os': lambda x: x == 'windows', 'major': lambda x: x == 6, 'minor': lambda x: x == 4, 'build': lambda x: x == 10240} def modification(self, profile): meta = profile.metadata memory_model = meta.get("memory_model", "32bit") if memory_model == "32bit": pool_types = {'_POOL_HEADER' : [ 0x8, { 'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]], 'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]], 'BlockSize' : [ 0x2, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]], 'PoolType' : [ 0x2, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]], 'Ulong1' : [ 0x0, ['unsigned long']], 'PoolTag' : [ 0x4, ['unsigned long']], 'AllocatorBackTraceIndex' : [ 0x4, ['unsigned short']], 'PoolTagHash' : [ 0x6, ['unsigned short']], }]} else: pool_types = {'_POOL_HEADER' : [ 0x10, { 'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]], 'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]], 'BlockSize' : [ 0x2, ['BitField', 
dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]], 'PoolType' : [ 0x2, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]], 'Ulong1' : [ 0x0, ['unsigned long']], 'PoolTag' : [ 0x4, ['unsigned long']], 'ProcessBilled' : [ 0x8, ['pointer64', ['_EPROCESS']]], 'AllocatorBackTraceIndex' : [ 0x8, ['unsigned short']], 'PoolTagHash' : [ 0xa, ['unsigned short']], }]} profile.vtypes.update(pool_types) class Win10x64(obj.Profile): """ A Profile for Windows 10 x64 """ _md_memory_model = '64bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 9841 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_vtypes' _md_product = ["NtProductWinNt"] class Win10x64_10586(obj.Profile): """ A Profile for Windows 10 x64 (10.0.10586.306 / 2016-04-23) """ _md_memory_model = '64bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 10240 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_1AC738FB_vtypes' _md_product = ["NtProductWinNt"] class Win10x64_14393(obj.Profile): """ A Profile for Windows 10 x64 (10.0.14393.0 / 2016-07-16) """ _md_memory_model = '64bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 14393 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes' _md_product = ["NtProductWinNt"] class Win10x86(obj.Profile): """ A Profile for Windows 10 x86 """ _md_memory_model = '32bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 9841 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_vtypes' _md_product = ["NtProductWinNt"] class Win10x86_10586(obj.Profile): """ A Profile for Windows 10 x86 (10.0.10586.420 / 2016-05-28) """ _md_memory_model = '32bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 10240 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_44B89EEA_vtypes' _md_product = ["NtProductWinNt"] class Win10x86_14393(obj.Profile): """ A Profile for Windows 10 x86 (10.0.14393.0 / 2016-07-16) """ _md_memory_model = '32bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 14393 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_9619274A_vtypes' _md_product = ["NtProductWinNt"] class Win2016x64_14393(Win10x64_14393): """ A Profile for Windows Server 2016 x64 (10.0.14393.0 / 2016-07-16) """ _md_memory_model = '64bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 14393 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes' _md_product = ["NtProductLanManNt", "NtProductServer"] class Win10x86_15063(obj.Profile): """ A Profile for Windows 10 x86 (10.0.15063.0 / 2017-04-04) """ _md_memory_model = '32bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 15063 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_15063_vtypes' _md_product = ["NtProductWinNt"] class Win10x64_15063(obj.Profile): """ A Profile for Windows 10 x64 (10.0.15063.0 / 2017-04-04) """ _md_memory_model = '64bit' _md_os = 'windows' _md_major = 6 _md_minor = 4 _md_build = 15063 _md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_15063_vtypes' _md_product = ["NtProductWinNt"]
gpl-2.0
-771,911,055,582,781,700
30.026874
119
0.552744
false
3.533505
false
false
false
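The TypeIndex decode in the _OBJECT_HEADER_10 class above is compact enough to illustrate on its own: the byte stored in the object header is XORed with bits 8-15 of the header address and with the nt!ObHeaderCookie value. A minimal, self-contained sketch of that scheme follows; the address, cookie, and index values are invented for illustration and do not come from a real memory image.

# Sketch of the Windows 10 object-header type-index obfuscation used above.
def decode_type_index(header_address, stored_byte, ob_header_cookie):
    # Recover the real type index from the obfuscated byte in the header.
    return ((header_address >> 8) ^ ob_header_cookie ^ stored_byte) & 0xFF

def encode_type_index(header_address, real_index, ob_header_cookie):
    # XOR is its own inverse, so encoding mirrors decoding exactly.
    return ((header_address >> 8) ^ ob_header_cookie ^ real_index) & 0xFF

if __name__ == "__main__":
    addr, cookie, real_index = 0xFFFFA30BD3D2D080, 0x9B, 7   # hypothetical values
    stored = encode_type_index(addr, real_index, cookie)
    assert decode_type_index(addr, stored, cookie) == real_index
    print("stored byte: %#x, decoded index: %d" % (stored, real_index))

The decoded index is what gets looked up in the type_map dictionaries defined later in the same file.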
StefanWinterfeldt/Buildicator
messageSinks/consoleMessageSink.py
1
2127
# Copyright 2014 Stefan Winterfeldt <stefan.winterfeldt@bitz.it> # <stefan.winterfeldt@outlook.de # BITZ GmbH <info@bitz.it> # #This file is part of Buildicator. # #Buildicator is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #Buildicator is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with Buildicator. If not, see <http://www.gnu.org/licenses/>. """This module contains the console message sink. All message sink modules must implement the 'getInstance' method, returning an instance of the message sink class that has been initialized with the appropriate args dictionary. """ from messageSinks.abstractMessageSink import AbstractMessageSink import libs.statusEnum as statusEnum class ConsoleMessageSink(AbstractMessageSink): """A message sink that simply displays messages on the console. This message sink uses the following arguments: errorMessage - The message to display in case of an error status. failureMessage - The message to display in case of a failure status. successMessage - The message to display in case of a success status. """ def __init__(self, args): self.errorMessage = args['errorMessage'] self.failureMessage = args['failureMessage'] self.successMessage = args['successMessage'] def showStatus(self, status): if status == statusEnum.STATUS_ERROR: print(self.errorMessage) elif status == statusEnum.STATUS_FAILURE: print(self.failureMessage) elif status == statusEnum.STATUS_SUCCESS: print(self.successMessage) def getInstance(args): return ConsoleMessageSink(args)
gpl-3.0
237,320,716,383,971,300
38.407407
76
0.720733
false
4.228628
false
false
false
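The docstring above spells out the contract for Buildicator message-sink modules: expose a getInstance(args) factory and implement showStatus(status). The following standalone sketch replays that pattern without depending on the Buildicator package; the status constants stand in for libs.statusEnum and the messages are placeholders.

# Self-contained sketch of the message-sink contract described above.
STATUS_ERROR, STATUS_FAILURE, STATUS_SUCCESS = 0, 1, 2   # stand-ins for libs.statusEnum

class DemoConsoleSink:
    def __init__(self, args):
        self._messages = {
            STATUS_ERROR: args['errorMessage'],
            STATUS_FAILURE: args['failureMessage'],
            STATUS_SUCCESS: args['successMessage'],
        }

    def showStatus(self, status):
        print(self._messages.get(status, 'unknown status'))

def getInstance(args):
    # Every sink module exposes this factory so the loader can construct it generically.
    return DemoConsoleSink(args)

sink = getInstance({'errorMessage': 'build error',
                    'failureMessage': 'build failed',
                    'successMessage': 'build ok'})
sink.showStatus(STATUS_SUCCESS)   # prints "build ok"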
IMIO/django-fixmystreet
django_fixmystreet/api/reports/serializers.py
1
1936
# -*- coding: utf-8 -*- from rest_framework import serializers from . import models class ReportAssignmentAcceptSerializer(serializers.Serializer): reference_id = serializers.CharField() comment = serializers.CharField(required=False) created_at = serializers.DateTimeField() def restore_object(self, attrs, instance=None): # Update existing instance. if instance: instance.reference_id = attrs.get("reference_id", instance.reference_id) instance.comment = attrs.get("comment", instance.comment) instance.created_at = attrs.get("created_at", instance.created_at) return instance # Create new instance. return models.ReportAssignmentAccept(**attrs) class ReportAssignmentRejectSerializer(serializers.Serializer): comment = serializers.CharField() created_at = serializers.DateTimeField() def restore_object(self, attrs, instance=None): # Update existing instance. if instance: instance.comment = attrs.get("comment", instance.comment) instance.created_at = attrs.get("created_at", instance.created_at) return instance # Create new instance. return models.ReportAssignmentReject(**attrs) class ReportAssignmentCloseSerializer(serializers.Serializer): reference_id = serializers.CharField() comment = serializers.CharField(required=False) created_at = serializers.DateTimeField() def restore_object(self, attrs, instance=None): # Update existing instance. if instance: instance.reference_id = attrs.get("reference_id", instance.reference_id) instance.comment = attrs.get("comment", instance.comment) instance.created_at = attrs.get("created_at", instance.created_at) return instance # Create new instance. return models.ReportAssignmentClose(**attrs)
agpl-3.0
-65,268,529,041,024,920
33.571429
84
0.681302
false
4.687651
false
false
false
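These serializers follow the pre-3.0 django-rest-framework API, in which is_valid() calls restore_object() and leaves the unsaved instance on serializer.object. A rough usage sketch, assuming DRF 2.x and a configured Django settings module for this project; the payload values are made up.

# Hypothetical round-trip through one of the serializers above (DRF 2.x style).
from django_fixmystreet.api.reports.serializers import ReportAssignmentAcceptSerializer

payload = {
    'reference_id': 'REF-123',                 # made-up reference
    'comment': 'Assignment accepted',
    'created_at': '2016-05-01T12:00:00Z',
}

serializer = ReportAssignmentAcceptSerializer(data=payload)
if serializer.is_valid():
    accept = serializer.object                 # unsaved ReportAssignmentAccept instance
    accept.save()
else:
    print(serializer.errors)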
nickaugust/pychatkit
clients.py
1
1973
#!/usr/bin/env python import asyncio import logging logger = logging.getLogger("chatkit:" + __name__) class WSClientManager: def __init__(self): self._clients = [] def all(self): return self._clients def add(self, client): logging.info("+ WSClient {}".format(client)) self._clients.append(client) def remove(self, client): logging.info("- WSClient {}".format(client)) self._clients.remove(client) class WSClient: objects = WSClientManager() def __init__(self, server, ws, user=None, token=None): self.server = server self._ws = ws self.user = user self.token = token WSClient.objects.add(self) @asyncio.coroutine def disconnect(self, message): self.server.disconnect(self, message) @asyncio.coroutine def send(self, data): if self._ws.state != "OPEN": logging.info("WS state not OPEN, disconnecting" + str(self.user)) self.disconnect("WS state not OPEN.") return logging.info("> {} {}".format(self.user, data)) yield from self._ws.send(data) @asyncio.coroutine def send_one(self, to_client, data): if to_client._ws.state != "OPEN": to_client.disconnect("WS state not OPEN.") yield from to_client._ws.send(data) logging.info("> {} {}".format(to_client.user, data)) @asyncio.coroutine def send_all(self, from_client, data): for c in WSClient.clients: yield from self.send_one(c, data) @asyncio.coroutine def send_others(self, from_client, data): for c in WSClient.clients: if c != from_client: yield from self.send_one(c, data) @asyncio.coroutine def get_others(self, client): for c in WSClient.clients: resp = "join {}".format(c.user.username) yield from self.send_one(self, resp)
mit
-1,395,320,659,051,184,000
26.788732
61
0.581855
false
3.838521
false
false
false
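WSClient above keeps every connected client in a class-level WSClientManager and fans messages out by iterating that registry. The sketch below reproduces the register-and-broadcast pattern with a stub in place of the real websocket, so it runs without a server; the names and messages are invented.

import asyncio

class ClientManager:
    # Minimal registry mirroring WSClientManager above.
    def __init__(self):
        self._clients = []

    def all(self):
        return list(self._clients)

    def add(self, client):
        self._clients.append(client)

    def remove(self, client):
        self._clients.remove(client)

class StubClient:
    objects = ClientManager()

    def __init__(self, name):
        self.name = name
        StubClient.objects.add(self)

    async def send(self, data):
        # Stand-in for the real websocket send.
        print('> {} {}'.format(self.name, data))

    async def send_others(self, sender, data):
        for client in StubClient.objects.all():
            if client is not sender:
                await client.send(data)

async def main():
    alice = StubClient('alice')
    StubClient('bob')
    StubClient('carol')
    await alice.send_others(alice, 'hello')   # bob and carol receive it

asyncio.run(main())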
likit/BioUtils
fetch_entrez_from_geneid.py
1
1631
'''Selects protein sequences from NCBI that are in a list from Geisha text file. Output is written to standard output. ''' import os import sys import time from Bio import SeqIO, Entrez def parse(infile): '''Return a set of gene IDs from an input file.''' for line in open(infile): geneid = line.split()[0] yield geneid def fetch(geneid): print >> sys.stderr, 'fetching.. gene ID: %s' % geneid handle = Entrez.efetch(db='gene', retmode='xml', id=geneid) xmldata = Entrez.read(handle) product = xmldata[0]['Entrezgene_locus'][0]\ ['Gene-commentary_products'][0] prodtype = product['Gene-commentary_type'].attributes['value'] print >> sys.stderr, 'product type = %s' % (prodtype) seq_gi = xmldata[0]['Entrezgene_locus'][0]\ ['Gene-commentary_products'][0]\ ['Gene-commentary_seqs'][0]\ ['Seq-loc_whole']['Seq-id']\ ['Seq-id_gi'] handle = Entrez.efetch(db='nucleotide', retmode='text', rettype='fasta', id=seq_gi) seq = SeqIO.read(handle, 'fasta') return seq def main(): infile = sys.argv[1] Entrez.email = sys.argv[2] outfile = os.path.splitext(infile)[0] + ".fa" records = [] for geneid in parse(infile): try: records.append(fetch(geneid)) except: print >> sys.stderr, 'Cannot retrieve a sequence' continue time.sleep(3) SeqIO.write(records, outfile, 'fasta') print >> sys.stderr, 'Total sequences = %d' % len(records) if __name__=='__main__': main()
bsd-2-clause
-5,944,984,507,646,516,000
24.888889
66
0.578786
false
3.40501
false
false
false
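fetch() above makes two Entrez round trips per gene: an XML lookup in the gene database to resolve the product's sequence GI, then a FASTA download from the nucleotide database. A minimal sketch of the second step, assuming Biopython is installed and a network connection is available; the e-mail address and accession are placeholders, not values from the original script.

from Bio import Entrez, SeqIO

Entrez.email = 'you@example.org'   # NCBI requires a contact address

# Fetch one nucleotide record as FASTA, as fetch() above does once the
# gene ID has been resolved to a sequence identifier.
handle = Entrez.efetch(db='nucleotide', id='NM_205518.2',
                       rettype='fasta', retmode='text')
record = SeqIO.read(handle, 'fasta')
handle.close()

print(record.id, len(record.seq))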
forseti-security/forseti-security
tests/services/scanner/scanner_base_db.py
1
4263
"""Helper base class for testing scanners.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime from datetime import timedelta import os import unittest.mock as mock from sqlalchemy.orm import sessionmaker from google.cloud.forseti.common.util import date_time from google.cloud.forseti.scanner import scanner from google.cloud.forseti.services.inventory import storage from google.cloud.forseti.services.scanner import dao as scanner_dao from tests.services.util.db import create_test_engine_with_file from tests.unittest_utils import ForsetiTestCase FAKE_INV_INDEX_ID = 'aaa' FAKE_VIOLATION_HASH = (u'111111111111111111111111111111111111111111111111111111' '111111111111111111111111111111111111111111111111111111' '11111111111111111111') FAKE_VIOLATIONS = [ {'resource_id': 'fake_firewall_111', 'full_name': 'full_name_111', 'rule_name': 'disallow_all_ports_111', 'rule_index': 111, 'violation_data': {'policy_names': ['fw-tag-match_111'], 'recommended_actions': {'DELETE_FIREWALL_RULES': ['fw-tag-match_111']}}, 'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_111', 'resource_type': 'firewall_rule', 'resource_data': 'inventory_data_111', 'resource_name': 'fw-tag-match_111', }, {'resource_id': 'fake_firewall_222', 'full_name': 'full_name_222', 'rule_name': 'disallow_all_ports_222', 'rule_index': 222, 'violation_data': {'policy_names': ['fw-tag-match_222'], 'recommended_actions': {'DELETE_FIREWALL_RULES': ['fw-tag-match_222']}}, 'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_222', 'resource_type': 'firewall_rule', 'resource_data': 'inventory_data_222', 'resource_name': 'fw-tag-match_222', } ] # pylint: disable=bad-indentation class ScannerBaseDbTestCase(ForsetiTestCase): """Base class for database centric tests.""" def setUp(self): """Setup method.""" ForsetiTestCase.setUp(self) self.engine, self.dbfile = create_test_engine_with_file() session_maker = sessionmaker() self.session = session_maker(bind=self.engine) storage.initialize(self.engine) scanner_dao.initialize(self.engine) self.session.flush() self.violation_access = scanner_dao.ViolationAccess(self.session) self.inv_index_id1, self.inv_index_id2, self.inv_index_id3 = ( _setup_inv_indices(self.session)) def tearDown(self): """Teardown method.""" os.unlink(self.dbfile) ForsetiTestCase.tearDown(self) def populate_db( self, violations=FAKE_VIOLATIONS, inv_index_id=FAKE_INV_INDEX_ID, scanner_index_id=None, succeeded=['IamPolicyScanner'], failed=[]): """Populate the db with violations. 
Args: violations (dict): the violations to write to the test database inv_index_id (str): the inventory index to use scanner_index_id (str): the scanner index to use succeeded (list): names of scanners that ran successfully failed (list): names of scanners that failed """ if not scanner_index_id: scanner_index_id = scanner.init_scanner_index( self.session, inv_index_id) self.violation_access.create(violations, scanner_index_id) scanner.mark_scanner_index_complete( self.session, scanner_index_id, succeeded, failed) return scanner_index_id def _setup_inv_indices(session): """The method under test returns the newest `ScannerIndex` row.""" with mock.patch.object(date_time, 'get_utc_now_datetime') as mock_date_time: time1 = datetime.utcnow() time2 = time1 + timedelta(minutes=5) time3 = time1 + timedelta(minutes=7) mock_date_time.side_effect = [time1, time2, time3] iidx1 = storage.InventoryIndex.create() iidx2 = storage.InventoryIndex.create() iidx3 = storage.InventoryIndex.create() session.add(iidx1) session.add(iidx2) session.add(iidx3) session.flush() return (iidx1.id, iidx2.id, iidx3.id)
apache-2.0
-3,047,828,724,957,816,300
36.394737
80
0.65658
false
3.606599
true
false
false
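A test case can build directly on the helper above by subclassing ScannerBaseDbTestCase and calling populate_db() once per inventory index it wants violations recorded under. A small hypothetical sketch; the scanner names passed to succeeded/failed are illustrative, and only the API shown in the base class is used.

# Hypothetical test built on ScannerBaseDbTestCase above.
from tests.services.scanner.scanner_base_db import ScannerBaseDbTestCase

class ViolationWriteTest(ScannerBaseDbTestCase):

    def test_populate_two_indices(self):
        idx1 = self.populate_db(inv_index_id=self.inv_index_id1)
        idx2 = self.populate_db(inv_index_id=self.inv_index_id2,
                                succeeded=['FirewallRulesScanner'],
                                failed=['IamPolicyScanner'])
        # Each call creates its own scanner index tied to its inventory index.
        self.assertNotEqual(idx1, idx2)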
vatlab/SOS
src/sos/tasks.py
1
77822
#!/usr/bin/env python3 # # Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center # Distributed under the terms of the 3-clause BSD License. import copy import os import fasteners import pickle import time import lzma import math import struct from enum import Enum from collections import namedtuple from collections.abc import Sequence from datetime import datetime from typing import Union, Dict, List from .utils import ( env, expand_time, linecount_of_file, sample_lines, short_repr, tail_of_file, pretty_size, expand_size, format_HHMMSS, DelayedAction, format_duration, ) from .targets import sos_targets monitor_interval = 5 resource_monitor_interval = 60 class TaskParams(object): """A parameter object that encaptulates parameters sending to task executors. This would makes the output of workers, especially in the web interface much cleaner (issue #259)""" def __init__(self, name, global_def, task, sos_dict, tags): self.name = name self.global_def = global_def self.task = task self.sos_dict = sos_dict self.tags = tags # remove builtins that could be saved in a dictionary if "CONFIG" in self.sos_dict and "__builtins__" in self.sos_dict["CONFIG"]: self.sos_dict["CONFIG"].pop("__builtins__") def __repr__(self): return self.name class MasterTaskParams(TaskParams): def __init__(self, num_workers=None): self.ID = "t0" self.name = self.ID self.global_def = "" self.task = "" self.sos_dict = { "_runtime": {"num_workers": num_workers}, "_input": sos_targets(), "_output": sos_targets(), "_depends": sos_targets(), "step_input": sos_targets(), "step_output": sos_targets(), "step_depends": sos_targets(), "step_name": "", "_index": 0, } self.tags = [] # a collection of tasks that will be executed by the master task self.task_stack = [] def _parse_num_workers(self, num_workers): # return number of nodes and workers if isinstance(num_workers, Sequence) and len(num_workers) >= 1: val = str(num_workers[0]) n_workers = val.rsplit(":", 1)[-1] if ":" in val else val n_nodes = len(num_workers) elif isinstance(num_workers, str): n_workers = ( num_workers.rsplit(":", 1)[-1] if ":" in num_workers else num_workers ) n_nodes = 1 elif isinstance(num_workers, int): n_workers = num_workers n_nodes = 1 elif num_workers is None: n_workers = 1 n_nodes = 1 else: raise RuntimeError( f"Unacceptable value for parameter trunk_workers {num_workers}" ) try: n_workers = int(n_workers) except Exception: raise ValueError( f"Unacceptable value for option trunk_workers {num_workers}" ) if n_workers <= 0: raise ValueError( f"Unacceptable value for option trunk_workers {num_workers}" ) return n_nodes, n_workers def num_tasks(self): return len(self.task_stack) def push(self, task_id, params): # update walltime, cores, and mem # right now we require all tasks to have same resource requirment, which is # quite natural because they are from the same step # # update input, output, and depends # # walltime etc n_nodes, n_workers = self._parse_num_workers( self.sos_dict["_runtime"]["num_workers"] ) if not self.task_stack: for key in ( "walltime", "max_walltime", "cores", "nodes", "max_cores", "mem", "max_mem", "name", "workdir", "verbosity", "sig_mode", "run_mode", ): if ( key in params.sos_dict["_runtime"] and params.sos_dict["_runtime"][key] is not None ): self.sos_dict["_runtime"][key] = params.sos_dict["_runtime"][key] self.sos_dict["step_name"] = params.sos_dict["step_name"] self.tags = params.tags else: for key in ( "walltime", "max_walltime", "cores", "max_cores", "mem", "max_mem", "name", "workdir", ): val0 = 
self.task_stack[0][1].sos_dict["_runtime"].get(key, None) val = params.sos_dict["_runtime"].get(key, None) if val0 != val: raise ValueError(f"All tasks should have the same resource {key}") if val0 is None: continue # If there are multiple nodes and multiple workers, there are # n_workers * n_nodes workers at the same time, so the jobs # will be completed in n_batches n_batches = math.ceil( (len(self.task_stack) + 1) / (n_workers * n_nodes) ) if key == "walltime": # the real walltime would be the total time on one node self.sos_dict["_runtime"]["walltime"] = format_HHMMSS( n_batches * expand_time(val0) ) elif key == "mem": # number of columns * mem for each + 100M for master self.sos_dict["_runtime"]["mem"] = n_workers * expand_size(val0) elif key == "cores": self.sos_dict["_runtime"]["cores"] = n_workers * val0 elif key == "name": self.sos_dict["_runtime"][ "name" ] = f"{val0}_{len(self.task_stack) + 1}" self.tags.extend(params.tags) # if cores is unspecified but there are more than one workers if ( "cores" not in self.sos_dict["_runtime"] and n_workers is not None and n_workers > 1 ): self.sos_dict["_runtime"]["cores"] = n_workers # # input, output, preserved vars etc for key in ["_input", "_output", "_depends"]: if key in params.sos_dict and isinstance(params.sos_dict[key], sos_targets): if key == "__builtins__": continue # do not extend duplicated input etc self.sos_dict[key].extend(params.sos_dict[key]) # self.task_stack.append([task_id, params]) self.tags = sorted(list(set(self.tags))) # id_prefix = f't{len(self.task_stack)}' self.ID = f"{id_prefix}{self.task_stack[0][0][:-(len(id_prefix))]}" self.name = self.ID def finalize(self): if not self.task_stack: return common_dict = None common_keys = set() for _, params in self.task_stack: if common_dict is None: common_dict = params.sos_dict common_keys = set(params.sos_dict.keys()) else: common_keys = { key for key in common_keys if key in params.sos_dict and common_dict[key] == params.sos_dict[key] } if not common_keys: break # if there is only one subtask, _output will be moved out of subtasks and makes # the retrival of outputs difficult. common_keys.discard("_output") self.common_dict = {x: common_dict[x] for x in common_keys} for _, params in self.task_stack: params.sos_dict = { k: v for k, v in params.sos_dict.items() if k not in common_keys } # n_nodes = self._parse_num_workers(self.sos_dict["_runtime"]["num_workers"])[0] # trunk_workers and cores cannot be specified together, so if n_nodes > 1, # nodes should not have been specified. if n_nodes is not None and n_nodes > 1: self.sos_dict["_runtime"]["nodes"] = n_nodes return self def combine_results(task_id, results): # now we collect result all_res = { "ret_code": 0, "output": None, "subtasks": {}, "shared": {}, "skipped": 0, "signature": {}, } for res in results: tid = res["task"] all_res["subtasks"][tid] = res if "exception" in res: all_res["exception"] = res["exception"] all_res["ret_code"] += 1 continue all_res["ret_code"] += res["ret_code"] if all_res["output"] is None: all_res["output"] = copy.deepcopy(res["output"]) else: try: all_res["output"].extend(res["output"], keep_groups=True) except Exception: env.logger.warning( f"Failed to extend output {all_res['output']} with {res['output']}" ) all_res["shared"].update(res["shared"]) # does not care if one or all subtasks are executed or skipped. 
all_res["skipped"] += res.get("skipped", 0) if "signature" in res: all_res["signature"].update(res["signature"]) if all_res["ret_code"] != 0: if all_res["ret_code"] == len(results): if env.config["run_mode"] == "run": env.logger.info(f"All {len(results)} tasks in {task_id} ``failed``") else: env.logger.debug(f"All {len(results)} tasks in {task_id} ``failed``") else: if env.config["run_mode"] == "run": env.logger.info( f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``' ) else: env.logger.debug( f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``' ) # if some failed, some skipped, not skipped if "skipped" in all_res: all_res.pop("skipped") elif all_res["skipped"]: if all_res["skipped"] == len(results): if env.config["run_mode"] == "run": env.logger.info( f"All {len(results)} tasks in {task_id} ``ignored`` or skipped" ) else: env.logger.debug( f"All {len(results)} tasks in {task_id} ``ignored`` or skipped" ) else: # if only partial skip, we still save signature and result etc if env.config["run_mode"] == "run": env.logger.info( f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped' ) else: env.logger.debug( f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped' ) all_res.pop("skipped") else: if env.config["run_mode"] == "run": env.logger.info(f"All {len(results)} tasks in {task_id} ``completed``") else: env.logger.debug(f"All {len(results)} tasks in {task_id} ``completed``") return all_res class TaskStatus(Enum): new = 0 pending = 1 submitted = 2 running = 3 aborted = 4 failed = 5 completed = 6 class TaskFile(object): """ The task file has the following format: 1. A binary header with the information of the structure of the file with field defined by TaskHeader 2. compressed pickled param of task 3. compressed pulse file 4. compressed pickled result 5. compressed stdout 6. compressed stderr 7. 
compressed pickled signatures """ TaskHeader_v1 = namedtuple( "TaskHeader", "version status last_modified " "new_time pending_time submitted_time running_time aborted_time failed_time completed_time " "params_size pulse_size stdout_size stderr_size result_size signature_size " "tags", ) TaskHeader_v2 = namedtuple( "TaskHeader", "version status last_modified " "new_time pending_time submitted_time running_time aborted_time failed_time completed_time " "params_size shell_size pulse_size stdout_size stderr_size result_size signature_size " "tags", ) TaskHeader_v3 = namedtuple( "TaskHeader", "version status last_modified " "new_time pending_time submitted_time running_time aborted_time failed_time completed_time " "params_size runtime_size shell_size pulse_size stdout_size stderr_size result_size signature_size " "tags", ) TaskHeader = TaskHeader_v3 header_fmt_v1 = "!2h 8d 6i 128s" header_fmt_v2 = "!2h 8d 7i 124s" header_fmt_v3 = "!2h 8d 8i 120s" header_fmt = header_fmt_v3 header_size = 220 # struct.calcsize(header_fmt) tags_offset = [92, 96, 100] # struct.calcsize(status_fmt + '6i') tags_size = [128, 124, 120] def __init__(self, task_id: str): self.task_id = task_id self.task_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task_id + ".task" ) def save(self, params): if os.path.isfile(self.task_file): if self.status == "running": env.logger.debug(f"Task {self.task_id} is running and is not updated") return # keep original stuff but update params, which could contain # new runtime info self.params = params return # updating job_file will not change timestamp because it will be Only # the update of runtime info now = time.time() # we keep in both places because params.tags is the only place to have it for subtasks tags = params.tags params_block = lzma.compress(pickle.dumps(params)) # env.logger.error(f'saving {self.task_id} params of size {len(params_block)}') header = self.TaskHeader( version=3, status=TaskStatus.new.value, last_modified=now, new_time=now, pending_time=0, running_time=0, submitted_time=0, aborted_time=0, failed_time=0, completed_time=0, params_size=len(params_block), runtime_size=0, shell_size=0, pulse_size=0, stdout_size=0, stderr_size=0, result_size=0, signature_size=0, tags=" ".join(sorted(tags)).ljust(128).encode(), ) with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "wb+") as fh: self._write_header(fh, header) fh.write(params_block) def exists(self): return os.path.isfile(self.task_file) def _reset(self, fh): # remove result, input, output etc and set the status of the task to new header = self._read_header(fh) now = time.time() header = header._replace( version=2, status=TaskStatus.new.value, last_modified=now, new_time=now, pending_time=0, submitted_time=0, running_time=0, aborted_time=0, failed_time=0, completed_time=0, runtime_size=0, shell_size=0, pulse_size=0, stdout_size=0, stderr_size=0, result_size=0, signature_size=0, ) self._write_header(fh, header) fh.truncate(self.header_size + header.params_size) return header def reset(self): # remove result, input, output etc and set the status of the task to new with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "r+b") as fh: self._reset(fh) def _read_header(self, fh): fh.seek(0, 0) data = fh.read(self.header_size) if struct.unpack("!h", data[:2])[0] == 1: header = self.TaskHeader_v1._make(struct.unpack(self.header_fmt_v1, data)) if header.version not in (1, 2, 3): raise RuntimeError( 
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file." ) return self.TaskHeader( runtime_size=0, shell_size=0, **header._asdict() )._replace(version=3) if struct.unpack("!h", data[:2])[0] == 2: header = self.TaskHeader_v2._make(struct.unpack(self.header_fmt_v2, data)) if header.version not in (1, 2, 3): raise RuntimeError( f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file." ) return self.TaskHeader(runtime_size=0, **header._asdict())._replace( version=3 ) header = self.TaskHeader._make(struct.unpack(self.header_fmt, data)) if header.version not in (1, 2, 3): raise RuntimeError( f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file." ) return header def _write_header(self, fh, header): fh.seek(0, 0) fh.write(struct.pack(self.header_fmt, *header)) def _get_content(self, exts): if isinstance(exts, str): exts = [exts] content = b"" for ext in exts: filename = self.task_file[:-5] + ext if not os.path.isfile(filename): continue with open(filename, "rb") as fh: content += fh.read() if not content: return b"" return lzma.compress(content) def add_outputs(self, keep_result=False): # get header shell = self._get_content(".sh") pulse = self._get_content(".pulse") stdout = self._get_content([".out", ".sosout"]) stderr = self._get_content([".err", ".soserr"]) with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "r+b") as fh: header = self._read_header(fh) if header.result_size != 0: if not keep_result: result_size = 0 signature_size = 0 else: result_size = header.result_size signature_size = header.signature_size fh.seek( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size, 0, ) result = fh.read(header.result_size) signature = fh.read(header.signature_size) else: result_size = 0 signature_size = 0 header = header._replace( shell_size=len(shell), pulse_size=len(pulse), stdout_size=len(stdout), stderr_size=len(stderr), result_size=result_size, signature_size=signature_size, ) self._write_header(fh, header) fh.seek(self.header_size + header.params_size + header.runtime_size, 0) if shell: fh.write(shell) if pulse: fh.write(pulse) if stdout: fh.write(stdout) if stderr: fh.write(stderr) if result_size > 0: fh.write(result) if signature_size > 0: fh.write(signature) def add_result(self, result: dict = {}): if not result: params = self._get_params() # this is a master task, get all sub task IDs if hasattr(params, "task_stack"): missing_tasks = set([x[0] for x in params.task_stack]) # cache_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".cache" ) results = [] if os.path.isfile(cache_file): try: with open(cache_file, "rb") as f: while True: res = pickle.load(f) if not "task" in res: # something is wrong break missing_tasks.remove(res["task"]) results.append(res) os.remove(cache_file) except Exception: # we read until an error occurs pass if not results: # if there is no result at all, do not save result return else: # now, if we have some results, we need to fill the rest of the aborted ones results.extend( [ { "task": t, "ret_code": 2, "shared": {}, "exception": RuntimeError(f"Subtask {t} is aborted"), } for t in missing_tasks ] ) result = combine_results(self.task_id, results) else: # single task, no result, do not save return # add signature if exists signature = 
result.get("signature", {}) result.pop("signature", None) # result_block = lzma.compress(pickle.dumps(result)) signature_block = lzma.compress(pickle.dumps(signature)) if signature else b"" with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "r+b") as fh: header = self._read_header(fh) header = header._replace( result_size=len(result_block), signature_size=len(signature_block), ) self._write_header(fh, header) fh.seek( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size ) fh.write(result_block) if signature: fh.write(signature_block) def _get_info(self): with open(self.task_file, "rb") as fh: return self._read_header(fh) def _set_info(self, info): with open(self.task_file, "r+b") as fh: fh.write(struct.pack(self.header_fmt, *info)) info = property(_get_info, _set_info) def has_shell(self): return self.info.shell_size > 0 def has_pulse(self): return self.info.pulse_size > 0 def has_result(self): return self.info.result_size > 0 def has_stdout(self): return self.info.stdout_size > 0 def has_stderr(self): return self.info.stderr_size > 0 def has_signature(self): return self.info.signature_size > 0 def _get_params(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.params_size == 0 and header.runtime_size == 0: return {} fh.seek(self.header_size, 0) if header.params_size == 0: return {} else: try: return pickle.loads(lzma.decompress(fh.read(header.params_size))) except Exception as e: raise RuntimeError( f"Failed to obtain params of task {self.task_id}: {e}" ) def _set_params(self, params): params_block = lzma.compress(pickle.dumps(params)) # env.logger.error(f'updating {self.task_id} params of size {len(params_block)}') with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "r+b") as fh: header = self._read_header(fh) if len(params_block) == header.params_size: fh.seek(self.header_size, 0) fh.write(params_block) else: fh.read(header.params_size) runtime = fh.read(header.runtime_size) shell = fh.read(header.shell_size) pulse = fh.read(header.pulse_size) stdout = fh.read(header.stdout_size) stderr = fh.read(header.stderr_size) result = fh.read(header.result_size) signature = fh.read(header.signature_size) header = header._replace(params_size=len(params_block)) self._write_header(fh, header) fh.write(params_block) if runtime: fh.write(runtime) if shell: fh.write(shell) if pulse: fh.write(pulse) if stdout: fh.write(stdout) if stderr: fh.write(stderr) if result: fh.write(result) if signature: fh.write(signature) fh.truncate( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size + header.result_size + header.signature_size ) params = property(_get_params, _set_params) def _get_runtime(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.runtime_size == 0: return {} fh.seek(self.header_size + header.params_size, 0) try: return pickle.loads(lzma.decompress(fh.read(header.runtime_size))) except Exception as e: env.logger.error( f"Failed to obtain runtime of task {self.task_id}: {e}" ) return {"_runtime": {}} def _set_runtime(self, runtime): runtime_block = lzma.compress(pickle.dumps(runtime)) # env.logger.error(f'updating {self.task_id} params of size {len(params_block)}') with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id 
+ ".lck") ): with open(self.task_file, "r+b") as fh: header = self._read_header(fh) if len(runtime_block) == header.runtime_size: fh.seek(self.header_size + header.params_size, 0) fh.write(runtime_block) else: params = fh.read(header.params_size) fh.seek( self.header_size + header.params_size + header.runtime_size, 0 ) shell = fh.read(header.shell_size) if header.shell_size else b"" pulse = fh.read(header.pulse_size) if header.pulse_size else b"" stdout = fh.read(header.stdout_size) if header.stdout_size else b"" stderr = fh.read(header.stderr_size) if header.stderr_size else b"" result = fh.read(header.result_size) if header.result_size else b"" signature = ( fh.read(header.signature_size) if header.signature_size else b"" ) header = header._replace(runtime_size=len(runtime_block)) self._write_header(fh, header) fh.write(params) fh.write(runtime_block) if shell: fh.write(shell) if pulse: fh.write(pulse) if stdout: fh.write(stdout) if stderr: fh.write(stderr) if result: fh.write(result) if signature: fh.write(signature) fh.truncate( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size + header.result_size + header.signature_size ) runtime = property(_get_runtime, _set_runtime) def get_params_and_runtime(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.params_size == 0 and header.runtime_size == 0: return {} fh.seek(self.header_size, 0) if header.params_size == 0: params = {} else: try: params = pickle.loads(lzma.decompress(fh.read(header.params_size))) except Exception as e: env.logger.error( f"Failed to obtain params with runtime of task {self.task_id}: {e}" ) params = {} if "_runtime" not in params.sos_dict: params.sos_dict["_runtime"] = {} if header.runtime_size > 0: try: runtime = pickle.loads( lzma.decompress(fh.read(header.runtime_size)) ) except Exception as e: env.logger.error( f"Failed to obtain runtime of task {self.task_id}: {e}" ) runtime = {"_runtime": {}} else: runtime = {"_runtime": {}} return params, runtime def _get_status(self): if not os.path.isfile(self.task_file): return "missing" try: with open(self.task_file, "rb") as fh: fh.seek(2, 0) return TaskStatus(struct.unpack("!h", fh.read(2))[0]).name except Exception as e: env.logger.warning( f"Incompatible task file {self.task_file} is removed. This might was most likely generated by a previous version of SoS but please report a bug if you can reproduce this warning message: {e}" ) os.remove(self.task_file) def _get_version(self): with open(self.task_file, "rb") as fh: fh.seek(0, 0) return struct.unpack("!h", fh.read(2))[0] version = property(_get_version) def _get_last_updated(self): with open(self.task_file, "rb") as fh: fh.seek(4, 0) return struct.unpack("!d", fh.read(8))[0] last_updated = property(_get_last_updated) def _set_status(self, status): with fasteners.InterProcessLock( os.path.join(env.temp_dir, self.task_id + ".lck") ): with open(self.task_file, "r+b") as fh: fh.seek(2, 0) if status == "skipped": # special status, set completed_time = running_time # to make sure duration is zero now = time.time() sts = TaskStatus["completed"].value # update status and last modified fh.write(struct.pack("!hd", sts, now)) # also set 'run' fh.seek(3 * 8, 1) fh.write(struct.pack("!d", now)) # from the current location, move by status fh.seek(2 * 8, 1) fh.write(struct.pack("!d", now)) else: if status == "running": # setting to running status ... 
refresh the pulse file pulse_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".pulse", ) with open(pulse_file, "w") as pd: pd.write(f"#task: {self.task_id}\n") pd.write( f'#started at {datetime.now().strftime("%A, %d. %B %Y %I:%M%p")}\n#\n' ) # wait for the pulse file to be created before updating task status while True: if os.path.isfile(pulse_file): break else: time.sleep(0.01) # if completed, we make sure that the duration will not # be zero even if the task is completed very rapidly now = time.time() + (0.01 if status == "completed" else 0) sts = TaskStatus[status].value # update status and last modified fh.write(struct.pack("!hd", sts, now)) # from the current location, move by status fh.seek(sts * 8, 1) fh.write(struct.pack("!d", now)) # if restarting the task, make sure all irrelevant files # are removed or finishing tasks. if status in ("aborted", "completed", "failed", "pending"): # terminal status remove_task_files( self.task_id, [ ".sh", ".job_id", ".sosout", ".soserr", ".out", ".err", ".pulse", ".cache", ], ) status = property(_get_status, _set_status) def _get_tags(self): try: with open(self.task_file, "rb") as fh: fh.seek(0, 0) ver = struct.unpack("!h", fh.read(2))[0] fh.seek(self.tags_offset[ver - 1], 0) return fh.read(self.tags_size[ver - 1]).decode().strip() except Exception: raise RuntimeError( f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file." ) def _set_tags(self, tags: list): with open(self.task_file, "r+b") as fh: fh.seek(0, 0) ver = struct.unpack("!h", fh.read(2))[0] fh.seek(self.tags_offset[ver - 1], 0) fh.write(" ".join(sorted(tags)).ljust(self.tags_size[ver - 1]).encode()) tags = property(_get_tags, _set_tags) def _get_shell(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.shell_size == 0: return "" fh.seek(self.header_size + header.params_size + header.runtime_size, 0) try: return lzma.decompress(fh.read(header.shell_size)).decode() except Exception as e: env.logger.warning(f"Failed to decode shell: {e}") return "" shell = property(_get_shell) def _get_pulse(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.pulse_size == 0: return "" fh.seek( self.header_size + header.params_size + header.runtime_size + header.shell_size, 0, ) try: return lzma.decompress(fh.read(header.pulse_size)).decode() except Exception as e: env.logger.warning(f"Failed to decode pulse: {e}") return "" pulse = property(_get_pulse) def _get_stdout(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.stdout_size == 0: return "" fh.seek( self.header_size + header.params_size + header.runtime_size + header.pulse_size + header.shell_size, 0, ) try: return lzma.decompress(fh.read(header.stdout_size)).decode() except Exception as e: env.logger.warning(f"Failed to decode stdout: {e}") return "" stdout = property(_get_stdout) def _get_stderr(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.stderr_size == 0: return "" fh.seek( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size, 0, ) try: return lzma.decompress(fh.read(header.stderr_size)).decode() except Exception as e: env.logger.warning(f"Failed to decode stderr: {e}") return "" stderr = property(_get_stderr) def _get_result(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.result_size == 0: return {} fh.seek( 
self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size, 0, ) try: return pickle.loads(lzma.decompress(fh.read(header.result_size))) except Exception as e: env.logger.warning(f"Failed to decode result: {e}") return {"ret_code": 1} result = property(_get_result) def _get_signature(self): with open(self.task_file, "rb") as fh: header = self._read_header(fh) if header.signature_size == 0: return {} fh.seek( self.header_size + header.params_size + header.runtime_size + header.shell_size + header.pulse_size + header.stdout_size + header.stderr_size + header.result_size, 0, ) try: return pickle.loads(lzma.decompress(fh.read(header.signature_size))) except Exception as e: env.logger.warning(f"Failed to decode signature: {e}") return {"ret_code": 1} signature = property(_get_signature) def tags_created_start_and_duration(self, formatted=False): try: with open(self.task_file, "rb") as fh: header = self._read_header(fh) try: tags = header.tags.decode().strip() except Exception: raise ValueError( f"{self.task_file} is in a format that is no longer supported." ) ct = header.new_time if header.running_time != 0: st = header.running_time if TaskStatus(header.status) == TaskStatus.running: dr = time.time() - st else: dr = header.last_modified - st else: return ( tags, ("Created " + format_duration(time.time() - ct, True) + " ago") if formatted else ct, "", "", ) if not formatted: return tags, ct, st, dr # return ( tags, "Created " + format_duration(time.time() - ct, True) + " ago", "Started " + format_duration(time.time() - st) + " ago", ("Ran for " + format_duration(int(dr))) if dr > 0 else "Signature checked", ) except Exception: # missing tag file or something went wrong return "", "", "", "" def taskDuration(task): filename = os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{task}.task") return os.path.getatime(filename) - os.path.getmtime(filename) def remove_task_files(task: str, exts: list): task_dir = os.path.join(os.path.expanduser("~"), ".sos", "tasks") for ext in exts: filename = os.path.join(task_dir, task + ext) if os.path.isfile(filename): try: os.remove(filename) except Exception: # if the file cannot be removed now, we use a thread to wait a # bit and try to remove it later. The function should not # wait for the thread though try: DelayedAction(os.remove, filename) except Exception: pass def check_task(task, hint={}) -> Dict[str, Union[str, Dict[str, float]]]: # when testing. if the timestamp is 0, the file does not exist originally, it should # still does not exist. Otherwise the file should exist and has the same timestamp if ( hint and hint["status"] not in ("pending", "running") and all( (os.path.isfile(f) and os.stat(f).st_mtime == v) if v else (not os.path.isfile(f)) for f, v in hint["files"].items() ) ): return {} # status of the job, please refer to https://github.com/vatlab/SOS/issues/529 # for details. # task_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".task") if not os.path.isfile(task_file): return dict(status="missing", files={task_file: 0}) mtime = os.stat(task_file).st_mtime def task_changed(): return os.stat(task_file).st_mtime != mtime tf = TaskFile(task) status = tf.status if status in ["failed", "completed", "aborted"]: # thse are terminal states. 
We simply return them # only change of the task file will trigger recheck of status stdout_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ".sosout" ) stderr_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ".soserr" ) # 1242 if os.path.isfile(stdout_file) or os.path.isfile(stderr_file): tf.add_outputs(keep_result=True) # 1323 tf.add_result() remove_task_files(task, [".sosout", ".soserr", ".out", ".err"]) # stdout and stderr files should not exist status_files = { task_file: os.stat(task_file).st_mtime, stdout_file: 0, stderr_file: 0, } return dict(status=status, files=status_files) pulse_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".pulse") # check the existence and validity of .pulse file if os.path.isfile(pulse_file): try: status_files = { task_file: os.stat(task_file).st_mtime, pulse_file: os.stat(pulse_file).st_mtime, } # if we have hint, we know the time stamp of last # status file. if ( not hint or pulse_file not in hint["files"] or status_files[pulse_file] != hint["files"][pulse_file] ): return dict(status="running", files=status_files) elapsed = time.time() - status_files[pulse_file] if elapsed < 60: return dict(status="running", files=status_files) syserr_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ".err" ) # if the system does not return any error message, write sos-specific one if os.path.isfile(syserr_file) and os.path.getsize(syserr_file) > 0: try: with open(syserr_file) as syserr: env.logger.warning("".join(syserr.readlines()[-5:])) except Exception as e: env.logger.warning( f"{task} is suspected to be killed but {syserr_file} cannot be read: {e}" ) else: soserr_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ".soserr" ) with open(soserr_file, "a") as err: err.write( f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed." ) env.logger.warning( f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed." ) tf.add_outputs() # 1323 tf.add_result() # assume aborted tf.status = "aborted" return dict( status="aborted", files={task_file: os.stat(task_file).st_mtime, pulse_file: 0}, ) except Exception: # the pulse file could disappear when the job is completed. if task_changed(): return check_task(task) raise elif status == "running": # starting of task will create a pulse file. If the pulse file is gone # and the status is still showing as running, something is wrong. # if there is no pulse file . tf.status = "aborted" with open( os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"), "a", ) as err: err.write(f"Task {task} considered as aborted due to missing pulse file.") env.logger.warning( f"Task {task} considered as aborted due to missing pulse file." 
) tf.add_outputs() # 1323 tf.add_result() return dict( status="aborted", files={task_file: os.stat(task_file).st_mtime, pulse_file: 0}, ) # if there is no pulse file job_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".sh") def has_job(): job_id_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ".job_id" ) return ( os.path.isfile(job_file) and os.stat(job_file).st_mtime >= os.stat(task_file).st_mtime and os.path.isfile(job_id_file) and os.stat(job_id_file).st_mtime >= os.stat(job_file).st_mtime ) if has_job(): try: if status != "submitted": tf.status = "submitted" return dict( status="submitted", files={ task_file: os.stat(task_file).st_mtime, job_file: os.stat(job_file).st_mtime, pulse_file: 0, }, ) except Exception: # the pulse file could disappear when the job is completed. if task_changed(): return check_task(task) else: raise else: # status not changed try: if ( hint and hint["status"] in ("new", "pending") and hint["files"][task_file] == os.stat(task_file).st_mtime ): return {} else: return dict( status=status, files={task_file: os.stat(task_file).st_mtime, job_file: 0}, ) except Exception: # the pulse file could disappear when the job is completed. if task_changed(): return check_task(task) else: raise def check_tasks(tasks, is_all: bool): if not tasks: return {} cache_file: str = os.path.join( os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle" ) # status_cache: Dict = {} if os.path.isfile(cache_file): try: with fasteners.InterProcessLock(cache_file + "_"): with open(cache_file, "rb") as cache: status_cache = pickle.load(cache) except Exception: # if the cache file is corrupted, remove it. #1275 os.remove(cache_file) # at most 20 threads from multiprocessing.pool import ThreadPool as Pool p = Pool(min(20, len(tasks))) # the result can be {} for unchanged, or real results raw_status = p.starmap(check_task, [(x, status_cache.get(x, {})) for x in tasks]) # if check all, we clear the cache and record all existing tasks has_changes: bool = any(x for x in raw_status) if has_changes: if is_all: status_cache = { k: v if v else status_cache[k] for k, v in zip(tasks, raw_status) } else: status_cache.update({k: v for k, v in zip(tasks, raw_status) if v}) with fasteners.InterProcessLock(cache_file + "_"): with open(cache_file, "wb") as cache: pickle.dump(status_cache, cache) return status_cache def print_task_status( tasks, check_all=False, verbosity: int = 1, html: bool = False, numeric_times=False, age=None, tags=None, status=None, ): # # verbose is ignored for now # if not check_all and not tasks: # from .signatures import WorkflowSignatures # workflow_signatures = WorkflowSignatures() # tasks = [ # x for x in workflow_signatures.tasks() if os.path.isfile( # os.path.join( # os.path.expanduser('~'), '.sos', 'tasks', x + '.task')) # ] import glob all_tasks: List = [] if check_all: tasks = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task") ) all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks] if not all_tasks: return else: for t in tasks: matched_names = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task") ) matched = [ (os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched_names ] if not matched: all_tasks.append((t, None)) else: all_tasks.extend(matched) if age is not None: age = expand_time(age, default_unit="d") if age > 0: all_tasks = [x for x in all_tasks if time.time() - x[1] >= age] else: all_tasks = [x for x in all_tasks if time.time() - x[1] <= 
-age] all_tasks = sorted(list(set(all_tasks)), key=lambda x: 0 if x[1] is None else x[1]) if tags: all_tasks = [ x for x in all_tasks if TaskFile(x[0]).exists() and any(y in tags for y in TaskFile(x[0]).tags.split()) ] if not all_tasks: env.logger.debug("No matching tasks are identified.") return raw_status = check_tasks([x[0] for x in all_tasks], check_all) obtained_status = [raw_status[x[0]]["status"] for x in all_tasks] # # automatically remove non-running tasks that are more than 30 days old to_be_removed = [ t for s, (t, d) in zip(obtained_status, all_tasks) if d is not None and time.time() - d > 30 * 24 * 60 * 60 and s != "running" ] if status: all_tasks = [x for x, s in zip(all_tasks, obtained_status) if s in status] obtained_status = [x for x in obtained_status if x in status] # from .monitor import summarizeExecution if html: # HTML output from .utils import isPrimitive import pprint print('<table width="100%" class="resource_table">') def row(th=None, td=None): if td is None: print(f'<tr><th align="right" width="30%">{th}</th><td></td></tr>') elif th is None: print(f'<tr><td colspan="2" align="left" width="30%">{td}</td></tr>') else: print( f'<tr><th align="right" width="30%">{th}</th><td align="left"><div class="one_liner">{td}</div></td></tr>' ) for s, (t, d) in zip(obtained_status, all_tasks): tf = TaskFile(t) ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True) row("ID", t) row("Status", s) row("Created", ct) if st: row("Started", st) if dr: row("Duration", dr) params = tf.params row("Task") if hasattr(params, "task_stack"): row( td=f'<pre style="text-align:left">{params.task_stack[0][1].task}</pre>' ) else: row(td=f'<pre style="text-align:left">{params.task}</pre>') row("Tags") row(td=f'<pre style="text-align:left">{tf.tags}</pre>') if params.global_def: row("Global") row(td=f'<pre style="text-align:left">{params.global_def}</pre>') # row('Environment') global_runtime = tf.runtime["_runtime"] job_vars = params.sos_dict job_vars["_runtime"].update(global_runtime) for k in sorted(job_vars.keys()): v = job_vars[k] if not k.startswith("__") and not k == "CONFIG": if k == "_runtime": for _k, _v in v.items(): if isPrimitive(_v) and _v not in (None, "", [], (), {}): row(_k, _v) elif isPrimitive(v) and v not in (None, "", [], (), {}): row( k, f'<pre style="text-align:left">{pprint.pformat(v)}</pre>' ) pulse_content = "" if tf.has_result(): if s not in ("pending", "submitted", "running"): res = tf.result if "start_time" in res and "end_time" in res: row( "Duration", format_duration(res["end_time"] - res["start_time"]), ) if "peak_cpu" in res: row("Peak CPU", f'{res["peak_cpu"]*100} %') if "peak_mem" in res: row("Peak mem", pretty_size(res["peak_mem"])) # this is a placeholder for the frontend to draw figure row(td=f'<div id="res_{t}"></div>') elif s == "running": pulse_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", t + ".pulse" ) if os.path.isfile(pulse_file): with open(pulse_file) as pulse: pulse_content = pulse.read() summary = summarizeExecution(t, pulse_content, status=s) if summary: # row('Execution') for line in summary.split("\n"): fields = line.split(None, 1) if fields[0] == "task": continue row(fields[0], "" if fields[1] is None else fields[1]) # this is a placeholder for the frontend to draw figure row(td=f'<div id="res_{t}"></div>') if s not in ("pending", "submitted", "running"): # if tf.has_shell(): shell = tf.shell numLines = shell.count("\n") row("shell", f"{numLines} lines") row(td=f'<small><pre 
style="text-align:left">{shell}</pre></small>') if tf.has_stdout(): stdout = tf.stdout numLines = stdout.count("\n") row( "stdout", "(empty)" if numLines == 0 else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}', ) if numLines > 200: stdout = "\n".join(stdout.splitlines()[-200:]) row( td=f'<small><pre style="text-align:left">{stdout}</pre></small>' ) if tf.has_stderr(): stderr = tf.stderr numLines = stderr.count("\n") row( "stderr", "(empty)" if numLines == 0 else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}', ) if numLines > 200: stderr = "\n".join(stderr.splitlines()[-200:]) row( td=f'<small><pre style="text-align:left">{stderr}</pre></small>' ) elif s == "running": files = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", t + ".*") ) for f in sorted( [ x for x in files if os.path.splitext(x)[-1] not in (".task", ".pulse") ] ): numLines = linecount_of_file(f) rhead = os.path.splitext(f)[-1] if rhead == ".sh": rhead = "shell" elif rhead == ".job_id": rhead = "job ID" elif rhead == ".err": rhead = "stderr" elif rhead == ".out": rhead = "stdout" elif rhead == ".soserr": rhead = "sos error" elif rhead == ".sosout": rhead = "sos output" row( rhead, "(empty)" if numLines == 0 else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}', ) try: row( td=f'<small><pre style="text-align:left">{tail_of_file(f, 200, ansi2html=True)}</pre></small>' ) except Exception: row( td='<small><pre style="text-align:left">ignored.</pre><small>' ) print("</table>") # if not pulse_content: return # A sample of 400 point should be enough to show the change of resources lines = sample_lines(pulse_content, 400).splitlines() if len(lines) <= 2: return # read the pulse file and plot it # time proc_cpu proc_mem children children_cpu children_mem try: etime = [] cpu = [] mem = [] for line in lines: if line.startswith("#") or not line.strip(): continue fields = line.split() etime.append(float(fields[0])) cpu.append(float(fields[1]) + float(fields[4])) mem.append(float(fields[2]) / 1e6 + float(fields[5]) / 1e6) if not etime: return except Exception: return # print( """ <script> function loadFiles(files, fn) { if (!files.length) { files = []; } var head = document.head || document.getElementsByTagName('head')[0]; function loadFile(index) { if (files.length > index) { if (files[index].endsWith('.css')) { var fileref = document.createElement('link'); fileref.setAttribute("rel", "stylesheet"); fileref.setAttribute("type", "text/css"); fileref.setAttribute("href", files[index]); } else { var fileref = document.createElement('script'); fileref.setAttribute("type", "text/javascript"); fileref.setAttribute("src", files[index]); } console.log('Load ' + files[index]); head.appendChild(fileref); index = index + 1; // Used to call a callback function fileref.onload = function() { loadFile(index); } } else if (fn) { fn(); } } loadFile(0); } function plotResourcePlot_""" + t + """() { // get the item // parent element is a table cell, needs enlarge document.getElementById( "res_""" + t + """").parentElement.setAttribute("height", "300px;"); $("#res_""" + t + """").css("height", "300px"); $("#res_""" + t + """").css("width", "100%"); $("#res_""" + t + """").css("min-height", "300px"); var cpu = [""" + ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, cpu)]) + """]; var mem = [""" + ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, mem)]) + """]; $.plot('#res_""" + t + """', [{ data: cpu, label: "CPU (%)" }, { data: mem, label: "mem (M)", yaxis: 2 } ], { xaxes: 
[{ mode: "time" }], yaxes: [{ min: 0 }, { position: "right", tickFormatter: function(v, axis) { return v.toFixed(1) + 'M'; } }], legend: { position: "nw" } }); } var dt = 100; // the frontend might be notified before the table is inserted as results. function showResourceFigure_""" + t + """() { if ( $("#res_""" + t + """").length === 0) { dt = dt * 1.5; // slow-down checks for datatable as time goes on; setTimeout(showResourceFigure_""" + t + """, dt); return; } else { $("#res_""" + t + """").css('width', "100%").css('height', "300px"); loadFiles(["http://www.flotcharts.org/flot/jquery.flot.js", "http://www.flotcharts.org/flot/jquery.flot.time.js" ], plotResourcePlot_""" + t + """); } } showResourceFigure_""" + t + """() </script> """ ) elif verbosity == 0: print("\n".join(obtained_status)) elif verbosity == 1: for s, (t, d) in zip(obtained_status, all_tasks): print(f"{t}\t{s}") elif verbosity == 2: tsize = 20 for s, (t, d) in zip(obtained_status, all_tasks): ts, _, _, dr = TaskFile(t).tags_created_start_and_duration( formatted=not numeric_times ) tsize = max(tsize, len(ts)) print(f"{t}\t{ts.ljust(tsize)}\t{dr:<14}\t{s}") elif verbosity == 3: tsize = 20 for s, (t, d) in zip(obtained_status, all_tasks): ts, ct, st, dr = TaskFile(t).tags_created_start_and_duration( formatted=not numeric_times ) tsize = max(tsize, len(ts)) print(f"{t}\t{ts.ljust(tsize)}\t{ct:<14}\t{st:<14}\t{dr:<14}\t{s}") elif verbosity == 4: import pprint for s, (t, d) in zip(obtained_status, all_tasks): tf = TaskFile(t) if s == "missing": print(f"{t}\t{s}\n") continue ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True) print(f"{t}\t{s}\n") print(f"{ct}") if st: print(f"{st}") if dr: print(f"{dr}") params = tf.params print("TASK:\n=====") if hasattr(params, "task_stack"): # show task of subtask print(f"#1 of {len(params.task_stack)} subtasks:") print(params.task_stack[0][1].task) else: print(params.task) print("TAGS:\n=====") print(tf.tags) print() if params.global_def: print("GLOBAL:\n=======") print(params.global_def) print() print("ENVIRONMENT:\n============") global_runtime = tf.runtime["_runtime"] job_vars = params.sos_dict job_vars["_runtime"].update(global_runtime) for k in sorted(job_vars.keys()): v = job_vars[k] print(f"{k:22}{short_repr(v) if verbosity == 3 else pprint.pformat(v)}") print() if tf.has_result(): if s not in ("pending", "submitted", "running"): res = tf.result print("EXECUTION STATS:\n================") if "start_time" in res and "end_time" in res: print( f'Duration:\t{format_duration(res["end_time"] - res["start_time"])}' ) if "peak_cpu" in res: print(f'Peak CPU:\t{res["peak_cpu"]*100} %') if "peak_mem" in res: print(f'Peak mem:\t{pretty_size(res["peak_mem"])}') elif s == "running": # we have separate pulse, out and err files pulse_file = os.path.join( os.path.expanduser("~"), ".sos", "tasks", t + ".pulse" ) if os.path.isfile(pulse_file): print("EXECUTION STATS:\n================") with open(pulse_file) as pulse: print(summarizeExecution(t, pulse.read(), status=s)) # if there are other files such as job file, print them. 
def show_file(task, exts): if isinstance(exts, str): exts = [exts] for ext in exts: f = os.path.join( os.path.expanduser("~"), ".sos", "tasks", task + ext ) if not os.path.isfile(f) or os.path.getsize(f) == 0: return print( f'\n{os.path.basename(f)}:\n{"="*(len(os.path.basename(f))+1)}' ) try: with open(f) as fc: print(fc.read()) except Exception: print("Binary file") if s == "running": show_file(t, ".sh") show_file(t, ".job_id") show_file(t, [".sosout", ".out"]) show_file(t, [".soserr", ".err"]) elif s == "submitted": show_file(t, ".sh") show_file(t, ".job_id") elif s != "pending": if tf.has_shell(): print("\nexecution script:\n================\n" + tf.shell) else: show_file(t, ".sh") if tf.has_stdout(): print("\nstandard output:\n================\n" + tf.stdout) else: show_file(t, [".sosout", ".out"]) if tf.has_stderr(): print("\nstandard error:\n================\n" + tf.stderr) else: show_file(t, [".soserr", ".err"]) # remove jobs that are older than 1 month if to_be_removed: purge_tasks(to_be_removed, verbosity=0) def kill_tasks(tasks, tags=None): # import glob from multiprocessing.pool import ThreadPool as Pool if not tasks: tasks = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task") ) all_tasks = [os.path.basename(x)[:-5] for x in tasks] else: all_tasks = [] for t in tasks: matched = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task") ) matched = [os.path.basename(x)[:-5] for x in matched] if not matched: env.logger.warning(f"{t} does not match any existing task") else: all_tasks.extend(matched) if tags: all_tasks = [ x for x in all_tasks if any(x in tags for x in TaskFile(x).tags.split()) ] if not all_tasks: env.logger.debug("No task to kill") return all_tasks = sorted(list(set(all_tasks))) # at most 20 threads p = Pool(min(20, len(all_tasks))) killed = p.map(kill_task, all_tasks) for s, t in zip(killed, all_tasks): print(f"{t}\t{s}") def kill_task(task): tf = TaskFile(task) status = tf.status if status == "completed": return "completed" with open( os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"), "a" ) as err: err.write(f"Task {task} killed by sos kill command or task engine.") tf.add_outputs() # 1323 tf.add_result() TaskFile(task).status = "aborted" remove_task_files( task, [".sosout", ".soserr", ".out", ".err", ".pulse", ".sh", ".job_id"] ) return "aborted" def purge_tasks(tasks, purge_all=None, age=None, status=None, tags=None, verbosity=2): # verbose is ignored for now # if not tasks and not purge_all: # # if not --all and no task is specified, find all tasks in the current directory # from .signatures import WorkflowSignatures # workflow_signatures = WorkflowSignatures() # tasks = [ # x for x in workflow_signatures.tasks() if os.path.isfile( # os.path.join( # os.path.expanduser('~'), '.sos', 'tasks', x + '.task')) # ] import glob if tasks: all_tasks = [] for t in tasks: matched = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task") ) matched = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched] if not matched: print(f"{t}\tmissing") all_tasks.extend(matched) elif purge_all or age or status or tags: tasks = glob.glob( os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task") ) all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks] else: raise ValueError( "Please specify either tasks or one or more of --all, --status, --tags--age" ) # if age is not None: age = expand_time(age, default_unit="d") if age > 0: all_tasks = [x for x in 
all_tasks if time.time() - x[1] >= age] else: all_tasks = [x for x in all_tasks if time.time() - x[1] <= -age] if status: # at most 20 threads task_status = check_tasks([x[0] for x in all_tasks], not tasks) all_tasks = [x for x in all_tasks if task_status[x[0]]["status"] in status] if tags: all_tasks = [ x for x in all_tasks if any(x in tags for x in TaskFile(x[0]).tags.split()) ] # # remoe all task files all_tasks = set([x[0] for x in all_tasks]) if all_tasks: # # find all related files, including those in nested directories from collections import defaultdict to_be_removed = defaultdict(list) for dirname, _, filelist in os.walk( os.path.join(os.path.expanduser("~"), ".sos", "tasks") ): for f in filelist: ID = os.path.basename(f).split(".", 1)[0] if ID in all_tasks: to_be_removed[ID].append(os.path.join(dirname, f)) # cache_file: str = os.path.join( os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle" ) if os.path.isfile(cache_file): with fasteners.InterProcessLock(cache_file + "_"): with open(cache_file, "rb") as cache: status_cache = pickle.load(cache) else: status_cache = {} for task in all_tasks: removed = True for f in to_be_removed[task]: try: if verbosity > 3: if ( "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"] ): env.log_to_file("TASK", f"Remove {f}") os.remove(f) except Exception as e: removed = False if verbosity > 0: env.logger.warning(f"Failed to purge task {task[0]}: {e}") status_cache.pop(task, None) if removed and verbosity > 1: print(f"{task}\tpurged") with fasteners.InterProcessLock(cache_file + "_"): with open(cache_file, "wb") as cache: pickle.dump(status_cache, cache) elif verbosity > 1: env.logger.debug("No matching tasks to purge") if purge_all and age is None and status is None and tags is None: matched = glob.glob(os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*")) count = 0 for f in matched: if os.path.isdir(f): import shutil try: shutil.rmtree(f) count += 1 except Exception as e: if verbosity > 0: env.logger.warning(f"Failed to remove {f}: {e}") else: try: os.remove(f) count += 1 except Exception as e: if verbosity > 0: env.logger.warning(f"Failed to remove {e}") if count > 0 and verbosity > 1: env.logger.info(f"{count} other files and directories are removed.") return ""
gpl-3.0
-1,311,812,755,292,229,400
36.3247
207
0.464547
false
4.217995
false
false
false
bjodah/PyLaTeX
pylatex/base_classes/command.py
1
10139
# -*- coding: utf-8 -*- """ This module implements a class that implements a latex command. This can be used directly or it can be inherited to make an easier interface to it. .. :copyright: (c) 2014 by Jelte Fennema. :license: MIT, see License for more details. """ from .latex_object import LatexObject from ..utils import dumps_list class CommandBase(LatexObject): """A class that represents a LaTeX command. The name of this class (when lowercased) will be the name of this command. To supply a different name set the ``_latex_name`` attribute. """ def __init__(self, arguments=None, options=None, *, extra_arguments=None): r""" Args ---- arguments: None, str, list or `~.Arguments` The main arguments of the command. options: None, str, list or `~.Options` Options of the command. These are placed in front of the arguments. extra_arguments: None, str, list or `~.Arguments` Extra arguments for the command. When these are supplied the options will be placed before them instead of before the normal arguments. This allows for a way of having one or more arguments before the options. """ self._set_parameters(arguments, 'arguments') self._set_parameters(options, 'options') if extra_arguments is None: self.extra_arguments = None else: self._set_parameters(extra_arguments, 'extra_arguments') super().__init__() def _set_parameters(self, parameters, argument_type): parameter_cls = Options if argument_type == 'options' else Arguments if parameters is None: parameters = parameter_cls() elif not isinstance(parameters, parameter_cls): parameters = parameter_cls(parameters) # Pass on escaping to generated parameters parameters._default_escape = self._default_escape setattr(self, argument_type, parameters) def __key(self): """Return a hashable key, representing the command. Returns ------- tuple """ return (self.latex_name, self.arguments, self.options, self.extra_arguments) def __eq__(self, other): """Compare two commands. Args ---- other: `~.Command` instance The command to compare this command to Returns ------- bool: If the two instances are equal """ if isinstance(other, Command): return self.__key() == other.__key() return False def __hash__(self): """Calculate the hash of a command. Returns ------- int: The hash of the command """ return hash(self.__key()) def dumps(self): """Represent the command as a string in LaTeX syntax. Returns ------- str The LaTeX formatted command """ options = self.options.dumps() arguments = self.arguments.dumps() if self.extra_arguments is None: return r'\{command}{options}{arguments}'\ .format(command=self.latex_name, options=options, arguments=arguments) extra_arguments = self.extra_arguments.dumps() return r'\{command}{arguments}{options}{extra_arguments}'\ .format(command=self.latex_name, arguments=arguments, options=options, extra_arguments=extra_arguments) class Command(CommandBase): """A class that represents a LaTeX command. This class is meant for one-off commands. When a command of the same type is used multiple times it is better to subclass `.CommandBase`. """ _repr_attributes_mapping = {'command': 'latex_name'} def __init__(self, command=None, arguments=None, options=None, *, extra_arguments=None, packages=None): r""" Args ---- command: str Name of the command arguments: None, str, list or `~.Arguments` The main arguments of the command. options: None, str, list or `~.Options` Options of the command. These are placed in front of the arguments. extra_arguments: None, str, list or `~.Arguments` Extra arguments for the command. 
When these are supplied the options will be placed before them instead of before the normal arguments. This allows for a way of having one or more arguments before the options. packages: list of `~.Package` instances A list of the packages that this command requires Examples -------- >>> Command('documentclass', >>> options=Options('12pt', 'a4paper', 'twoside'), >>> arguments='article').dumps() '\\documentclass[12pt,a4paper,twoside]{article}' >>> Command('com') '\\com' >>> Command('com', 'first') '\\com{first}' >>> Command('com', 'first', 'option') '\\com[option]{first}' >>> Command('com', 'first', 'option', 'second') '\\com{first}[option]{second}' """ self.latex_name = command if packages is not None: self.packages |= packages super().__init__(arguments, options, extra_arguments=extra_arguments) class UnsafeCommand(Command): """An unsafe version of the `Command` class. This class is meant for one-off commands that should not escape their arguments and options. Use this command with care and only use this when the arguments are hardcoded. When an unsafe command of the same type is used multiple times it is better to subclass `.CommandBase` and set the ``_default_escape`` attribute to false. """ _default_escape = False class Parameters(LatexObject): """The base class used by `~Options` and `~Arguments`. This class should probably never be used on its own and inhereting from it is only useful if a class like `~Options` or `~Arguments` is needed again. """ def __init__(self, *args, **kwargs): r""" Args ---- \*args: Positional parameters \*\*kwargs: Keyword parameters """ if len(args) == 1 and not isinstance(args[0], str): if hasattr(args[0], 'items') and len(kwargs) == 0: kwargs = args[0] # do not just iterate over the dict keys args = () elif hasattr(args[0], '__iter__'): args = args[0] self._positional_args = list(args) self._key_value_args = dict(kwargs) super().__init__() def __key(self): """Generate a unique hashable key representing the parameter object. Returns ------- tuple """ return tuple(self._list_args_kwargs()) def __eq__(self, other): """Compare two parameters. Returns ------- bool """ return type(self) == type(other) and self.__key() == other.__key() def __hash__(self): """Generate a hash of the parameters. Returns ------- int """ return hash(self.__key()) def _format_contents(self, prefix, separator, suffix): """Format the parameters. The formatting is done using the three arguments suplied to this function. Arguments --------- prefix: str separator: str suffix: str Returns ------- str """ params = self._list_args_kwargs() if len(params) <= 0: return '' string = prefix + dumps_list(params, escape=self.escape, token=separator) + suffix return string def _list_args_kwargs(self): """Make a list of strings representing al parameters. Returns ------- list """ params = [] params.extend(self._positional_args) params.extend(['{k}={v}'.format(k=k, v=v) for k, v in self._key_value_args.items()]) return params class Options(Parameters): """A class implementing LaTex options for a command. It supports normal positional parameters, as well as key-value pairs. Options are the part of a command located between the square brackets (``[]``). The positional parameters will be outputted in order and will appear before the key-value-pairs. 
The key value-pairs won't be outputted in the order in which they were entered Examples -------- >>> args = Options('a', 'b', 'c').dumps() '[a,b,c]' >>> Options('clip', width=50, height='25em', trim='1 2 3 4').dumps() '[clip,trim=1 2 3 4,width=50,height=25em]' """ def dumps(self): """Represent the parameters as a string in LaTeX syntax. This is to be appended to a command. Returns ------- str """ return self._format_contents('[', ',', ']') class Arguments(Parameters): """A class implementing LaTex arguments for a command. It supports normal positional parameters, as well as key-value pairs. Arguments are the part of a command located between the curly braces (``{}``). The positional parameters will be outputted in order and will appear before the key-value-pairs. The key value-pairs won't be outputted in the order in which they were entered Examples -------- >>> args = Arguments('a', 'b', 'c').dumps() '{a}{b}{c}' >>> args = Arguments('clip', width=50, height='25em').dumps() >>> args.dumps() '{clip}{width=50}{height=25em}' """ def dumps(self): """Represent the parameters as a string in LaTeX syntax. This is to be appended to a command. Returns ------- str """ return self._format_contents('{', '}{', '}')
mit
-7,111,534,106,801,539,000
27.085873
79
0.56909
false
4.579494
false
false
false
arunkgupta/gramps
gramps/gen/filters/rules/person/_ischildoffiltermatch.py
1
2492
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$

#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _

#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from _matchesfilter import MatchesFilter

#-------------------------------------------------------------------------
#
# IsChildOfFilterMatch
#
#-------------------------------------------------------------------------
class IsChildOfFilterMatch(Rule):
    """Rule that checks for a person that is a child
    of someone matched by a filter"""

    labels = [ _('Filter name:') ]
    name = _('Children of <filter> match')
    category = _('Family filters')
    description = _("Matches children of anybody matched by a filter")

    def prepare(self, db):
        self.db = db
        self.map = set()
        filt = MatchesFilter(self.list)
        filt.requestprepare(db)
        for person in db.iter_people():
            if filt.apply(db, person):
                self.init_list(person)
        filt.requestreset()

    def reset(self):
        self.map.clear()

    def apply(self, db, person):
        return person.handle in self.map

    def init_list(self, person):
        if not person:
            return
        for fam_id in person.get_family_handle_list():
            fam = self.db.get_family_from_handle(fam_id)
            if fam:
                self.map.update(child_ref.ref
                                for child_ref in fam.get_child_ref_list())
gpl-2.0
-4,666,658,151,900,319,000
32.226667
75
0.536116
false
4.597786
false
false
false
cheery/spirthon
annotator.py
1
5192
# Annotator needs to find the least generic type for everything.
# To do that, it needs to hold a model of our types.
class Annotator(object):
    def __init__(self, unit):
        self.unit = unit
        self.stack = []

    def update(self, func):
        for block in func:
            for op in block.ops:
                if not op.queued:
                    self.stack.append(op)
                    op.queued = True

    def run(self):
        while len(self.stack) > 0:
            op = self.stack.pop()
            op.queued = False
            if op.name == 'call':
                print 'annotate', op
            elif op.name == 'return':
                a = union(op.block.func.annotation.restype, op.args[0].annotation)
                op.args[0].annotation = a
                op.block.func.annotation.restype = a
                op.annotation = a
                print 'return update', op, a
                # bit incorrect, should push uses of argument in too.
            else:
                assert False
            # Should annotate here, if some of the fields change,
            # should reschedule the used fields.

# SPIR-V annotation may need much simpler rules than specified here.

# Anything -annotation in translation unit most likely means
# that the translation failed.
class Anything(object):
    specificity = 0
    parametric = False

    def __repr__(self):
        return 'anything'

# The next most specific type after 'Unbound'.
class Constant(object):
    def __init__(self, type, value):
        self.type = type
        self.value = value

    def __repr__(self):
        return 'Constant({}, {})'.format(self.type, self.value)

class FuncType(object):
    def __init__(self, restype, argtypes):
        self.restype = restype
        self.argtypes = argtypes

    def __getitem__(self, index):
        return self.argtypes[index]

    def __len__(self):
        return len(self.argtypes)

    def __repr__(self):
        return '({}) ->'.format(', '.join(map(repr, self.argtypes)), self.restype)

class Type(object):
    def __call__(self, parameter):
        assert self.parametric
        return Parametric(self, parameter)

    def __init__(self, name, generic, parametric=False):
        self.name = name
        self.generic = generic
        self.parametric = parametric
        self.specificity = generic.specificity+1

    def __repr__(self):
        return self.name

class Parametric(object):
    def __init__(self, func, parameter):
        self.func = func
        self.parameter = parameter

    def __repr__(self):
        return "{}({})".format(self.func, self.parameter)

# Types are treated as notation. They should be uniquely identified.
anything = Anything()

# not sure whether these belong here.
t_int = Type('int', anything)
t_uint = Type('uint', t_int)
t_bool = Type('bool', t_uint)
t_float = Type('float', anything)

t_vec2 = Type('vec2', anything, parametric=True)
t_vec3 = Type('vec3', anything, parametric=True)
t_vec4 = Type('vec4', anything, parametric=True)

# Thought about doing them this way, but realized types
# would require unification by their type hierarchies.
#
# nullable = Type('nullable', anything, parametric=True)
#
# instance = Type('instance', nullable, parametric=True)
#
# t_null = Type('null', nullable)

# I don't want parametric types to leak from
# their parametric container.
def union(a, b):
    c = union_raw(a, b)
    while isinstance(c, Type) and c.parametric:
        c = c.generic
    return c

# But we still may use unification results which
# return parametric types.
def union_raw(a, b):
    if a is b:
        return a
    if a is None:
        return b
    if b is None:
        return a
    if isinstance(a, Constant) and isinstance(b, Constant):
        if a.value == b.value:
            return a
        else:
            return union_raw(a.type, b.type)
    elif isinstance(a, Constant):
        return union_raw(a.type, b)
    elif isinstance(b, Constant):
        return union_raw(a, b.type)
    if isinstance(a, Type) and isinstance(b, Type):
        specificity = min(a.specificity, b.specificity)
        while a.specificity > specificity:
            a = a.generic
        while b.specificity > specificity:
            b = b.generic
        while a is not b:
            a = a.generic
            b = b.generic
            assert a is not None
        return a
    elif isinstance(a, Parametric) and isinstance(b, Parametric):
        tp = union_raw(a.func, b.func)
        if tp.parametric:
            return Parametric(tp, union(a.parameter, b.parameter))
        else:
            return tp
    elif isinstance(a, Parametric):
        tp = union_raw(a.func, b)
        if tp.parametric:
            return Parametric(tp, a.parameter)
        else:
            return tp
    elif isinstance(b, Parametric):
        tp = union_raw(b.func, a)
        if tp.parametric:
            return Parametric(tp, b.parameter)
        else:
            return tp
    elif isinstance(a, FuncType) and isinstance(b, FuncType) and len(a) == len(b):
        return FuncType(
            union(a.restype, b.restype),
            [union(c, d) for c, d in zip(a, b)])
    return anything
mit
7,691,338,691,917,519,000
30.08982
82
0.589176
false
3.7815
false
false
false
tibor95/phatch-python2.7
build/lib.linux-i686-2.7/phatch/core/pil.py
1
26512
# Phatch - Photo Batch Processor # Copyright (C) 2007-2009 www.stani.be # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/ # # Phatch recommends SPE (http://pythonide.stani.be) for editing python files. # Follows PEP8 """All PIL related issues.""" #FIXME: # - info should be defined on layer level # -> move call afterwards also to layer level # -> adapt image inspector import datetime import os import re import types from PIL import Image #todo make this lazy from lib import formField from lib import imtools from lib import metadata from lib import openImage from lib import system from lib import thumbnail from lib import unicoding from lib.reverse_translation import _t from lib.formField import RE_FILE_IN, RE_FILE_OUT from ct import TITLE from config import USER_BIN_PATH #from other import EXIF system.set_bin_paths([USER_BIN_PATH]) try: import pyexiv2 from lib import _pyexiv2 as exif except: pyexiv2 = None exif = False WWW_PYEXIV2 = 'http://tilloy.net/dev/pyexiv2/' NEEDS_PYEXIV2 = _('pyexiv2 needs to be installed') + ' (%s)' % WWW_PYEXIV2 CONVERTED_MODE = \ _('%(mode)s has been converted to %(mode_copy)s to save as %(format)s.') DYNAMIC_VARS = set(('width', 'height', 'size', 'mode', 'transparency')) IMAGE_DEFAULT_DPI = 72 SEPARATOR = '_' # should be same as in core.translations MONTHS = (_t('January'), _t('February'), _t('March'), _t('April'), _t('May'), _t('June'), _t('July'), _t('August'), _t('September'), _t('October'), _t('November'), _t('December')) WEEKDAYS = (_t('Monday'), _t('Tuesday'), _t('Wednesday'), _t('Thursday'), _t('Friday'), _t('Saturday'), _t('Sunday')) DATETIME_KEYS = ['year', 'month', 'day', 'hour', 'minute', 'second'] re_DATETIME = re.compile( '(?P<year>\d{4})[-:](?P<month>\d{2})[-:](?P<day>\d{2}) ' '(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})') re_TAG = re.compile('(Pil|Exif|Iptc|Pexif|Zexif)([.]\w+)+') re_KEY = re.compile('(#*)((\w|[.])*$|[$])') TRANSPARENCY_ERROR = _('Only palette images have transparency.') IMAGE_READ_EXTENSIONS = set(formField.IMAGE_READ_EXTENSIONS)\ .union(openImage.WITHOUT_PIL.extensions) IMAGE_READ_EXTENSIONS = list(IMAGE_READ_EXTENSIONS) IMAGE_READ_EXTENSIONS.sort() IMAGE_EXTENSIONS = [ext for ext in IMAGE_READ_EXTENSIONS if ext in formField.IMAGE_WRITE_EXTENSIONS] BASE_VARS = ['dpi', 'compression', 'filename', 'format', 'orientation', 'path', 'transparency', 'type'] def split_data(d): """Provide attribute access to the variables. 
:param d: a dumped metadata dictionary :type d: dict >>> d = {'date': '2008-11-27 13:54:33', 'tuple': (1, 2)} """ value = d.values()[0] #tuples or list if type(value) in (types.ListType, types.TupleType): if len(value) > 1: for k, v in d.items(): for i, x in enumerate(v): d['%s.%d' % (k, i)] = v[i] return #datetime strings done = False for k, v in d.items(): if type(v) in types.StringTypes: dt = re_DATETIME.match(v) if dt: for key in DATETIME_KEYS: d['%s.%s' % (k, key)] = dt.group(key) done = True if done: return #date time values if type(value) == datetime.datetime: for k, v in d.items(): for key in DATETIME_KEYS: d['%s.%s' % (k, key)] = getattr(v, key) def fix_EXIF(tag): if not tag.startswith('EXIF'): tag = 'EXIF.' + tag return tag.replace(' ', SEPARATOR) def image_to_dict(filename, im=None): folder, name = os.path.split(filename) d = {'path': filename, 'filename': name} if im: width, height = im.size d['width'] = width d['height'] = height d['mode'] = im.mode return d def get_photo(filename): return Photo(metadata.InfoExtract(filename, vars=BASE_VARS).dump()) def split_vars_static_dynamic(vars): vars = set(vars) static = vars.difference(DYNAMIC_VARS) dynamic = vars.intersection(DYNAMIC_VARS) return list(static), list(dynamic) class NotWritableTagError(Exception): pass class InfoPhoto(dict): def __init__(self, info, info_to_dump, get_pil, image=None): """The ``get_pil`` parameter is necessary for tags as width, height, size and mode. :param info: pil, pyexiv2, ... tag, value info :type info: dict :param get_pil: method to retrieve the pil image :type get_pil: callable """ #parameters self.get_pil = get_pil path = info['path'] #sources if image == None: image = get_pil() sources = { metadata.InfoPil: image, metadata.InfoPexif: image, metadata.InfoZexif: image} #check format -> readable/writable metadata with pyexiv2 if exif and exif.is_readable_format(image.format): self.pyexiv2 = pyexiv2.ImageMetadata(path) self.pyexiv2.read() self.writable_exif = exif.is_writable_format_exif(image.format) self.writable_iptc = exif.is_writable_format_exif(image.format) self.writable = self.writable_exif or self.writable_iptc if self.writable_exif: self.pyexiv2['Exif.Image.Software'] = TITLE sources[metadata.InfoExif] = sources[metadata.InfoIptc] =\ self.pyexiv2 else: self.pyexiv2 = None self.writable = self.writable_exif = self.writable_iptc = False #retrieve dump info try: info_dumped = info_to_dump.open(path, sources).dump(free=True) except Exception, details: reason = unicoding.exception_to_unicode(details) #log error details message = u'%s:%s:\n%s' % (_('Unable extract variables from file'), path, reason) raise Exception(message) self.update(info, explicit=False) self.update(info_dumped, explicit=False) #private vars self._original_size = image.size # to compare if changed later self._dirty = False self._log = '' self._flushed = True def close(self): """Remove circular reference.""" del self.get_pil def is_dirty(self): """The photo can become dirty in two ways: * new metadata has been set * the image has changes size In case the image size has changed it will update the ``Exif.Photo.PixelXDimension`` and ``Exif.Photo.PixelYimension`` accordingly. 
:returns: True, if dirty :rtype: boolean """ if self._dirty: return True self.update_size() return self._dirty def set(self, tag, value): super(InfoPhoto, self).__setitem__(tag, value) def update(self, d, explicit=True): """Do this explicitly so __setitem__ gets called.""" if explicit: for key, value in d.items(): self[key] = value else: super(InfoPhoto, self).update(d) def update_size(self): """If the image is exif writable and if the size has changed, it will update ``Exif.Photo.PixelXDimension`` and ``Exif.Photo.PixelYimension``. """ if not self.writable_exif: return size = width, height = self.get_pil().size if self._original_size != size: self.pyexiv2['Exif.Photo.PixelXDimension'] = width self.pyexiv2['Exif.Photo.PixelYDimension'] = height self._dirty = True def __getitem__(self, tag): """If a dynamic tag (size, mode) is requested, it will extract it from the image. Otherwise get it normally. :param tag: metadata tag :type tag: string :returns: value """ if tag in DYNAMIC_VARS: #this can maybe be optimized if necessary if tag == 'size': return self.get_pil().size elif tag in ('width', 'Exif_Photo_PixelXDimension'): return self.get_pil().size[0] elif tag in ('height', 'Exif_Photo_PixelYDimension'): return self.get_pil().size[1] elif tag == 'mode': return self.get_pil().mode elif tag == 'transparency': self.assert_transparency() return self.get_pil().info['transparency'] else: raise KeyError('Fatal Error: tag "%s" is not dynamic?!' % tag) elif tag in metadata.ORIENTATION_TAGS: #give priority to writable tag if 'Exif_Image_Orientation' in self: return super(InfoPhoto, self).\ __getitem__('Exif_Image_Orientation') else: return super(InfoPhoto, self).__getitem__(tag) else: return super(InfoPhoto, self).__getitem__(tag) def __contains__(self, tag): """ """ if super(InfoPhoto, self).__contains__(tag): return True if tag == 'transparency' and tag in self.get_pil().info: return self['mode'] == 'P' return tag in DYNAMIC_VARS def __delitem__(self, tag): """Delete a tag after :method:`InfoPhoto.assert_writable`. :param tag: metadata tag :type tag: string """ self.assert_writable(tag) if tag == 'transparency': self.assert_transparency() del self.get_pil().info[tag] return pyexiv2_tag = self._fix(tag) # pexiv2 demands str # a bit clumsy but pyexiv2 does not support get or in try: pyexiv2_tag_value = self.pyexiv2[pyexiv2_tag] except KeyError: pyexiv2_tag_value = None if self.pyexiv2 and pyexiv2_tag_value != None: self.pyexiv2[pyexiv2_tag] = None if tag in self: super(InfoPhoto, self).__delitem__(tag) def __setitem__(self, tag, value): """Delete a tag after :method:`InfoPhoto.assert_writable`. :param tag: metadata tag :type tag: string :param value: new value """ self.assert_writable(tag) if tag in metadata.ORIENTATION_TAGS: if self.pyexiv2 is None and value == 1: #allow to ignore this (e.g. transpose method) return #redirect to writable tag tag = 'Exif_Image_Orientation' if tag in DYNAMIC_VARS: if tag == 'transparency': self.assert_transparency() self.get_pil().info['transparency'] = value else: raise KeyError(_('Tag "%s" is read only.') % tag) else: super(InfoPhoto, self).__setitem__(tag, value) if metadata.RE_PYEXIV2_TAG_EDITABLE.match(tag): try: self.pyexiv2[self._fix(tag)] = value except Exception, message: raise KeyError('%s:\n%s' % (_('Impossible to write tag "%s"') % tag, message)) self._dirty = True self._flushed = False def assert_transparency(self): """Raise a ``KeyError`` for ``'transparency'`` when ``image.mode`` is not ``'P'``. 
""" if self['mode'] != 'P': raise KeyError(TRANSPARENCY_ERROR) def log(self, message): """Log a message :param message: message :type message: string """ self._log += message + '\n' def clear_log(self): """Clears the log.""" self._log = '' def get_log(self): """Get the log contents. :returns: the log :rtype: string """ return self._log @classmethod def _fix(cls, tag): """Phatch uses ``_`` as a separator while pyexiv2 uses a dot (``.``). Moreover pyexiv2 demands str. >>> InfoPhoto._fix('Exif_Photo_PixelXDimension') 'Exif.Photo.PixelXDimension' :param tag: tag in info notation :type tag: string :returns: tag in pyexiv2 notation :rtype: string """ return str(tag.replace('_', '.')) def assert_writable(self, tag): """Assert that the tag is writable. This can raise an ``NotWritableTagError`` because of several reasons: * Tag might be read-only (e.g. Exif_Photo_PixelXDimension) * Tag might be not Exif or Iptc * Image file format might not allow writing of this tag :param tag: tag name :type tag: string :returns: True, if writable :rtype: bool """ if not metadata.is_writable_tag(tag): raise NotWritableTagError(_('Tag "%s" is not writable.') % tag) if not ((self.writable_exif and tag.startswith('Exif')) or (self.writable_iptc and tag.startswith('Iptc')) or metadata.is_writeable_not_exif_tag(tag, self['mode'])): raise NotWritableTagError( _('Format %(format)s does not support overwriting "%(tag)s".')\ % {'format': self['format'], 'tag': tag}) def save(self, target, target_format=None, thumbdata=None): """ :param target: target filename :type target: string :param target_format: target format e.g. obtained by PIL :type target_format: string :param thumbdata: new thumbnail (eg with StringIO, see :mod:`imtools`) :type thumbdata: string """ if not exif: raise ImportError(NEEDS_PYEXIV2) if not pyexiv2: #FIXME: when starting with a not exif image png #but save as exif jpg return if target == self['path']: if self.is_dirty() and not self._flushed: # includes update_size warnings = exif.flush(self.pyexiv2, thumbdata) self._flushed = True else: self.update_size() warnings = exif.write_metadata(self.pyexiv2, target, self['format'], target_format, thumbdata) return warnings class Photo: """Use :func:`get_photo` to obtain a photo from a filename.""" def __init__(self, info, info_to_dump=None): self.modify_date = None # for time shift action self.report_files = [] # for reports self._exif_transposition_reverse = None #layer path = info['path'] name = self.current_layer_name = _t('background') layer = Layer(path, load=True) self.layers = {name: layer} #info self.info = InfoPhoto(info, info_to_dump, self.get_flattened_image, layer.image) self.rotate_exif() def close(self): """Remove circular references.""" self.info.close() del self.info def log(self, message): self.info.log(message) def clear_log(self): self.info.clear_log() def get_log(self): return self.info.get_log() def get_filename(self, folder, filename, typ): return os.path.join(folder, '%s.%s' % (filename, typ))\ .replace('<', '%(').replace('>', ')s') % self.__dict__ #---layers def get_flattened_image(self): return self.get_layer().image.copy() def get_layer(self, name=None): if name is None: name = self.current_layer_name return self.layers[name] def get_thumb(self, size=thumbnail.SIZE): return thumbnail.thumbnail(self.get_flattened_image(), size=size, checkboard=True) def set_layer(self, layer, name=None): if name is None: name = self.current_layer_name self.layers[name] = layer #---image operations affecting all layers def save(self, filename, 
format=None, save_metadata=True, **options): """Saves a flattened image""" #todo: flatten layers if format is None: format = imtools.get_format_filename(filename) image = self.get_flattened_image() image_copy = imtools.convert_save_mode_by_format(image, format) if image_copy.mode == 'P' and 'transparency' in image_copy.info: options['transparency'] = image_copy.info['transparency'] if image_copy.mode != image.mode: self.log(CONVERTED_MODE % {'mode': image.mode, 'mode_copy': image_copy.mode, 'format': format} + '\n') #reverse exif previously applied exif orientation #exif thumbnails are usually within 160x160 #desktop thumbnails size is defined by thumbnail.py and is #probably 128x128 save_metadata = save_metadata and exif \ and exif.is_writable_format(format) if save_metadata: # Exif thumbnails are stored in their own format (eg JPG) thumb = thumbnail.thumbnail(image_copy, (160, 160)) thumbdata = imtools.get_format_data(thumb, format) image_copy = imtools.transpose(image_copy, self._exif_transposition_reverse) #thumb = thumbnail.thumbnail(thumb, copy=False) else: thumbdata = None #postpone thumbnail production to see later if it is needed thumb = None if 'compression.tif' in options: compression = options['compression.tif'] del options['compression.tif'] else: compression = 'none' try: if compression.lower() in ['raw', 'none']: #save image with pil file_mode = imtools.save_check_mode(image_copy, filename, **options) #did PIL silently change the image mode? if file_mode: #PIL did change the image mode without throwing # an exception. #Do not save thumbnails in this case # as they won't be reliable. if image_copy.mode.endswith('A') and \ not file_mode.endswith('A'): #force RGBA when transparency gets lost #eg saving TIFF format with LA mode mode = image_copy.mode image_copy = image_copy.convert('RGBA') file_mode = imtools.save_check_mode(image_copy, filename, **options) if file_mode: # RGBA failed self.log(CONVERTED_MODE % {'mode': mode, 'mode_copy': file_mode, 'format': format} \ + '\n') else: # RGBA succeeded self.log(CONVERTED_MODE % {'mode': mode, 'mode_copy': 'RGBA', 'format': format} + '\n') else: self.log(CONVERTED_MODE % {'mode': image_copy.mode, 'mode_copy': file_mode, 'format': format} + '\n') elif thumbnail.is_needed(image_copy, format): # save thumbnail in system cache if needed if thumb is None: thumb = image_copy thumb_info = { 'width': image.size[0], 'height': image.size[1]} thumbnail.save_to_cache(filename, thumb, thumb_info=thumb_info, **options) # copy metadata if needed (problematic for tiff) # FIXME: if metdata corrupts the image, there should be # no thumbnail if save_metadata: self.info.save(filename, thumbdata=thumbdata) else: # save with pil>libtiff openImage.check_libtiff(compression) self.log(openImage.save_libtiff(image_copy, filename, compression=compression, **options)) if self.modify_date: # Update file access and modification date os.utime(filename, (self.modify_date, self.modify_date)) self.append_to_report(filename, image_copy) except IOError, message: # clean up corrupted drawing if os.path.exists(filename): os.remove(filename) raise IOError(message) #update info if hasattr(options, 'dpi'): self.info['dpi'] = options['dpi'][0] def append_to_report(self, filename, image=None): report = image_to_dict(filename, image) report[_t('source')] = self.info['path'] self.report_files.append(report) def convert(self, mode, *args, **keyw): """Converts all layers to a different mode.""" for layer in self.layers.values(): if layer.image.mode == mode: continue if mode == 
'P' and imtools.has_alpha(layer.image): layer.image = imtools.convert(layer.image, mode, *args, **keyw) self.info['transparency'] = 255 elif mode == 'P': layer.image = imtools.convert(layer.image, mode, *args, **keyw) self.info['transparency'] = None else: layer.image = imtools.convert(layer.image, mode, *args, **keyw) def safe_mode(self, format): """Convert the photo into a safe mode for this specific format""" layer = self.get_layer() layer.image = imtools.convert_save_mode_by_format(layer.image, format) def resize(self, size, method): """Resizes all layers to a different size""" size = (max(1, size[0]), max(1, size[1])) for layer in self.layers.values(): layer.image = layer.image.resize(size, method) def rotate_exif(self, reverse=False): layers = self.layers.values() if reverse: transposition = self._exif_transposition_reverse self._exif_transposition_reverse = () else: transposition, self._exif_transposition_reverse = \ imtools.get_exif_transposition(self.info['orientation']) if transposition: for layer in layers: layer.image = imtools.transpose(layer.image, transposition) #---pil def apply_pil(self, function, *arg, **keyw): for layer in self.layers.values(): layer.apply_pil(function, *arg, **keyw) #---external def call(self, command, check_exe=True, shell=None, size=None, unlock=False, output_filename=None, mode=None): if shell is None: shell = not system.WINDOWS #get command line info = self.info layer = self.get_layer() image = layer.image if mode != image.mode: image = imtools.convert(image, mode) if size != None and size[0] < image.size[0]: image = image.copy() image.thumbnail(size, Image.ANTIALIAS) #loop over input -> save to temp files temp_files = [] done = [] error = None for match in RE_FILE_IN.finditer(command): source = match.group() if not(source in done): ext = match.group(1) target = system.TempFile(ext) try: imtools.save_safely(image, target.path) except Exception, error: pass temp_files.append((source, target)) done.append(source) if error: break # check if we have a file_in # clean up in case of error if error: for source, target in temp_files: target.close() # os.remove(target) raise error # loop over output output = None for index, match in \ enumerate(RE_FILE_OUT.finditer(command)): if index > 0: # only 1 is allowed raise Exception('Only one file_out.* is allowed.') source = match.group() ext = match.group(1) output = system.TempFile(ext, output_filename) command = command.replace(source, system.fix_quotes(output.path)) # tweak command line for source, target in temp_files: command = command.replace(source, system.fix_quotes(target.path)) # execute system.call(command, shell=shell) # give back filename if output and not os.path.exists(output.path): error = True else: error = False for source, target in temp_files: target.close() # os.remove(target) if error: raise Exception( _('Command did not produce an output image:\n%s')\ % command) if output: layer.open(output.path) # DO NOT REMOVE image.load() or output.close will fail on windows layer.image.load() output.close() class Layer: def __init__(self, filename, position=(0, 0), load=True): self.open(filename) self.position = position # VERY IMPORTANT # do not remove load option, otherwise openImage.py won't work # correctly with group4 tiff compression if load: self.image.load() def open(self, uri): self.image = openImage.open(uri) if self.image.mode in ['F', 'I']: # Phatch doesn't support F and I # FIXME: It will better to add some sort of warning here self.image = self.image.convert('L') def apply_pil(self, 
function, *arg, **keyw): self.image = function(self.image, *arg, **keyw)
gpl-3.0
3,443,809,914,918,861,000
35.021739
79
0.55688
false
3.961153
false
false
false
klpdotorg/dubdubdub
apps/ivrs/migrations/0026_auto_20170101_2313.py
1
1139
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def forwards_func(apps, schema_editor):
    State = apps.get_model("ivrs", "State")
    User = apps.get_model("users", "User")
    states = State.objects.all()
    for state in states:
        # Trimming the starting 0. Have checked to make sure
        # all telephones on the State table have 11 digits
        # including the 0 at the beginning.
        telephone = state.telephone[1:]
        try:
            user = User.objects.get(mobile_no=telephone)
            state.user = user
        except:
            pass
        state.telephone = telephone
        state.save()


def reverse_func(apps, schema_editor):
    State = apps.get_model("ivrs", "State")
    states = State.objects.all()
    for state in states:
        telephone = "0" + state.telephone
        state.telephone = telephone
        state.user = None
        state.save()


class Migration(migrations.Migration):

    dependencies = [
        ('ivrs', '0025_state_user'),
    ]

    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
mit
7,417,438,785,882,545,000
24.886364
60
0.604917
false
3.87415
false
false
false
google/starthinker
dags/test_dag.py
1
4439
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################

'''
--------------------------------------------------------------

Before running this Airflow module...

Install StarThinker in cloud composer ( recommended ):

  From Release: pip install starthinker
  From Open Source: pip install git+https://github.com/google/starthinker

Or push local code to the cloud composer plugins directory ( if pushing local code changes ):

  source install/deploy.sh
  4) Composer Menu
  l) Install All

--------------------------------------------------------------

If any recipe task has "auth" set to "user" add user credentials:

  1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]

  OR

  1. Visit Airflow UI > Admin > Connections.
  2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
    - Conn Type: Google Cloud Platform
    - Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
    - Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials

--------------------------------------------------------------

If any recipe task has "auth" set to "service" add service credentials:

  1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]

  OR

  1. Visit Airflow UI > Admin > Connections.
  2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
    - Conn Type: Google Cloud Platform
    - Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
    - Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md

--------------------------------------------------------------

Test Script

Used by tests.

  - This should be called by the tests scripts only.
  - When run will generate a say hello log.

--------------------------------------------------------------

This StarThinker DAG can be extended with any additional tasks from the following sources:
  - https://google.github.io/starthinker/
  - https://github.com/google/starthinker/tree/master/dags

'''

from starthinker.airflow.factory import DAG_Factory

INPUTS = {}

RECIPE = {
  'setup': {
    'day': ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'],
    'hour': [1, 3, 23]
  },
  'tasks': [
    {
      'hello': {
        'auth': 'user',
        'hour': [1],
        'say': 'Hello At 1',
        'sleep': 0
      }
    },
    {
      'hello': {
        'auth': 'user',
        'hour': [3],
        'say': 'Hello At 3',
        'sleep': 0
      }
    },
    {
      'hello': {
        'auth': 'user',
        'hour': [],
        'say': 'Hello Manual',
        'sleep': 0
      }
    },
    {
      'hello': {
        'auth': 'user',
        'hour': [23],
        'say': 'Hello At 23 Sleep',
        'sleep': 30
      }
    },
    {
      'hello': {
        'auth': 'user',
        'say': 'Hello At Anytime',
        'sleep': 0
      }
    },
    {
      'hello': {
        'auth': 'user',
        'hour': [1, 3, 23],
        'say': 'Hello At 1, 3, 23',
        'sleep': 0
      }
    },
    {
      'hello': {
        'auth': 'user',
        'hour': [3],
        'say': 'Hello At 3 Reordered',
        'sleep': 0
      }
    }
  ]
}

dag_maker = DAG_Factory('test', RECIPE, INPUTS)
dag = dag_maker.generate()

if __name__ == "__main__":
  dag_maker.print_commandline()
apache-2.0
-252,957,889,067,501,380
24.079096
145
0.514305
false
4.035455
false
false
false
DongjunLee/kino-bot
kino/slack/plot.py
1
2684
from matplotlib import pyplot as plt
import matplotlib.dates as dt
import seaborn
seaborn.set()

import datetime


class Plot(object):
    def __init__(self):
        pass

    def make_bar(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.ylabel(x_label)
        if y_label is not None:
            plt.xlabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        plt.bar(x, y, align="center")

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)

    def make_line(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.ylabel(x_label)
        if y_label is not None:
            plt.xlabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        if isinstance(y[0], list):
            for data in y:
                plt.plot(x, data)
        else:
            plt.plot(x, y)

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)

    def make_efficiency_date(
        total_data,
        avg_data,
        f_name,
        title=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.ylabel(x_label)
        if y_label is not None:
            plt.xlabel(y_label)

        v_date = []
        v_val = []

        for data in total_data:
            dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
            to_int = round(float(data[1]))
            plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))

        for data in avg_data:
            dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
            v_date.append(dates)
            v_val.append(data[1])

        plt.plot_date(v_date, v_val, "^y-", label="Average")

        plt.legend()
        plt.savefig(f_name)
        plt.close(fig)
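# Illustrative usage sketch (not part of the original kino-bot module): the
# plotting methods above are defined without `self`, so in Python 3 they are
# invoked through the class object itself. The file name and data below are
# hypothetical placeholders.
if __name__ == "__main__":
    Plot.make_bar(
        x=[0, 1, 2],
        y=[3, 1, 2],
        f_name="example_bar.png",  # hypothetical output path
        title="Example bar chart",
        x_ticks=["mon", "tue", "wed"],
    )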
mit
2,999,971,998,501,694,500
22.137931
77
0.492921
false
3.494792
false
false
false
sylvchev/mdla
examples/example_benchmark_performance.py
1
6309
"""Benchmarking dictionary learning algorithms on random dataset""" from multiprocessing import cpu_count from time import time import matplotlib.pyplot as plt import numpy as np from numpy import array from numpy.linalg import norm from numpy.random import permutation, rand, randint, randn from mdla import MiniBatchMultivariateDictLearning, MultivariateDictLearning # TODO: # investigate perf break from pydico def benchmarking_plot(figname, pst, plot_sep, minibatchRange, mprocessRange): _ = plt.figure(figsize=(15, 10)) bar_width = 0.35 _ = plt.bar( np.array([0]), pst[0], bar_width, color="b", label="Online, no multiprocessing (baseline)", ) index = [0] for i in range(1, plot_sep[1]): if i == 1: _ = plt.bar( np.array([i + 1]), pst[i], bar_width, color="r", label="Online with minibatch", ) else: _ = plt.bar(np.array([i + 1]), pst[i], bar_width, color="r") index.append(i + 1) for _ in range(plot_sep[1], plot_sep[2]): if i == plot_sep[1]: _ = plt.bar( np.array([i + 2]), pst[i], bar_width, label="Batch with multiprocessing", color="magenta", ) else: _ = plt.bar(np.array([i + 2]), pst[i], bar_width, color="magenta") index.append(i + 2) plt.ylabel("Time per iteration (s)") plt.title("Processing time for online and batch processing") tick = [""] tick.extend(map(str, minibatchRange)) tick.extend(map(str, mprocessRange)) plt.xticks(index, tuple(tick)) plt.legend() plt.savefig(figname + ".png") def _generate_testbed( kernel_init_len, n_nonzero_coefs, n_kernels, n_samples=10, n_features=5, n_dims=3, snr=1000, ): """Generate a dataset from a random dictionary Generate a random dictionary and a dataset, where samples are combination of n_nonzero_coefs dictionary atoms. Noise is added, based on SNR value, with 1000 indicated that no noise should be added. Return the dictionary, the dataset and an array indicated how atoms are combined to obtain each sample """ print("Dictionary sampled from uniform distribution") dico = [rand(kernel_init_len, n_dims) for i in range(n_kernels)] for i in range(len(dico)): dico[i] /= norm(dico[i], "fro") signals = list() decomposition = list() for _ in range(n_samples): s = np.zeros(shape=(n_features, n_dims)) d = np.zeros(shape=(n_nonzero_coefs, 3)) rk = permutation(range(n_kernels)) for j in range(n_nonzero_coefs): k_idx = rk[j] k_amplitude = 3.0 * rand() + 1.0 k_offset = randint(n_features - kernel_init_len + 1) s[k_offset : k_offset + kernel_init_len, :] += k_amplitude * dico[k_idx] d[j, :] = array([k_amplitude, k_offset, k_idx]) decomposition.append(d) noise = randn(n_features, n_dims) if snr == 1000: alpha = 0 else: ps = norm(s, "fro") pn = norm(noise, "fro") alpha = ps / (pn * 10 ** (snr / 20.0)) signals.append(s + alpha * noise) signals = np.array(signals) return dico, signals, decomposition rng_global = np.random.RandomState(1) n_samples, n_dims = 1500, 1 n_features = kernel_init_len = 5 n_nonzero_coefs = 3 n_kernels, max_iter, learning_rate = 50, 10, 1.5 n_jobs, batch_size = -1, None iter_time, plot_separator, it_separator = list(), list(), 0 generating_dict, X, code = _generate_testbed( kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims ) # Online without mini-batch print( "Processing ", max_iter, "iterations in online mode, " "without multiprocessing:", end="", ) batch_size, n_jobs = n_samples, 1 learned_dict = MiniBatchMultivariateDictLearning( n_kernels=n_kernels, batch_size=batch_size, n_iter=max_iter, n_nonzero_coefs=n_nonzero_coefs, n_jobs=n_jobs, learning_rate=learning_rate, kernel_init_len=kernel_init_len, verbose=1, 
dict_init=None, random_state=rng_global, ) ts = time() learned_dict = learned_dict.fit(X) iter_time.append((time() - ts) / max_iter) it_separator += 1 plot_separator.append(it_separator) # Online with mini-batch minibatch_range = [cpu_count()] minibatch_range.extend([cpu_count() * i for i in range(3, 10, 2)]) n_jobs = -1 for mb in minibatch_range: print( "\nProcessing ", max_iter, "iterations in online mode, with ", "minibatch size", mb, "and", cpu_count(), "processes:", end="", ) batch_size = mb learned_dict = MiniBatchMultivariateDictLearning( n_kernels=n_kernels, batch_size=batch_size, n_iter=max_iter, n_nonzero_coefs=n_nonzero_coefs, n_jobs=n_jobs, learning_rate=learning_rate, kernel_init_len=kernel_init_len, verbose=1, dict_init=None, random_state=rng_global, ) ts = time() learned_dict = learned_dict.fit(X) iter_time.append((time() - ts) / max_iter) it_separator += 1 plot_separator.append(it_separator) # Batch learning mp_range = range(1, cpu_count() + 1) for p in mp_range: print( "\nProcessing ", max_iter, "iterations in batch mode, with", p, "processes:", end="", ) n_jobs = p learned_dict = MultivariateDictLearning( n_kernels=n_kernels, max_iter=max_iter, verbose=1, n_nonzero_coefs=n_nonzero_coefs, n_jobs=n_jobs, learning_rate=learning_rate, kernel_init_len=kernel_init_len, dict_init=None, random_state=rng_global, ) ts = time() learned_dict = learned_dict.fit(X) iter_time.append((time() - ts) / max_iter) it_separator += 1 plot_separator.append(it_separator) print("Done benchmarking") figname = "minibatch-performance" print("Plotting results in", figname) benchmarking_plot(figname, iter_time, plot_separator, minibatch_range, mp_range) print("Exiting.")
gpl-3.0
-6,148,818,530,008,922,000
27.547511
84
0.592645
false
3.379218
false
false
false
tferr/ASA
scripting-examples/3D_Analysis_ImageStack.py
1
2179
#@ImagePlus imp
#@LogService log

'''
This script uses an outdated API. For a modern replacement, have a look at
https://github.com/morphonets/SNT/tree/master/src/main/resources/script_templates/Neuroanatomy
'''

from sholl import Sholl_Analysis
from sholl import Options
from os.path import expanduser


def spacedDistances(start, end, step):
    """Retrieves a list of Sholl sampling distances"""
    leng = (end - start) / step + 1
    return [start + i * step for i in range(leng)]


# x,y,z coordinates of center of analysis
xc, yc, zc = 100, 100, 10

# Threshold values for segmentation
lower_t, upper_t = 88, 255

# Definitions for sampling distances
start_radius, end_radius, step_size, = 10, 100, 10

# Destination directory for saving plots and tables
export_path = expanduser("~")

sa = Sholl_Analysis()

if sa.validateImage(imp):

    # Specify plugin settings
    sa.setDescription(imp.getTitle(), True)
    sa.setExportPath(export_path, True)
    sa.setInteractiveMode(False)

    # Customize output options
    so = Options()
    so.setMetric(Options.MEDIAN_INTERS, False)  # "Sholl Results" table
    so.setPlotOutput(Options.NO_PLOTS)  # Which plots should be generated?
    so.setPromptChoice(Options.HIDE_SAVED_FILES, True)  # Main prompt option
    so.setPromptChoice(Options.OVERLAY_SHELLS, True)  # Main prompt option
    sa.setOptions(so)

    # Specify analysis settings
    sa.setCenter(xc, yc, zc)
    sa.setThreshold(lower_t, upper_t)

    # Retrieve intersection counts
    distances = spacedDistances(start_radius, end_radius, step_size)
    counts = sa.analyze3D(xc, yc, zc, distances, imp)

    if all(c == 0 for c in counts):
        log.warn("All intersection counts were zero")
    else:
        # Do something with sampled data if analysis was successful
        for idx, inters in enumerate(counts):
            log.info("r=%s: %s inters." % (distances[idx], inters))

        # Retrieve metrics
        sa.analyzeProfile(distances, counts, True)
        log.info("Analysis finished. Files saved to %s" % export_path)
        log.info("Sholl Results Table has not been saved")

else:
    log.error(imp.getTitle() + " is not a valid image")
gpl-3.0
-3,595,713,342,764,342,300
28.849315
98
0.693437
false
3.480831
false
false
false
jschaul/ComplexNetworkSim
examples/getting started code/first_visualisation.py
1
1471
'''
Complete code file only from ComplexNetworkSim's "getting started" documentation section,
for visualising a simulation. For explanations refer to the documentation page.
Current link: http://complexnetworksim.0sites.net/start.html
(documentation hosting may change place - see the PyPi index page.)

@author: Joe Schaul <joe.schaul@gmail.com>
'''

from ComplexNetworkSim import PlotCreator, AnimationCreator

directory = 'test'  #location of simulation result files
myName = "SIR"  #name that you wish to give your image output files
title = "Simulation of agent-based simple SIR"

#define three simulation-specific constants:
SUSCEPTIBLE = 0
INFECTED = 1
RECOVERED = 2

statesToMonitor = [INFECTED, SUSCEPTIBLE]  #even if we have states 0,1,2,3,... plot only 1 and 0
colours = ["r", "g"]  #state 1 in red, state 0 in green
labels = ["Infected", "Susceptible"]  #state 1 named 'Infected', 0 named 'Susceptible'

mapping = {SUSCEPTIBLE:"w", INFECTED:"r", RECOVERED:"0.4"}
trialToVisualise = 0

p = PlotCreator(directory, myName, title, statesToMonitor, colours, labels)
p.plotSimulation(show=False)
#show=True shows the graph directly,
#otherwise only a png file is created in the directory defined above.

visualiser = AnimationCreator(directory, myName, title, mapping, trial=trialToVisualise)
#gif speed can be changed by giving a parameter 'delay' (default=100) to AnimationCreator
visualiser.create_gif(verbose=True)
bsd-2-clause
2,120,398,871,978,929,200
44.03125
170
0.743712
false
3.358447
false
false
false
jonfoster/pyxb1
pyxb/__init__.py
1
10123
"""PyXB stands for Python U{W3C XML Schema<http://www.w3.org/XML/Schema>} Bindings, and is pronounced "pixbee". It enables translation between XML instance documents and Python objects following rules specified by an XML Schema document. This is the top-level entrypoint to the PyXB system. Importing this gets you all the L{exceptions<pyxb.exceptions_.PyXBException>}, and L{pyxb.namespace}. For more functionality, delve into these submodules: - L{pyxb.xmlschema} Module holding the L{structures<pyxb.xmlschema.structures>} that convert XMLSchema from a DOM model to a Python class model based on the XMLSchema components. Use this when you need to operate on the component model. - L{pyxb.binding} Module used to generate the bindings and at runtime to support the generated bindings. Use this if you need to use the binding model or content model. - L{pyxb.utils} Common utilities used in parsing, generating, and executing. The submodules must be imported separately. """ import logging _log = logging.getLogger(__name__) class cscRoot (object): """This little bundle of joy exists because in Python 2.6 it became an error to invoke C{object.__init__} with parameters (unless you also override C{__new__}, in which case it's only a warning. Whatever.). Since I'm bloody not going to check in every class whether C{super(Myclass,self)} refers to C{object} (even if I could figure out how to do that, 'cuz the obvious solutions don't work), we'll just make this thing the root of all U{cooperative super calling<http://www.geocities.com/foetsch/python/new_style_classes.htm#super>} hierarchies. The standard syntax in PyXB for this pattern is:: def method_csc (self, *args, **kw): super_fn = getattr(super(ThisClass, self), 'method_csc', lambda *a,**kw: self) return super_fn(*args, **kw) """ def __init__ (self, *args, **kw): # Oh gross. If this class descends from list (and probably dict), we # get here when object is *not* our direct superclass. In that case, # we have to pass the arguments on up, or the strings don't get # created right. Below is the only way I've figured out to detect the # situation. # # Note that we might also get here if you mix-in a class that used # object as a parent instead of cscRoot. Don't do that. Printing the # mro() is a decent way of identifying the problem. if issubclass(self.__class__.mro()[-2], ( list, dict )): super(cscRoot, self).__init__(*args) __version__ = '1.1.5-DEV' """The version of PyXB""" __url__ = 'http://pyxb.sourceforge.net' """The URL for PyXB's homepage""" __license__ = 'Apache License 2.0' # Bring in the exception hierarchy from exceptions_ import * # Bring in namespace stuff import namespace class BIND (object): """Bundle data for automated binding generation. Instances of this class capture positional and keyword arguments that are used to create binding instances based on context. For example, if C{w} is an instance of a complex type whose C{option} element is declared to be an anonymous class with simple content of type integer and an attribute of C{units}, a correct assignment to that element could be achieved with:: w.option = BIND(54, units="m") """ __args = None __kw = None def __init__ (self, *args, **kw): """Cache parameters for subsequent binding creation. Invoke just as you would the factory for a binding class.""" self.__args = args self.__kw = kw def createInstance (self, factory, **kw): """Invoke the given factory method. Position arguments to the factory are those cached in this instance. 
Keyword arguments are the ones on the command line, updated from the ones in this instance.""" kw.update(self.__kw) return factory(*self.__args, **kw) XMLStyle_minidom = 0 """Use xml.dom.minidom for XML processing. This is the fastest, but does not provide location information. It produces DOM instances.""" XMLStyle_saxdom = 1 """Use pyxb.utils.saxdom for XML processing. This is the slowest, but both provides location information and generates a DOM instance.""" XMLStyle_saxer = 2 """Use pyxb.binding.saxer when converting documents to binding instances. This style supports location information in the bindings. It produces binding instances directly, without going through a DOM stage, so is faster than XMLStyle_saxdom. However, since the pyxb.xmlschema.structures classes require a DOM model, XMLStyle_saxdom will be used for pyxb.utils.domutils.StringToDOM if this style is selected.""" _XMLStyle = XMLStyle_saxer """The current XML processing style.""" _XMLStyleMap = { 'minidom' : XMLStyle_minidom, 'saxdom' : XMLStyle_saxdom, 'saxer' : XMLStyle_saxer } _XMLStyleMapReverse = dict([ (_v, _k) for (_k, _v) in _XMLStyleMap.items() ]) _XMLStyle_envvar = 'PYXB_XML_STYLE' def _SetXMLStyle (style=None): """Set the interface used to parse XML content. This can be invoked within code. The system default of L{XMLStyle_saxer} can also be overridden at runtime by setting the environment variable C{PYXB_XML_STYLE} to one of C{minidom}, C{saxdom}, or C{saxer}. @param style: One of L{XMLStyle_minidom}, L{XMLStyle_saxdom}, L{XMLStyle_saxer}. If not provided, the system default is used. """ global _XMLStyle if style is None: import os style_name = os.environ.get(_XMLStyle_envvar) if style_name is None: style_name = 'saxer' style = _XMLStyleMap.get(style_name) if style is None: raise PyXBException('Bad value "%s" for %s' % (style_name, _XMLStyle_envvar)) if _XMLStyleMapReverse.get(style) is None: raise PyXBException('Bad value %s for _SetXMLStyle' % (style,)) _XMLStyle = style #_log.debug("XML style %s", _XMLStyleMapReverse.get(_XMLStyle)) _SetXMLStyle() # Global flag that we can use to determine whether optimization is active in # this session. There may be cases where we can bypass methods that just # check for things we don't care about in an optimized context _OptimizationActive = False try: assert False _OptimizationActive = True except: pass _CorruptionDetectionEnabled = not _OptimizationActive """If C{True}, blocks attempts to assign to attributes that are reserved for PyXB methods. Applies only at compilation time; dynamic changes are ignored. """ _GenerationRequiresValid = True def RequireValidWhenGenerating (value=None): """Query or set a flag that controls validation checking in XML generation. Normally any attempts to convert a binding instance to a DOM or XML representation requires that the binding validate against the content model, since only in this way can the content be generated in the correct order. In some cases it may be necessary or useful to generate a document from a binding that is incomplete. If validation is not required, the generated documents may not validate even if the content validates, because ordering constraints will be ignored. @keyword value: If absent or C{None}, no change is made; otherwise, this enables (C{True}) or disables (C{False}) the requirement that instances validate before being converted to XML. @type value: C{bool} @return: C{True} iff attempts to generate XML for a binding that does not validate should raise an exception. 
""" global _GenerationRequiresValid if value is None: return _GenerationRequiresValid if not isinstance(value, bool): raise TypeError(value) _GenerationRequiresValid = value return _GenerationRequiresValid _ParsingRequiresValid = True def RequireValidWhenParsing (value=None): """Query or set a flag that controls validation checking in XML parsing. Normally any attempts to convert XML to a binding instance to a binding instance requires that the document validate against the content model. In some cases it may be necessary or useful to process a document that is incomplete. If validation is not required, the generated documents may not validate even if the content validates, because ordering constraints will be ignored. @keyword value: If absent or C{None}, no change is made; otherwise, this enables (C{True}) or disables (C{False}) the requirement that documents validate when being converted to bindings. @type value: C{bool} @return: C{True} iff attempts to generate bindings for a document that does not validate should raise an exception.""" global _ParsingRequiresValid if value is None: return _ParsingRequiresValid if not isinstance(value, bool): raise TypeError(value) _ParsingRequiresValid = value return _ParsingRequiresValid _PreserveInputTimeZone = False def PreserveInputTimeZone (value=None): """Control whether time values are converted to UTC during input. The U{specification <http://www.w3.org/TR/xmlschema-2/#dateTime>} makes clear that timezoned times are in UTC and that times in other timezones are to be translated to UTC when converted from literal to value form. Provide an option to bypass this step, so the input timezone is preserved. @note: Naive processing of unnormalized times--i.e., ignoring the C{tzinfo} field--may result in errors.""" global _PreserveInputTimeZone if value is None: return _PreserveInputTimeZone if not isinstance(value, bool): raise TypeError(value) _PreserveInputTimeZone = value return _PreserveInputTimeZone _OutputEncoding = 'utf-8' """Default unicode encoding to use when creating output. Material being written to an XML parser is not output.""" _InputEncoding = 'utf-8' """Default unicode encoding to assume when decoding input. Material being written to an XML parser is treated as input.""" ## Local Variables: ## fill-column:78 ## End:
apache-2.0
-4,845,504,037,243,957,000
38.236434
89
0.708683
false
4.039505
false
false
false
Southpaw-TACTIC/Team
src/python/Lib/site-packages/PySide/examples/widgets/analogclock.py
1
3131
#!/usr/bin/env python

#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################

from PySide import QtCore, QtGui


class AnalogClock(QtGui.QWidget):
    hourHand = QtGui.QPolygon([
        QtCore.QPoint(7, 8),
        QtCore.QPoint(-7, 8),
        QtCore.QPoint(0, -40)
    ])

    minuteHand = QtGui.QPolygon([
        QtCore.QPoint(7, 8),
        QtCore.QPoint(-7, 8),
        QtCore.QPoint(0, -70)
    ])

    hourColor = QtGui.QColor(127, 0, 127)
    minuteColor = QtGui.QColor(0, 127, 127, 191)

    def __init__(self, parent=None):
        super(AnalogClock, self).__init__(parent)

        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.update)
        timer.start(1000)

        self.setWindowTitle("Analog Clock")
        self.resize(200, 200)

    def paintEvent(self, event):
        side = min(self.width(), self.height())
        time = QtCore.QTime.currentTime()

        painter = QtGui.QPainter(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.translate(self.width() / 2, self.height() / 2)
        painter.scale(side / 200.0, side / 200.0)

        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(AnalogClock.hourColor)

        painter.save()
        painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
        painter.drawConvexPolygon(AnalogClock.hourHand)
        painter.restore()

        painter.setPen(AnalogClock.hourColor)

        for i in range(12):
            painter.drawLine(88, 0, 96, 0)
            painter.rotate(30.0)

        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(AnalogClock.minuteColor)

        painter.save()
        painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
        painter.drawConvexPolygon(AnalogClock.minuteHand)
        painter.restore()

        painter.setPen(AnalogClock.minuteColor)

        for j in range(60):
            if (j % 5) != 0:
                painter.drawLine(92, 0, 96, 0)
            painter.rotate(6.0)


if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    clock = AnalogClock()
    clock.show()
    sys.exit(app.exec_())
epl-1.0
-2,085,368,576,522,494,700
30
77
0.608432
false
3.749701
false
false
false
google/clusterfuzz
src/local/butler/reproduce_tool/android.py
1
3980
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android emulator installation and management."""

import os
import time

from local.butler.reproduce_tool import errors
from local.butler.reproduce_tool import prompts
from platforms.android import adb
from platforms.android import device
from system import environment
from system import new_process

ADB_DEVICES_SEPARATOR_STRING = 'List of devices attached'
EMULATOR_RELATIVE_PATH = os.path.join('local', 'bin', 'android-sdk',
                                      'emulator', 'emulator')


def start_emulator():
  """Return a ProcessRunner configured to start the Android emulator."""
  root_dir = environment.get_value('ROOT_DIR')
  runner = new_process.ProcessRunner(
      os.path.join(root_dir, EMULATOR_RELATIVE_PATH),
      ['-avd', 'TestImage', '-writable-system', '-partition-size', '2048'])
  emulator_process = runner.run()

  # If we run adb commands too soon after the emulator starts, we may see
  # flake or errors. Delay a short while to account for this.
  # TODO(mbarbella): This is slow and flaky, but wait-for-device isn't usable if
  # another device is connected (as we don't know the serial yet). Find a better
  # solution.
  time.sleep(30)

  return emulator_process


def get_devices():
  """Get a list of all connected Android devices."""
  adb_runner = new_process.ProcessRunner(adb.get_adb_path())
  result = adb_runner.run_and_wait(additional_args=['devices'])

  if result.return_code:
    raise errors.ReproduceToolUnrecoverableError('Unable to run adb.')

  # Ignore non-device lines (those before "List of devices attached").
  store_devices = False
  devices = []
  for line in result.output.splitlines():
    if line == ADB_DEVICES_SEPARATOR_STRING:
      store_devices = True
      continue
    if not store_devices or not line:
      continue

    devices.append(line.split()[0])

  return devices


def prepare_environment(disable_android_setup):
  """Additional environment overrides needed to run on an Android device."""
  environment.set_value('OS_OVERRIDE', 'ANDROID')

  # Bail out if we can't determine which Android device to use.
  serial = environment.get_value('ANDROID_SERIAL')
  if not serial:
    devices = get_devices()
    if len(devices) == 1:
      serial = devices[0]
      environment.set_value('ANDROID_SERIAL', serial)
    elif not devices:
      raise errors.ReproduceToolUnrecoverableError(
          'No connected Android devices were detected. Run with the -e '
          'argument to use an emulator.')
    else:
      raise errors.ReproduceToolUnrecoverableError(
          'You have multiple Android devices or emulators connected. Please '
          'set the ANDROID_SERIAL environment variable and try again.\n\n'
          'Attached devices: ' + ', '.join(devices))

  print('Warning: this tool will make changes to settings on the connected '
        'Android device with serial {serial} that could result in data '
        'loss.'.format(serial=serial))
  willing_to_continue = prompts.get_boolean(
      'Are you sure you want to continue?')
  if not willing_to_continue:
    raise errors.ReproduceToolUnrecoverableError(
        'Bailing out to avoid changing settings on the connected device.')

  # Push the test case and build APK to the device.
  apk_path = environment.get_value('APP_PATH')
  device.update_build(
      apk_path, should_initialize_device=not disable_android_setup)

  device.push_testcases_to_device()
apache-2.0
-1,405,921,316,545,967,000
35.851852
80
0.711809
false
3.972056
false
false
false
lostinplace/filtered-intervaltree
setup.py
1
1509
from setuptools import setup, find_packages
from codecs import open
from os import path
import os

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'readme.md'), encoding='utf-8') as f:
    long_description = f.read()

with open(path.join(here, '.library-version'), encoding='utf-8') as f:
    existing_version = f.read()

with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    requirements = f.read().split('\n')

env_version = os.environ.get('LIBVER')

version = env_version or existing_version

setup(
    name='filtered-intervaltree',
    version=version,
    description='an intervaltree with early exit bloom filters',
    long_description=long_description,
    url='https://github.com/lostinplace/filtered-intervaltree',
    author='cwheeler',
    author_email='cmwhee@gmail.com',
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    requires=[],
    keywords='rbtree intervaltree bloomfilter',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['test']),
    install_requires=requirements,
    extras_require={
        'test': ['coverage'],
    }
)
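# Illustrative build/install sketch (not part of the original setup.py): the
# version normally comes from the .library-version file read above and can be
# overridden through the LIBVER environment variable checked by this script.
# The version number shown is a placeholder.
#
#     LIBVER=0.9.9 python setup.py sdist
#     pip install filtered-intervaltree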
mit
3,768,461,388,915,031,600
26.962963
72
0.666667
false
3.859335
false
true
false
Kpler/scrapy-logentries-extension
scrapylogentries/extension.py
1
1989
import logging
import os

from scrapy import signals
from scrapy.exceptions import NotConfigured
from logentries import LogentriesHandler
from logentriesadapter import LogentriesAdapter, ScrapingHubFilter

logger = logging.getLogger(__name__)


class LogentriesExtension(object):

    def __init__(self, token):
        self.token = token
        root = logging.getLogger()
        self.handler = LogentriesHandler(token)

        spider_id = os.environ.get('SCRAPY_SPIDER_ID')
        project_id = os.environ.get('SCRAPY_PROJECT_ID')
        job_id = os.environ.get('SCRAPY_JOB_ID')

        formatted = False
        if job_id is not None:
            formatted = True
            filter = ScrapingHubFilter({
                'project_id': project_id,
                'spider_id': spider_id,
                'job_id': job_id,
            })
            format = "%(name)s - %(levelname)s - [project_id=%(project_id)s spider_id=%(spider_id)s job_id=%(job_id)s] %(message)s"
            formatter = logging.Formatter(format)

            self.handler.addFilter(filter)
            self.handler.setFormatter(formatter)

        root.addHandler(self.handler)

        # NCA: not sure we want sensitive information like the token in the logs
        # Maybe use debug log level instead
        if formatted:
            logger.info('Logentries activated with token {} and custom SH format'.format(token))
        else:
            logger.info('Logentries activated with token {} and no custom SH format'.format(token))

    @classmethod
    def from_crawler(cls, crawler):
        # first check if the extension should be enabled and raise
        # NotConfigured otherwise
        token = crawler.settings.get('LOGENTRIES_TOKEN')
        if not token:
            raise NotConfigured

        # instantiate the extension object
        ext = cls(token)

        # return the extension object
        return ext

# vim: syntax=python:sws=4:sw=4:et:
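# Illustrative wiring sketch (not part of the original module): a Scrapy project
# would enable this extension from its settings.py. The dotted path below assumes
# the package layout of this repository, and the priority value 500 is arbitrary;
# LOGENTRIES_TOKEN is the setting read by from_crawler() above.
#
#     EXTENSIONS = {
#         'scrapylogentries.extension.LogentriesExtension': 500,
#     }
#     LOGENTRIES_TOKEN = 'your-logentries-token'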
mit
7,340,216,088,831,452,000
30.078125
131
0.612871
false
4.240938
false
false
false
teracyhq/flask-classy
test_classful/test_decorators.py
2
7323
from flask import Flask from .view_classes import DecoratedView from .view_classes import DecoratedBoldListView from .view_classes import DecoratedBoldItalicsListView from .view_classes import DecoratedListMemberView from .view_classes import DecoratedListFunctionAttributesView from .view_classes import DecoratedListMemberFunctionAttributesView from .view_classes import DecoratedAppendClassAttributeView from nose.tools import eq_ app = Flask("decorated") DecoratedView.register(app) DecoratedBoldListView.register(app) DecoratedBoldItalicsListView.register(app) DecoratedListMemberView.register(app) DecoratedListFunctionAttributesView.register(app) DecoratedListMemberFunctionAttributesView.register(app) DecoratedAppendClassAttributeView.register(app) client = app.test_client() def test_func_decorator_index(): resp = client.get('/decorated/') eq_(b"Index", resp.data) resp = client.get('/decorated') eq_(resp.status_code, 308) def test_func_decorator_get(): resp = client.get('/decorated/1234/') eq_(b"Get 1234", resp.data) resp = client.get('/decorated/1234') eq_(resp.status_code, 308) def test_recursive_decorator_post(): resp = client.post('/decorated/') eq_(b"Post", resp.data) resp = client.post('/decorated') eq_(resp.status_code, 308) def test_more_recursive_decorator_get(): resp = client.get('/decorated/get_some/') eq_(b"Get Some", resp.data) resp = client.get('/decorated/get_some') eq_(resp.status_code, 308) def test_multiple_recursive_decorators_get(): resp = client.get('/decorated/get_this/') eq_(b"Get This", resp.data) resp = client.get('/decorated/get_this') eq_(resp.status_code, 308) def test_routes_with_recursive_decorators(): resp = client.get('/decorated/mixitup/') eq_(b"Mix It Up", resp.data) resp = client.get('/decorated/mixitup') eq_(resp.status_code, 308) def test_recursive_with_parameter(): resp = client.get('/decorated/someval/1234/') eq_(b"Someval 1234", resp.data) def test_recursive_with_route_with_parameter(): resp = client.get('/decorated/anotherval/1234/') eq_(b"Anotherval 1234", resp.data) def test_params_decorator(): resp = client.get('/decorated/params_decorator_method/') eq_(b"Params Decorator", resp.data) def test_params_decorator_delete(): resp = client.delete('/decorated/1234/') eq_(b"Params Decorator Delete 1234", resp.data) resp = client.delete('/decorated/1234') eq_(resp.status_code, 308) def test_decorator_bold_list_get(): """Tests that the get route is wrapped in bold""" resp = client.get('/decorated_bold_list_view/1234/') eq_(b'<b>' in resp.data, True) eq_(b'</b>' in resp.data, True) eq_(b'<b>Get 1234</b>', resp.data) resp = client.get('/decorated_bold_list_view/1234') eq_(resp.status_code, 308) def test_decorator_bold_list_index(): """Tests that the index route is wrapped in bold""" resp = client.get('/decorated_bold_list_view/') eq_(b'<b>' in resp.data, True) eq_(b'</b>' in resp.data, True) eq_(b'<b>Index</b>', resp.data) def test_decorator_bold_italics_list_get(): """Tests that the get route is wrapped in bold and italics""" resp = client.get('/decorated_bold_italics_list_view/1234/') eq_(b'<i>' in resp.data, True) eq_(b'</i>' in resp.data, True) eq_(b'<b>' in resp.data, True) eq_(b'</b>' in resp.data, True) eq_(b'<b><i>Get 1234</i></b>', resp.data) resp = client.get('/decorated_bold_italics_list_view/1234') eq_(resp.status_code, 308) def test_decorator_bold_italics_list_index(): """Tests that the index route is wrapped in bold and italics""" resp = client.get('/decorated_bold_italics_list_view/') eq_(b'<i>' in resp.data, True) eq_(b'</i>' in resp.data, 
True) eq_(b'<b>' in resp.data, True) eq_(b'</b>' in resp.data, True) eq_(b'<b><i>Index</i></b>', resp.data) def test_decorator_list_member_index(): """ Tests that the index route is wrapped in bold, italics and paragraph """ resp = client.get('/decorated_list_member_view/') eq_(b'<i>' in resp.data, True) eq_(b'</i>' in resp.data, True) eq_(b'<b>' in resp.data, True) eq_(b'</b>' in resp.data, True) eq_(b'<p>' not in resp.data, True) eq_(b'</p>' not in resp.data, True) eq_(b'<b><i>Index</i></b>', resp.data) def test_decorator_list_member_get(): """Tests the ordering of decorators""" resp = client.get('/decorated_list_member_view/1234/') eq_(b'<b>', resp.data[:3]) eq_(b'<i>', resp.data[3:6]) eq_(b'<p>', resp.data[6:9]) eq_(b'</p>', resp.data[-12:-8]) eq_(b'</i>', resp.data[-8:-4]) eq_(b'</b>', resp.data[-4:]) eq_(b'<b><i><p>Get 1234</p></i></b>', resp.data) resp = client.get('/decorated_list_member_view/1234') eq_(resp.status_code, 308) def test_decorator_list_function_attributes_get(): """ Verify list of decorators with attributes modify all functions in FlaskView """ resp = client.get('/decorated_list_function_attributes_view/1234/') eq_(b'Get 1234' in resp.data, True) eq_(b'<i><b>Get 1234</b></i>', resp.data) eq_(hasattr( app.view_functions['DecoratedListFunctionAttributesView:get'], 'eggs'), True) eq_('scrambled', app.view_functions['DecoratedListFunctionAttributesView:get'].eggs) resp = client.get('/decorated_list_function_attributes_view/1234') eq_(resp.status_code, 308) def test_decorator_list_function_attributes_index(): """ Verify list of decorators with attributes modify all functions in FlaskView """ resp = client.get('/decorated_list_function_attributes_view/') eq_(b'Index' in resp.data, True) eq_(b'<i>Index</i>', resp.data) eq_(hasattr( app.view_functions['DecoratedListFunctionAttributesView:index'], 'eggs'), True) eq_('scrambled', app.view_functions['DecoratedListFunctionAttributesView:index'].eggs) def test_decorator_list_member_function_attributes_get(): """Verify decorator with attributes does not modify other members""" resp = client.get('/decorated_list_member_function_attributes_view/4321/') eq_(b'Get 4321' in resp.data, True) eq_(b'<i><b>Get 4321</b></i>', resp.data) eq_( hasattr( app.view_functions[ 'DecoratedListMemberFunctionAttributesView:get' ], 'eggs'), False) resp = client.get('/decorated_list_member_function_attributes_view/4321') eq_(resp.status_code, 308) def test_decorator_list_member_function_attributes_index(): """Verify decorator with attributes modify decorated memeber functions""" resp = client.get('/decorated_list_member_function_attributes_view/') eq_(b'Index' in resp.data, True) eq_(b'<i>Index</i>', resp.data) eq_(hasattr( app.view_functions[ 'DecoratedListMemberFunctionAttributesView:index' ], 'eggs'), True) eq_('scrambled', app.view_functions[ 'DecoratedListMemberFunctionAttributesView:index' ].eggs) def test_decorator_append_class_attribute_index(): resp = client.get('/decorated_append_class_attribute_view/') eq_(b'Index (this is a test)', resp.data)
bsd-3-clause
495,681,920,520,354,400
32.286364
79
0.654377
false
3.319583
true
false
false
iwm911/plaso
plaso/formatters/mcafeeav.py
1
1238
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for the McAfee AV Logs files."""

from plaso.lib import eventdata


class McafeeAccessProtectionLogEventFormatter(eventdata.EventFormatter):
  """Class that formats the McAfee Access Protection Log events."""

  DATA_TYPE = 'av:mcafee:accessprotectionlog'

  # The format string.
  FORMAT_STRING = (u'File Name: {filename} User: {username} {trigger_location} '
                   u'{status} {rule} {action}')
  FORMAT_STRING_SHORT = u'{filename} {action}'

  SOURCE_LONG = 'McAfee Access Protection Log'
  SOURCE_SHORT = 'LOG'
apache-2.0
-6,182,131,290,445,977,000
35.411765
80
0.731018
false
3.77439
false
false
false
jarrodmcc/OpenFermion
src/openfermion/utils/_sparse_tools_test.py
1
55877
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for sparse_tools.py.""" from __future__ import absolute_import, division import numpy import unittest from numpy.linalg import multi_dot from scipy.linalg import eigh, norm from scipy.sparse import csc_matrix from scipy.special import comb from openfermion.hamiltonians import (fermi_hubbard, jellium_model, wigner_seitz_length_scale) from openfermion.ops import FermionOperator, up_index, down_index from openfermion.transforms import (get_fermion_operator, get_sparse_operator, jordan_wigner) from openfermion.utils import ( Grid, fourier_transform, normal_ordered, number_operator) from openfermion.utils._jellium_hf_state import ( lowest_single_particle_energy_states) from openfermion.utils._linear_qubit_operator import LinearQubitOperator from openfermion.utils._slater_determinants_test import ( random_quadratic_hamiltonian) from openfermion.utils._sparse_tools import * class SparseOperatorTest(unittest.TestCase): def test_kronecker_operators(self): self.assertAlmostEqual( 0, numpy.amax(numpy.absolute( kronecker_operators(3 * [identity_csc]) - kronecker_operators(3 * [pauli_x_csc]) ** 2))) def test_qubit_jw_fermion_integration(self): # Initialize a random fermionic operator. fermion_operator = FermionOperator(((3, 1), (2, 1), (1, 0), (0, 0)), -4.3) fermion_operator += FermionOperator(((3, 1), (1, 0)), 8.17) fermion_operator += 3.2 * FermionOperator() # Map to qubits and compare matrix versions. 
qubit_operator = jordan_wigner(fermion_operator) qubit_sparse = get_sparse_operator(qubit_operator) qubit_spectrum = sparse_eigenspectrum(qubit_sparse) fermion_sparse = jordan_wigner_sparse(fermion_operator) fermion_spectrum = sparse_eigenspectrum(fermion_sparse) self.assertAlmostEqual(0., numpy.amax( numpy.absolute(fermion_spectrum - qubit_spectrum))) class JordanWignerSparseTest(unittest.TestCase): def test_jw_sparse_0create(self): expected = csc_matrix(([1], ([1], [0])), shape=(2, 2)) self.assertTrue(numpy.allclose( jordan_wigner_sparse(FermionOperator('0^')).A, expected.A)) def test_jw_sparse_1annihilate(self): expected = csc_matrix(([1, -1], ([0, 2], [1, 3])), shape=(4, 4)) self.assertTrue(numpy.allclose( jordan_wigner_sparse(FermionOperator('1')).A, expected.A)) def test_jw_sparse_0create_2annihilate(self): expected = csc_matrix(([-1j, 1j], ([4, 6], [1, 3])), shape=(8, 8)) self.assertTrue(numpy.allclose( jordan_wigner_sparse(FermionOperator('0^ 2', -1j)).A, expected.A)) def test_jw_sparse_0create_3annihilate(self): expected = csc_matrix(([-1j, 1j, 1j, -1j], ([8, 10, 12, 14], [1, 3, 5, 7])), shape=(16, 16)) self.assertTrue(numpy.allclose( jordan_wigner_sparse(FermionOperator('0^ 3', -1j)).A, expected.A)) def test_jw_sparse_twobody(self): expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16)) self.assertTrue(numpy.allclose( jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A, expected.A)) def test_qubit_operator_sparse_n_qubits_too_small(self): with self.assertRaises(ValueError): qubit_operator_sparse(QubitOperator('X3'), 1) def test_qubit_operator_sparse_n_qubits_not_specified(self): expected = csc_matrix(([1, 1, 1, 1], ([1, 0, 3, 2], [0, 1, 2, 3])), shape=(4, 4)) self.assertTrue(numpy.allclose( qubit_operator_sparse(QubitOperator('X1')).A, expected.A)) def test_get_linear_qubit_operator_diagonal_wrong_n(self): """Testing with wrong n_qubits.""" with self.assertRaises(ValueError): get_linear_qubit_operator_diagonal(QubitOperator('X3'), 1) def test_get_linear_qubit_operator_diagonal_0(self): """Testing with zero term.""" qubit_operator = QubitOperator.zero() vec_expected = numpy.zeros(8) self.assertTrue(numpy.allclose( get_linear_qubit_operator_diagonal(qubit_operator, 3), vec_expected)) def test_get_linear_qubit_operator_diagonal_zero(self): """Get zero diagonals from get_linear_qubit_operator_diagonal.""" qubit_operator = QubitOperator('X0 Y1') vec_expected = numpy.zeros(4) self.assertTrue(numpy.allclose( get_linear_qubit_operator_diagonal(qubit_operator), vec_expected)) def test_get_linear_qubit_operator_diagonal_non_zero(self): """Get non zero diagonals from get_linear_qubit_operator_diagonal.""" qubit_operator = QubitOperator('Z0 Z2') vec_expected = numpy.array([1, -1, 1, -1, -1, 1, -1, 1]) self.assertTrue(numpy.allclose( get_linear_qubit_operator_diagonal(qubit_operator), vec_expected)) def test_get_linear_qubit_operator_diagonal_cmp_zero(self): """Compare get_linear_qubit_operator_diagonal with get_linear_qubit_operator.""" qubit_operator = QubitOperator('Z1 X2 Y5') vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) * numpy.eye(2 ** 6)) self.assertTrue(numpy.allclose( get_linear_qubit_operator_diagonal(qubit_operator), vec_expected)) def test_get_linear_qubit_operator_diagonal_cmp_non_zero(self): """Compare get_linear_qubit_operator_diagonal with get_linear_qubit_operator.""" qubit_operator = QubitOperator('Z1 Z2 Z5') vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) * numpy.eye(2 ** 6)) self.assertTrue(numpy.allclose( 
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected)) class ComputationalBasisStateTest(unittest.TestCase): def test_computational_basis_state(self): comp_basis_state = jw_configuration_state([0, 2, 5], 7) self.assertAlmostEqual(comp_basis_state[82], 1.) self.assertAlmostEqual(sum(comp_basis_state), 1.) class JWHartreeFockStateTest(unittest.TestCase): def test_jw_hartree_fock_state(self): hartree_fock_state = jw_hartree_fock_state(3, 7) self.assertAlmostEqual(hartree_fock_state[112], 1.) self.assertAlmostEqual(sum(hartree_fock_state), 1.) class JWNumberIndicesTest(unittest.TestCase): def test_jw_sparse_index(self): """Test the indexing scheme for selecting specific particle numbers""" expected = [1, 2] calculated_indices = jw_number_indices(1, 2) self.assertEqual(expected, calculated_indices) expected = [3] calculated_indices = jw_number_indices(2, 2) self.assertEqual(expected, calculated_indices) def test_jw_number_indices(self): n_qubits = numpy.random.randint(1, 12) n_particles = numpy.random.randint(n_qubits + 1) number_indices = jw_number_indices(n_particles, n_qubits) subspace_dimension = len(number_indices) self.assertEqual(subspace_dimension, comb(n_qubits, n_particles)) for index in number_indices: binary_string = bin(index)[2:].zfill(n_qubits) n_ones = binary_string.count('1') self.assertEqual(n_ones, n_particles) class JWSzIndicesTest(unittest.TestCase): def test_jw_sz_indices(self): """Test the indexing scheme for selecting specific sz value""" def sz_integer(bitstring): """Computes the total number of occupied up sites minus the total number of occupied down sites.""" n_sites = len(bitstring) // 2 n_up = len([site for site in range(n_sites) if bitstring[up_index(site)] == '1']) n_down = len([site for site in range(n_sites) if bitstring[down_index(site)] == '1']) return n_up - n_down def jw_sz_indices_brute_force(sz_value, n_qubits): """Computes the correct indices by brute force.""" indices = [] for bitstring in itertools.product(['0', '1'], repeat=n_qubits): if (sz_integer(bitstring) == int(2 * sz_value)): indices.append(int(''.join(bitstring), 2)) return indices # General test n_sites = numpy.random.randint(1, 10) n_qubits = 2 * n_sites sz_int = ((-1) ** numpy.random.randint(2) * numpy.random.randint(n_sites + 1)) sz_value = sz_int / 2. 
correct_indices = jw_sz_indices_brute_force(sz_value, n_qubits) subspace_dimension = len(correct_indices) calculated_indices = jw_sz_indices(sz_value, n_qubits) self.assertEqual(len(calculated_indices), subspace_dimension) for index in calculated_indices: binary_string = bin(index)[2:].zfill(n_qubits) self.assertEqual(sz_integer(binary_string), sz_int) # Test fixing particle number n_particles = abs(sz_int) correct_indices = [index for index in correct_indices if bin(index)[2:].count('1') == n_particles] subspace_dimension = len(correct_indices) calculated_indices = jw_sz_indices(sz_value, n_qubits, n_electrons=n_particles) self.assertEqual(len(calculated_indices), subspace_dimension) for index in calculated_indices: binary_string = bin(index)[2:].zfill(n_qubits) self.assertEqual(sz_integer(binary_string), sz_int) self.assertEqual(binary_string.count('1'), n_particles) # Test exceptions with self.assertRaises(ValueError): indices = jw_sz_indices(3, 3) with self.assertRaises(ValueError): indices = jw_sz_indices(3.1, 4) with self.assertRaises(ValueError): indices = jw_sz_indices(1.5, 8, n_electrons=6) with self.assertRaises(ValueError): indices = jw_sz_indices(1.5, 8, n_electrons=1) class JWNumberRestrictOperatorTest(unittest.TestCase): def test_jw_restrict_operator(self): """Test the scheme for restricting JW encoded operators to number""" # Make a Hamiltonian that cares mostly about number of electrons n_qubits = 6 target_electrons = 3 penalty_const = 100. number_sparse = jordan_wigner_sparse(number_operator(n_qubits)) bias_sparse = jordan_wigner_sparse( sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i in range(n_qubits)], FermionOperator())) hamiltonian_sparse = penalty_const * ( number_sparse - target_electrons * scipy.sparse.identity(2**n_qubits)).dot( number_sparse - target_electrons * scipy.sparse.identity(2**n_qubits)) + bias_sparse restricted_hamiltonian = jw_number_restrict_operator( hamiltonian_sparse, target_electrons, n_qubits) true_eigvals, _ = eigh(hamiltonian_sparse.A) test_eigvals, _ = eigh(restricted_hamiltonian.A) self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]), 0.0) def test_jw_restrict_operator_hopping_to_1_particle(self): hop = FermionOperator('3^ 1') + FermionOperator('1^ 3') hop_sparse = jordan_wigner_sparse(hop, n_qubits=4) hop_restrict = jw_number_restrict_operator(hop_sparse, 1, n_qubits=4) expected = csc_matrix(([1, 1], ([0, 2], [2, 0])), shape=(4, 4)) self.assertTrue(numpy.allclose(hop_restrict.A, expected.A)) def test_jw_restrict_operator_interaction_to_1_particle(self): interaction = FermionOperator('3^ 2^ 4 1') interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6) interaction_restrict = jw_number_restrict_operator( interaction_sparse, 1, n_qubits=6) expected = csc_matrix(([], ([], [])), shape=(6, 6)) self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A)) def test_jw_restrict_operator_interaction_to_2_particles(self): interaction = (FermionOperator('3^ 2^ 4 1') + FermionOperator('4^ 1^ 3 2')) interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6) interaction_restrict = jw_number_restrict_operator( interaction_sparse, 2, n_qubits=6) dim = 6 * 5 // 2 # shape of new sparse array # 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa; # in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th. 
expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim)) self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A)) def test_jw_restrict_operator_hopping_to_1_particle_default_nqubits(self): interaction = (FermionOperator('3^ 2^ 4 1') + FermionOperator('4^ 1^ 3 2')) interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6) # n_qubits should default to 6 interaction_restrict = jw_number_restrict_operator( interaction_sparse, 2) dim = 6 * 5 // 2 # shape of new sparse array # 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa; # in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th. expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim)) self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A)) def test_jw_restrict_jellium_ground_state_integration(self): n_qubits = 4 grid = Grid(dimensions=1, length=n_qubits, scale=1.0) jellium_hamiltonian = jordan_wigner_sparse( jellium_model(grid, spinless=False)) # 2 * n_qubits because of spin number_sparse = jordan_wigner_sparse(number_operator(2 * n_qubits)) restricted_number = jw_number_restrict_operator(number_sparse, 2) restricted_jellium_hamiltonian = jw_number_restrict_operator( jellium_hamiltonian, 2) energy, ground_state = get_ground_state(restricted_jellium_hamiltonian) number_expectation = expectation(restricted_number, ground_state) self.assertAlmostEqual(number_expectation, 2) class JWSzRestrictOperatorTest(unittest.TestCase): def test_restrict_interaction_hamiltonian(self): """Test restricting a coulomb repulsion Hamiltonian to a specified Sz manifold.""" x_dim = 3 y_dim = 2 interaction_term = fermi_hubbard(x_dim, y_dim, 0., 1.) interaction_sparse = get_sparse_operator(interaction_term) sz_value = 2 interaction_restricted = jw_sz_restrict_operator(interaction_sparse, sz_value) restricted_interaction_values = set([ int(value.real) for value in interaction_restricted.diagonal()]) # Originally the eigenvalues run from 0 to 6 but after restricting, # they should run from 0 to 2 self.assertEqual(restricted_interaction_values, {0, 1, 2}) class JWNumberRestrictStateTest(unittest.TestCase): def test_jw_number_restrict_state(self): n_qubits = numpy.random.randint(1, 12) n_particles = numpy.random.randint(0, n_qubits) number_indices = jw_number_indices(n_particles, n_qubits) subspace_dimension = len(number_indices) # Create a vector that has entry 1 for every coordinate with # the specified particle number, and 0 everywhere else vector = numpy.zeros(2**n_qubits, dtype=float) vector[number_indices] = 1 # Restrict the vector restricted_vector = jw_number_restrict_state(vector, n_particles) # Check that it has the correct shape self.assertEqual(restricted_vector.shape[0], subspace_dimension) # Check that it has the same norm as the original vector self.assertAlmostEqual(inner_product(vector, vector), inner_product(restricted_vector, restricted_vector)) class JWSzRestrictStateTest(unittest.TestCase): def test_jw_sz_restrict_state(self): n_sites = numpy.random.randint(1, 10) n_qubits = 2 * n_sites sz_int = ((-1) ** numpy.random.randint(2) * numpy.random.randint(n_sites + 1)) sz_value = sz_int / 2 sz_indices = jw_sz_indices(sz_value, n_qubits) subspace_dimension = len(sz_indices) # Create a vector that has entry 1 for every coordinate in # the specified subspace, and 0 everywhere else vector = numpy.zeros(2**n_qubits, dtype=float) vector[sz_indices] = 1 # Restrict the vector restricted_vector = jw_sz_restrict_state(vector, sz_value) # Check that it has the correct shape 
self.assertEqual(restricted_vector.shape[0], subspace_dimension) # Check that it has the same norm as the original vector self.assertAlmostEqual(inner_product(vector, vector), inner_product(restricted_vector, restricted_vector)) class JWGetGroundStatesByParticleNumberTest(unittest.TestCase): def test_jw_get_ground_state_at_particle_number_herm_conserving(self): # Initialize a particle-number-conserving Hermitian operator ferm_op = FermionOperator('0^ 1') + FermionOperator('1^ 0') + \ FermionOperator('1^ 2') + FermionOperator('2^ 1') + \ FermionOperator('1^ 3', -.4) + FermionOperator('3^ 1', -.4) jw_hamiltonian = jordan_wigner(ferm_op) sparse_operator = get_sparse_operator(jw_hamiltonian) n_qubits = 4 num_op = get_sparse_operator(number_operator(n_qubits)) # Test each possible particle number for particle_number in range(n_qubits): # Get the ground energy and ground state at this particle number energy, state = jw_get_ground_state_at_particle_number( sparse_operator, particle_number) # Check that it's an eigenvector with the correct eigenvalue self.assertTrue( numpy.allclose(sparse_operator.dot(state), energy * state)) # Check that it has the correct particle number num = expectation(num_op, state) self.assertAlmostEqual(num, particle_number) def test_jw_get_ground_state_at_particle_number_hubbard(self): model = fermi_hubbard(2, 2, 1.0, 4.0) sparse_operator = get_sparse_operator(model) n_qubits = count_qubits(model) num_op = get_sparse_operator(number_operator(n_qubits)) # Test each possible particle number for particle_number in range(n_qubits): # Get the ground energy and ground state at this particle number energy, state = jw_get_ground_state_at_particle_number( sparse_operator, particle_number) # Check that it's an eigenvector with the correct eigenvalue self.assertTrue( numpy.allclose(sparse_operator.dot(state), energy * state)) # Check that it has the correct particle number num = expectation(num_op, state) self.assertAlmostEqual(num, particle_number) def test_jw_get_ground_state_at_particle_number_jellium(self): grid = Grid(2, 2, 1.0) model = jellium_model(grid, spinless=True, plane_wave=False) sparse_operator = get_sparse_operator(model) n_qubits = count_qubits(model) num_op = get_sparse_operator(number_operator(n_qubits)) # Test each possible particle number for particle_number in range(n_qubits): # Get the ground energy and ground state at this particle number energy, state = jw_get_ground_state_at_particle_number( sparse_operator, particle_number) # Check that it's an eigenvector with the correct eigenvalue self.assertTrue( numpy.allclose(sparse_operator.dot(state), energy * state)) # Check that it has the correct particle number num = expectation(num_op, state) self.assertAlmostEqual(num, particle_number) class JWGetGaussianStateTest(unittest.TestCase): def setUp(self): self.n_qubits_range = range(2, 10) def test_ground_state_particle_conserving(self): """Test getting the ground state of a Hamiltonian that conserves particle number.""" for n_qubits in self.n_qubits_range: # Initialize a particle-number-conserving Hamiltonian quadratic_hamiltonian = random_quadratic_hamiltonian( n_qubits, True) # Compute the true ground state sparse_operator = get_sparse_operator(quadratic_hamiltonian) ground_energy, ground_state = get_ground_state(sparse_operator) # Compute the ground state using the circuit circuit_energy, circuit_state = jw_get_gaussian_state( quadratic_hamiltonian) # Check that the energies match self.assertAlmostEqual(ground_energy, circuit_energy) # Check that the state 
obtained using the circuit is a ground state difference = (sparse_operator * circuit_state - ground_energy * circuit_state) discrepancy = numpy.amax(numpy.abs(difference)) self.assertAlmostEqual(discrepancy, 0) def test_ground_state_particle_nonconserving(self): """Test getting the ground state of a Hamiltonian that does not conserve particle number.""" for n_qubits in self.n_qubits_range: # Initialize a non-particle-number-conserving Hamiltonian quadratic_hamiltonian = random_quadratic_hamiltonian( n_qubits, False) # Compute the true ground state sparse_operator = get_sparse_operator(quadratic_hamiltonian) ground_energy, ground_state = get_ground_state(sparse_operator) # Compute the ground state using the circuit circuit_energy, circuit_state = ( jw_get_gaussian_state(quadratic_hamiltonian)) # Check that the energies match self.assertAlmostEqual(ground_energy, circuit_energy) # Check that the state obtained using the circuit is a ground state difference = (sparse_operator * circuit_state - ground_energy * circuit_state) discrepancy = numpy.amax(numpy.abs(difference)) self.assertAlmostEqual(discrepancy, 0) def test_excited_state_particle_conserving(self): """Test getting an excited state of a Hamiltonian that conserves particle number.""" for n_qubits in self.n_qubits_range: # Initialize a particle-number-conserving Hamiltonian quadratic_hamiltonian = random_quadratic_hamiltonian( n_qubits, True) # Pick some orbitals to occupy num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1) occupied_orbitals = numpy.random.choice( range(n_qubits), num_occupied_orbitals, False) # Compute the Gaussian state circuit_energy, gaussian_state = jw_get_gaussian_state( quadratic_hamiltonian, occupied_orbitals) # Compute the true energy orbital_energies, constant = ( quadratic_hamiltonian.orbital_energies()) energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant # Check that the energies match self.assertAlmostEqual(energy, circuit_energy) # Check that the state obtained using the circuit is an eigenstate # with the correct eigenvalue sparse_operator = get_sparse_operator(quadratic_hamiltonian) difference = (sparse_operator * gaussian_state - energy * gaussian_state) discrepancy = numpy.amax(numpy.abs(difference)) self.assertAlmostEqual(discrepancy, 0) def test_excited_state_particle_nonconserving(self): """Test getting an excited state of a Hamiltonian that conserves particle number.""" for n_qubits in self.n_qubits_range: # Initialize a non-particle-number-conserving Hamiltonian quadratic_hamiltonian = random_quadratic_hamiltonian( n_qubits, False) # Pick some orbitals to occupy num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1) occupied_orbitals = numpy.random.choice( range(n_qubits), num_occupied_orbitals, False) # Compute the Gaussian state circuit_energy, gaussian_state = jw_get_gaussian_state( quadratic_hamiltonian, occupied_orbitals) # Compute the true energy orbital_energies, constant = ( quadratic_hamiltonian.orbital_energies()) energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant # Check that the energies match self.assertAlmostEqual(energy, circuit_energy) # Check that the state obtained using the circuit is an eigenstate # with the correct eigenvalue sparse_operator = get_sparse_operator(quadratic_hamiltonian) difference = (sparse_operator * gaussian_state - energy * gaussian_state) discrepancy = numpy.amax(numpy.abs(difference)) self.assertAlmostEqual(discrepancy, 0) def test_bad_input(self): """Test bad input.""" with self.assertRaises(ValueError): 
energy, state = jw_get_gaussian_state('a') class JWSparseGivensRotationTest(unittest.TestCase): def test_bad_input(self): with self.assertRaises(ValueError): givens_matrix = jw_sparse_givens_rotation(0, 2, 1., 1., 5) with self.assertRaises(ValueError): givens_matrix = jw_sparse_givens_rotation(4, 5, 1., 1., 5) class JWSlaterDeterminantTest(unittest.TestCase): def test_hadamard_transform(self): r"""Test creating the states 1 / sqrt(2) (a^\dagger_0 + a^\dagger_1) |vac> and 1 / sqrt(2) (a^\dagger_0 - a^\dagger_1) |vac>. """ slater_determinant_matrix = numpy.array([[1., 1.]]) / numpy.sqrt(2.) slater_determinant = jw_slater_determinant(slater_determinant_matrix) self.assertAlmostEqual(slater_determinant[1], slater_determinant[2]) self.assertAlmostEqual(abs(slater_determinant[1]), 1. / numpy.sqrt(2.)) self.assertAlmostEqual(abs(slater_determinant[0]), 0.) self.assertAlmostEqual(abs(slater_determinant[3]), 0.) slater_determinant_matrix = numpy.array([[1., -1.]]) / numpy.sqrt(2.) slater_determinant = jw_slater_determinant(slater_determinant_matrix) self.assertAlmostEqual(slater_determinant[1], -slater_determinant[2]) self.assertAlmostEqual(abs(slater_determinant[1]), 1. / numpy.sqrt(2.)) self.assertAlmostEqual(abs(slater_determinant[0]), 0.) self.assertAlmostEqual(abs(slater_determinant[3]), 0.) class GroundStateTest(unittest.TestCase): def test_get_ground_state_hermitian(self): ground = get_ground_state(get_sparse_operator( QubitOperator('Y0 X1') + QubitOperator('Z0 Z1'))) expected_state = csc_matrix(([1j, 1], ([1, 2], [0, 0])), shape=(4, 1)).A expected_state /= numpy.sqrt(2.0) self.assertAlmostEqual(ground[0], -2) self.assertAlmostEqual( numpy.absolute( expected_state.T.conj().dot(ground[1]))[0], 1.) class ExpectationTest(unittest.TestCase): def test_expectation_correct_sparse_matrix(self): operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2) vector = numpy.array([0., 1.j, 0., 1.j]) self.assertAlmostEqual(expectation(operator, vector), 2.0) density_matrix = scipy.sparse.csc_matrix( numpy.outer(vector, numpy.conjugate(vector))) self.assertAlmostEqual(expectation(operator, density_matrix), 2.0) def test_expectation_correct_linear_operator(self): operator = LinearQubitOperator(QubitOperator('X0'), n_qubits=2) vector = numpy.array([0., 1.j, 0., 1.j]) self.assertAlmostEqual(expectation(operator, vector), 2.0) def test_expectation_handles_column_vector(self): operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2) vector = numpy.array([[0.], [1.j], [0.], [1.j]]) self.assertAlmostEqual(expectation(operator, vector), 2.0) def test_expectation_correct_zero(self): operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2) vector = numpy.array([1j, -1j, -1j, -1j]) self.assertAlmostEqual(expectation(operator, vector), 0.0) class VarianceTest(unittest.TestCase): def test_variance_row_vector(self): X = pauli_matrix_map['X'] Z = pauli_matrix_map['Z'] zero = numpy.array([1., 0.]) plus = numpy.array([1., 1.]) / numpy.sqrt(2) minus = numpy.array([1., -1.]) / numpy.sqrt(2) self.assertAlmostEqual(variance(Z, zero), 0.) self.assertAlmostEqual(variance(X, zero), 1.) self.assertAlmostEqual(variance(Z, plus), 1.) self.assertAlmostEqual(variance(X, plus), 0.) self.assertAlmostEqual(variance(Z, minus), 1.) self.assertAlmostEqual(variance(X, minus), 0.) 
def test_variance_column_vector(self): X = pauli_matrix_map['X'] Z = pauli_matrix_map['Z'] zero = numpy.array([[1.], [0.]]) plus = numpy.array([[1.], [1.]]) / numpy.sqrt(2) minus = numpy.array([[1.], [-1.]]) / numpy.sqrt(2) self.assertAlmostEqual(variance(Z, zero), 0.) self.assertAlmostEqual(variance(X, zero), 1.) self.assertAlmostEqual(variance(Z, plus), 1.) self.assertAlmostEqual(variance(X, plus), 0.) self.assertAlmostEqual(variance(Z, minus), 1.) self.assertAlmostEqual(variance(X, minus), 0.) class ExpectationComputationalBasisStateTest(unittest.TestCase): def test_expectation_fermion_operator_single_number_terms(self): operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1') state = csc_matrix(([1], ([15], [0])), shape=(16, 1)) self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 1.9) def test_expectation_fermion_operator_two_number_terms(self): operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') + FermionOperator('2^ 1^ 2 1', -1.7)) state = csc_matrix(([1], ([6], [0])), shape=(16, 1)) self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 3.6) def test_expectation_identity_fermion_operator(self): operator = FermionOperator.identity() * 1.1 state = csc_matrix(([1], ([6], [0])), shape=(16, 1)) self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 1.1) def test_expectation_state_is_list_single_number_terms(self): operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1') state = [1, 1, 1, 1] self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 1.9) def test_expectation_state_is_list_fermion_operator_two_number_terms(self): operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') + FermionOperator('2^ 1^ 2 1', -1.7)) state = [0, 1, 1] self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 3.6) def test_expectation_state_is_list_identity_fermion_operator(self): operator = FermionOperator.identity() * 1.1 state = [0, 1, 1] self.assertAlmostEqual( expectation_computational_basis_state(operator, state), 1.1) def test_expectation_bad_operator_type(self): with self.assertRaises(TypeError): expectation_computational_basis_state( 'never', csc_matrix(([1], ([6], [0])), shape=(16, 1))) def test_expectation_qubit_operator_not_implemented(self): with self.assertRaises(NotImplementedError): expectation_computational_basis_state( QubitOperator(), csc_matrix(([1], ([6], [0])), shape=(16, 1))) class ExpectationDualBasisOperatorWithPlaneWaveBasisState(unittest.TestCase): def setUp(self): grid_length = 4 dimension = 1 wigner_seitz_radius = 10. self.spinless = True self.n_spatial_orbitals = grid_length ** dimension n_qubits = self.n_spatial_orbitals self.n_particles = 3 # Compute appropriate length scale and the corresponding grid. length_scale = wigner_seitz_length_scale( wigner_seitz_radius, self.n_particles, dimension) self.grid1 = Grid(dimension, grid_length, length_scale) # Get the occupied orbitals of the plane-wave basis Hartree-Fock state. 
hamiltonian = jellium_model(self.grid1, self.spinless, plane_wave=True) hamiltonian = normal_ordered(hamiltonian) hamiltonian.compress() occupied_states = numpy.array(lowest_single_particle_energy_states( hamiltonian, self.n_particles)) self.hf_state_index1 = numpy.sum(2 ** occupied_states) self.hf_state1 = numpy.zeros(2 ** n_qubits) self.hf_state1[self.hf_state_index1] = 1.0 self.orbital_occupations1 = [digit == '1' for digit in bin(self.hf_state_index1)[2:]][::-1] self.occupied_orbitals1 = [index for index, occupied in enumerate(self.orbital_occupations1) if occupied] self.reversed_occupied_orbitals1 = list(self.occupied_orbitals1) for i in range(len(self.reversed_occupied_orbitals1)): self.reversed_occupied_orbitals1[i] = -1 + int(numpy.log2( self.hf_state1.shape[0])) - self.reversed_occupied_orbitals1[i] self.reversed_hf_state_index1 = sum( 2 ** index for index in self.reversed_occupied_orbitals1) def test_1body_hopping_operator_1D(self): operator = FermionOperator('2^ 0') operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_1body_number_operator_1D(self): operator = FermionOperator('2^ 2') operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_2body_partial_number_operator_high_1D(self): operator = FermionOperator('2^ 1^ 2 0') operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_2body_partial_number_operator_mid_1D(self): operator = FermionOperator('1^ 0^ 1 2') operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_3body_double_number_operator_1D(self): operator = FermionOperator('3^ 2^ 1^ 3 1 0') operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_2body_adjacent_number_operator_1D(self): operator = FermionOperator('3^ 2^ 2 1') operator = normal_ordered(operator) transformed_operator = 
normal_ordered(fourier_transform( operator, self.grid1, self.spinless)) expected = expectation(get_sparse_operator( transformed_operator), self.hf_state1) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals1, self.n_spatial_orbitals, self.grid1, self.spinless) self.assertAlmostEqual(expected, actual) def test_1d5_with_spin_10particles(self): dimension = 1 grid_length = 5 n_spatial_orbitals = grid_length ** dimension wigner_seitz_radius = 9.3 spinless = False n_qubits = n_spatial_orbitals if not spinless: n_qubits *= 2 n_particles_big = 10 length_scale = wigner_seitz_length_scale( wigner_seitz_radius, n_particles_big, dimension) self.grid3 = Grid(dimension, grid_length, length_scale) # Get the occupied orbitals of the plane-wave basis Hartree-Fock state. hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True) hamiltonian = normal_ordered(hamiltonian) hamiltonian.compress() occupied_states = numpy.array(lowest_single_particle_energy_states( hamiltonian, n_particles_big)) self.hf_state_index3 = numpy.sum(2 ** occupied_states) self.hf_state3 = csc_matrix( ([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1)) self.orbital_occupations3 = [digit == '1' for digit in bin(self.hf_state_index3)[2:]][::-1] self.occupied_orbitals3 = [index for index, occupied in enumerate(self.orbital_occupations3) if occupied] self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3) for i in range(len(self.reversed_occupied_orbitals3)): self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2( self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i] self.reversed_hf_state_index3 = sum( 2 ** index for index in self.reversed_occupied_orbitals3) operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) + FermionOperator('7^ 6^ 5 4', -3.7j) + FermionOperator('3^ 3', 2.1) + FermionOperator('3^ 2', 1.7)) operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid3, spinless)) expected = 2.1 # Calculated from expectation(get_sparse_operator( # transformed_operator), self.hf_state3) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals3, n_spatial_orbitals, self.grid3, spinless) self.assertAlmostEqual(expected, actual) def test_1d5_with_spin_7particles(self): dimension = 1 grid_length = 5 n_spatial_orbitals = grid_length ** dimension wigner_seitz_radius = 9.3 spinless = False n_qubits = n_spatial_orbitals if not spinless: n_qubits *= 2 n_particles_big = 7 length_scale = wigner_seitz_length_scale( wigner_seitz_radius, n_particles_big, dimension) self.grid3 = Grid(dimension, grid_length, length_scale) # Get the occupied orbitals of the plane-wave basis Hartree-Fock state. 
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True) hamiltonian = normal_ordered(hamiltonian) hamiltonian.compress() occupied_states = numpy.array(lowest_single_particle_energy_states( hamiltonian, n_particles_big)) self.hf_state_index3 = numpy.sum(2 ** occupied_states) self.hf_state3 = csc_matrix( ([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1)) self.orbital_occupations3 = [digit == '1' for digit in bin(self.hf_state_index3)[2:]][::-1] self.occupied_orbitals3 = [index for index, occupied in enumerate(self.orbital_occupations3) if occupied] self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3) for i in range(len(self.reversed_occupied_orbitals3)): self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2( self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i] self.reversed_hf_state_index3 = sum( 2 ** index for index in self.reversed_occupied_orbitals3) operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) + FermionOperator('7^ 2^ 4 1') + FermionOperator('3^ 3', 2.1) + FermionOperator('5^ 3^ 1 0', 7.3)) operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid3, spinless)) expected = 1.66 - 0.0615536707435j # Calculated with expected = expectation(get_sparse_operator( # transformed_operator), self.hf_state3) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals3, n_spatial_orbitals, self.grid3, spinless) self.assertAlmostEqual(expected, actual) def test_3d2_spinless(self): dimension = 3 grid_length = 2 n_spatial_orbitals = grid_length ** dimension wigner_seitz_radius = 9.3 spinless = True n_qubits = n_spatial_orbitals if not spinless: n_qubits *= 2 n_particles_big = 5 length_scale = wigner_seitz_length_scale( wigner_seitz_radius, n_particles_big, dimension) self.grid3 = Grid(dimension, grid_length, length_scale) # Get the occupied orbitals of the plane-wave basis Hartree-Fock state. 
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True) hamiltonian = normal_ordered(hamiltonian) hamiltonian.compress() occupied_states = numpy.array(lowest_single_particle_energy_states( hamiltonian, n_particles_big)) self.hf_state_index3 = numpy.sum(2 ** occupied_states) self.hf_state3 = csc_matrix( ([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1)) self.orbital_occupations3 = [digit == '1' for digit in bin(self.hf_state_index3)[2:]][::-1] self.occupied_orbitals3 = [index for index, occupied in enumerate(self.orbital_occupations3) if occupied] self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3) for i in range(len(self.reversed_occupied_orbitals3)): self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2( self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i] self.reversed_hf_state_index3 = sum( 2 ** index for index in self.reversed_occupied_orbitals3) operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) + FermionOperator('7^ 6^ 7 4', -3.7j) + FermionOperator('3^ 7', 2.1)) operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid3, spinless)) expected = -0.2625 - 0.4625j # Calculated with expectation(get_sparse_operator( # transformed_operator), self.hf_state3) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals3, n_spatial_orbitals, self.grid3, spinless) self.assertAlmostEqual(expected, actual) def test_3d2_with_spin(self): dimension = 3 grid_length = 2 n_spatial_orbitals = grid_length ** dimension wigner_seitz_radius = 9.3 spinless = False n_qubits = n_spatial_orbitals if not spinless: n_qubits *= 2 n_particles_big = 9 length_scale = wigner_seitz_length_scale( wigner_seitz_radius, n_particles_big, dimension) self.grid3 = Grid(dimension, grid_length, length_scale) # Get the occupied orbitals of the plane-wave basis Hartree-Fock state. 
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True) hamiltonian = normal_ordered(hamiltonian) hamiltonian.compress() occupied_states = numpy.array(lowest_single_particle_energy_states( hamiltonian, n_particles_big)) self.hf_state_index3 = numpy.sum(2 ** occupied_states) self.hf_state3 = csc_matrix( ([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1)) self.orbital_occupations3 = [digit == '1' for digit in bin(self.hf_state_index3)[2:]][::-1] self.occupied_orbitals3 = [index for index, occupied in enumerate(self.orbital_occupations3) if occupied] self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3) for i in range(len(self.reversed_occupied_orbitals3)): self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2( self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i] self.reversed_hf_state_index3 = sum( 2 ** index for index in self.reversed_occupied_orbitals3) operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) + FermionOperator('7^ 6^ 7 4', -3.7j) + FermionOperator('3^ 7', 2.1)) operator = normal_ordered(operator) transformed_operator = normal_ordered(fourier_transform( operator, self.grid3, spinless)) expected = -0.2625 - 0.578125j # Calculated from expected = expectation(get_sparse_operator( # transformed_operator), self.hf_state3) actual = expectation_db_operator_with_pw_basis_state( operator, self.reversed_occupied_orbitals3, n_spatial_orbitals, self.grid3, spinless) self.assertAlmostEqual(expected, actual) class GetGapTest(unittest.TestCase): def test_get_gap(self): operator = QubitOperator('Y0 X1') + QubitOperator('Z0 Z1') self.assertAlmostEqual(get_gap(get_sparse_operator(operator)), 2.0) def test_get_gap_nonhermitian_error(self): operator = (QubitOperator('X0 Y1', 1 + 1j) + QubitOperator('Z0 Z1', 1j) + QubitOperator((), 2 + 1j)) with self.assertRaises(ValueError): get_gap(get_sparse_operator(operator)) class InnerProductTest(unittest.TestCase): def test_inner_product(self): state_1 = numpy.array([1., 1.j]) state_2 = numpy.array([1., -1.j]) self.assertAlmostEqual(inner_product(state_1, state_1), 2.) self.assertAlmostEqual(inner_product(state_1, state_2), 0.) class BosonSparseTest(unittest.TestCase): def setUp(self): self.hbar = 1. 
self.d = 5 self.b = numpy.diag(numpy.sqrt(numpy.arange(1, self.d)), 1) self.bd = self.b.conj().T self.q = numpy.sqrt(self.hbar/2)*(self.b + self.bd) self.p = -1j*numpy.sqrt(self.hbar/2)*(self.b - self.bd) self.Id = numpy.identity(self.d) def test_boson_ladder_noninteger_trunc(self): with self.assertRaises(ValueError): b = boson_ladder_sparse(1, 0, 0, 0.1) with self.assertRaises(ValueError): b = boson_ladder_sparse(1, 0, 0, -1) with self.assertRaises(ValueError): b = boson_ladder_sparse(1, 0, 0, 0) def test_boson_ladder_destroy_one_mode(self): b = boson_ladder_sparse(1, 0, 0, self.d).toarray() self.assertTrue(numpy.allclose(b, self.b)) def test_boson_ladder_create_one_mode(self): bd = boson_ladder_sparse(1, 0, 1, self.d).toarray() self.assertTrue(numpy.allclose(bd, self.bd)) def test_boson_ladder_single_adjoint(self): b = boson_ladder_sparse(1, 0, 0, self.d).toarray() bd = boson_ladder_sparse(1, 0, 1, self.d).toarray() self.assertTrue(numpy.allclose(b.conj().T, bd)) def test_boson_ladder_two_mode(self): res = boson_ladder_sparse(2, 0, 0, self.d).toarray() expected = numpy.kron(self.b, self.Id) self.assertTrue(numpy.allclose(res, expected)) res = boson_ladder_sparse(2, 1, 0, self.d).toarray() expected = numpy.kron(self.Id, self.b) self.assertTrue(numpy.allclose(res, expected)) def test_single_quad_noninteger_trunc(self): with self.assertRaises(ValueError): b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0.1) with self.assertRaises(ValueError): b = single_quad_op_sparse(1, 0, 'q', self.hbar, -1) with self.assertRaises(ValueError): b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0) def test_single_quad_q_one_mode(self): res = single_quad_op_sparse(1, 0, 'q', self.hbar, self.d).toarray() self.assertTrue(numpy.allclose(res, self.q)) self.assertTrue(numpy.allclose(res, res.conj().T)) def test_single_quad_p_one_mode(self): res = single_quad_op_sparse(1, 0, 'p', self.hbar, self.d).toarray() self.assertTrue(numpy.allclose(res, self.p)) self.assertTrue(numpy.allclose(res, res.conj().T)) def test_single_quad_two_mode(self): res = single_quad_op_sparse(2, 0, 'q', self.hbar, self.d).toarray() expected = numpy.kron(self.q, self.Id) self.assertTrue(numpy.allclose(res, expected)) res = single_quad_op_sparse(2, 1, 'p', self.hbar, self.d).toarray() expected = numpy.kron(self.Id, self.p) self.assertTrue(numpy.allclose(res, expected)) def test_boson_operator_sparse_trunc(self): op = BosonOperator('0') with self.assertRaises(ValueError): b = boson_operator_sparse(op, 0.1) with self.assertRaises(ValueError): b = boson_operator_sparse(op, -1) with self.assertRaises(ValueError): b = boson_operator_sparse(op, 0) def test_boson_operator_invalid_op(self): op = FermionOperator('0') with self.assertRaises(ValueError): b = boson_operator_sparse(op, self.d) def test_boson_operator_sparse_empty(self): for op in (BosonOperator(), QuadOperator()): res = boson_operator_sparse(op, self.d) self.assertEqual(res, numpy.array([[0]])) def test_boson_operator_sparse_identity(self): for op in (BosonOperator(''), QuadOperator('')): res = boson_operator_sparse(op, self.d) self.assertEqual(res, numpy.array([[1]])) def test_boson_operator_sparse_single(self): op = BosonOperator('0') res = boson_operator_sparse(op, self.d).toarray() self.assertTrue(numpy.allclose(res, self.b)) op = BosonOperator('0^') res = boson_operator_sparse(op, self.d).toarray() self.assertTrue(numpy.allclose(res, self.bd)) op = QuadOperator('q0') res = boson_operator_sparse(op, self.d, self.hbar).toarray() self.assertTrue(numpy.allclose(res, self.q)) op = 
QuadOperator('p0') res = boson_operator_sparse(op, self.d, self.hbar).toarray() self.assertTrue(numpy.allclose(res, self.p)) def test_boson_operator_sparse_number(self): op = BosonOperator('0^ 0') res = boson_operator_sparse(op, self.d).toarray() self.assertTrue(numpy.allclose(res, numpy.dot(self.bd, self.b))) def test_boson_operator_sparse_multi_mode(self): op = BosonOperator('0^ 1 1^ 2') res = boson_operator_sparse(op, self.d).toarray() b0 = boson_ladder_sparse(3, 0, 0, self.d).toarray() b1 = boson_ladder_sparse(3, 1, 0, self.d).toarray() b2 = boson_ladder_sparse(3, 2, 0, self.d).toarray() expected = multi_dot([b0.T, b1, b1.T, b2]) self.assertTrue(numpy.allclose(res, expected)) op = QuadOperator('q0 p0 p1') res = boson_operator_sparse(op, self.d, self.hbar).toarray() expected = numpy.identity(self.d**2) for term in op.terms: for i, j in term: expected = expected.dot(single_quad_op_sparse( 2, i, j, self.hbar, self.d).toarray()) self.assertTrue(numpy.allclose(res, expected)) def test_boson_operator_sparse_addition(self): op = BosonOperator('0^ 1') op += BosonOperator('0 0^') res = boson_operator_sparse(op, self.d).toarray() b0 = boson_ladder_sparse(2, 0, 0, self.d).toarray() b1 = boson_ladder_sparse(2, 1, 0, self.d).toarray() expected = numpy.dot(b0.T, b1) + numpy.dot(b0, b0.T) self.assertTrue(numpy.allclose(res, expected))
apache-2.0
8,698,967,164,351,995,000
40.668158
81
0.614493
false
3.586457
true
false
false
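The test file above exercises OpenFermion's sparse-operator helpers (get_sparse_operator, jw_number_restrict_operator, get_ground_state, expectation). Below is a minimal usage sketch mirroring those tests; the top-level import path and the printed values are assumptions, not part of the original record.

```python
# Usage sketch mirroring the tests above; the top-level import path is an
# assumption and may differ between OpenFermion versions.
from openfermion import (expectation, fermi_hubbard, get_ground_state,
                         get_sparse_operator, jw_number_restrict_operator,
                         number_operator)

# Small Fermi-Hubbard model and its sparse matrix representation.
hubbard = fermi_hubbard(2, 2, 1.0, 4.0)
hamiltonian_sparse = get_sparse_operator(hubbard)

# Restrict the Hamiltonian and the number operator to the 2-particle subspace,
# exactly as in the jellium/Hubbard tests above.
n_qubits = 8  # 2x2 lattice with spin
number_sparse = get_sparse_operator(number_operator(n_qubits))
restricted_hamiltonian = jw_number_restrict_operator(hamiltonian_sparse, 2)
restricted_number = jw_number_restrict_operator(number_sparse, 2)

# Ground state within that subspace; its particle number should come out ~2.
energy, ground_state = get_ground_state(restricted_hamiltonian)
print(energy, expectation(restricted_number, ground_state))
```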
rchuppala/usc_agent
src/usc-agent-dev/common/source/pyang/pyang/syntax.py
1
11073
"""Description of YANG & YIN syntax.""" import re ### Regular expressions - constraints on arguments # keywords and identifiers identifier = r"[_A-Za-z][._\-A-Za-z0-9]*" prefix = identifier keyword = '((' + prefix + '):)?(' + identifier + ')' # no group version of keyword keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')' re_keyword = re.compile(keyword) re_keyword_start = re.compile('^' + keyword) pos_integer = r"[1-9][0-9]*" nonneg_integer = r"(0|[1-9])[0-9]*" integer_ = r"[-+]?" + nonneg_integer decimal_ = r"(\+|\-)?[0-9]+(\.[0-9]+)?" length_str = '((min|max|[0-9]+)\s*' \ '(\.\.\s*' \ '(min|max|[0-9]+)\s*)?)' length_expr = length_str + '(\|\s*' + length_str + ')*' re_length_part = re.compile(length_str) range_str = '((\-INF|min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \ '(\.\.\s*' \ '(INF|min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)' range_expr = range_str + '(\|\s*' + range_str + ')*' re_range_part = re.compile(range_str) re_identifier = re.compile("^" + identifier + "$") # path and unique node_id = keyword_ng rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")" path_equality_expr = node_id + r"\s*=\s*" + path_key_expr path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*" absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+" descendant_path_arg = node_id + "(" + path_predicate + ")*" + \ "(?:" + absolute_path_arg + ")?" relative_path_arg = r"(\.\./)*" + descendant_path_arg deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \ ")\s*\)/\.\./" + relative_path_arg path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \ deref_path_arg + ")" absolute_schema_nodeid = "(/" + node_id + ")+" descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?" schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")" unique_arg = descendant_schema_nodeid + "(\s+" + descendant_schema_nodeid + ")*" key_arg = node_id + "(\s+" + node_id + ")*" re_schema_node_id_part = re.compile('/' + keyword) # URI - RFC 3986, Appendix A scheme = "[A-Za-z][-+.A-Za-z0-9]*" unreserved = "[-._~A-Za-z0-9]" pct_encoded = "%[0-9A-F]{2}" sub_delims = "[!$&'()*+,;=]" pchar = ("(" + unreserved + "|" + pct_encoded + "|" + sub_delims + "|[:@])") segment = pchar + "*" segment_nz = pchar + "+" userinfo = ("(" + unreserved + "|" + pct_encoded + "|" + sub_delims + "|:)*") dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])" ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet h16 = "[0-9A-F]{1,4}" ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")" ipv6address = ( "((" + h16 + ":){6}" + ls32 + "|::(" + h16 + ":){5}" + ls32 + "|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 + "|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 + "|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 + "|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 + "|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 + "|((" + h16 + ":){,5}" + h16 + ")?::" + h16 + "|((" + h16 + ":){,6}" + h16 + ")?::)") ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+" ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]" reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*" host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")" port = "[0-9]*" authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?" path_abempty = "(/" + segment + ")*" path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?" 
path_rootless = segment_nz + "(/" + segment + ")*" path_empty = pchar + "{0}" hier_part = ("(" + "//" + authority + path_abempty + "|" + path_absolute + "|" + path_rootless + "|" + path_empty + ")") query = "(" + pchar + "|[/?])*" fragment = query uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" + "(#" + fragment + ")?") # Date date = r"[1-2][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" re_nonneg_integer = re.compile("^" + nonneg_integer + "$") re_integer = re.compile("^" + integer_ + "$") re_decimal = re.compile("^" + decimal_ + "$") re_uri = re.compile("^" + uri + "$") re_boolean = re.compile("^(true|false)$") re_version = re.compile("^1$") re_date = re.compile("^" + date +"$") re_status = re.compile("^(current|obsolete|deprecated)$") re_key = re.compile("^" + key_arg + "$") re_length = re.compile("^" + length_expr + "$") re_range = re.compile("^" + range_expr + "$") re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$") re_ordered_by = re.compile(r"^(user|system)$") re_node_id = re.compile("^" + node_id + "$") re_path = re.compile("^" + path_arg + "$") re_absolute_path = re.compile("^" + absolute_path_arg + "$") re_unique = re.compile("^" + unique_arg + "$") re_schema_nodeid = re.compile("^" + schema_nodeid + "$") re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$") re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$") re_deviate = re.compile("^(add|delete|replace|not-supported)$") arg_type_map = { "identifier": lambda s: re_identifier.search(s) is not None, "non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None, "integer": lambda s: re_integer.search(s) is not None, "uri": lambda s: re_uri.search(s) is not None, "boolean": lambda s: re_boolean.search(s) is not None, "version": lambda s: re_version.search(s) is not None, "date": lambda s: re_date.search(s) is not None, "status-arg": lambda s: re_status.search(s) is not None, "key-arg": lambda s: re_key.search(s) is not None, "length-arg": lambda s: re_length.search(s) is not None, "range-arg": lambda s: re_range.search(s) is not None, "max-value": lambda s: re_pos_integer.search(s) is not None, "ordered-by-arg": lambda s: re_ordered_by.search(s) is not None, "identifier-ref": lambda s: re_node_id.search(s) is not None, "path-arg": lambda s: re_path.search(s) is not None, "absolute-path-arg": lambda s: re_absolute_path.search(s) is not None, "unique-arg": lambda s: re_unique.search(s) is not None, "absolute-schema-nodeid": lambda s: \ re_absolute_schema_nodeid.search(s) is not None, "descendant-schema-nodeid": lambda s: \ re_descendant_schema_nodeid.search(s) is not None, "schema-nodeid": lambda s: \ re_schema_nodeid.search(s) is not None, "enum-arg": lambda s: chk_enum_arg(s), "fraction-digits-arg": lambda s: chk_fraction_digits_arg(s), "deviate-arg": lambda s: re_deviate.search(s) is not None, } """Argument type definitions. Regular expressions for all argument types except plain string that are checked directly by the parser. """ def chk_enum_arg(s): """Checks if the string `s` is a valid enum string. Return True or False.""" if len(s) == 0 or s[0].isspace() or s[-1].isspace(): return False else: return True def chk_fraction_digits_arg(s): """Checks if the string `s` is a valid fraction-digits argument. Return True or False.""" try: v = int(s) if v >= 1 and v <= 18: return True else: return False except ValueError: return False def add_arg_type(arg_type, regexp): """Add a new arg_type to the map. 
Used by extension plugins to register their own argument types.""" arg_type_map[arg_type] = regexp # keyword argument-name yin-element yin_map = \ {'anyxml': ('name', False), 'argument': ('name', False), 'augment': ('target-node', False), 'base': ('name', False), 'belongs-to': ('module', False), 'bit': ('name', False), 'case': ('name', False), 'choice': ('name', False), 'config': ('value', False), 'contact': ('text', True), 'container': ('name', False), 'default': ('value', False), 'description': ('text', True), 'deviate': ('value', False), 'deviation': ('target-node', False), 'enum': ('name', False), 'error-app-tag': ('value', False), 'error-message': ('value', True), 'extension': ('name', False), 'feature': ('name', False), 'fraction-digits': ('value', False), 'grouping': ('name', False), 'identity': ('name', False), 'if-feature': ('name', False), 'import': ('module', False), 'include': ('module', False), 'input': (None, None), 'key': ('value', False), 'leaf': ('name', False), 'leaf-list': ('name', False), 'length': ('value', False), 'list': ('name', False), 'mandatory': ('value', False), 'max-elements': ('value', False), 'min-elements': ('value', False), 'module': ('name', False), 'must': ('condition', False), 'namespace': ('uri', False), 'notification': ('name', False), 'ordered-by': ('value', False), 'organization': ('text', True), 'output': (None, None), 'path': ('value', False), 'pattern': ('value', False), 'position': ('value', False), 'presence': ('value', False), 'prefix': ('value', False), 'range': ('value', False), 'reference': ('text', True), 'refine': ('target-node', False), 'require-instance': ('value', False), 'revision': ('date', False), 'revision-date': ('date', False), 'rpc': ('name', False), 'status': ('value', False), 'submodule': ('name', False), 'type': ('name', False), 'typedef': ('name', False), 'unique': ('tag', False), 'units': ('name', False), 'uses': ('name', False), 'value': ('value', False), 'when': ('condition', False), 'yang-version': ('value', False), 'yin-element': ('value', False), } """Mapping of statements to the YIN representation of their arguments. The values are pairs whose first component specifies whether the argument is stored in a subelement and the second component is the name of the attribute or subelement carrying the argument. See YANG specification. """
gpl-2.0
2,142,903,817,177,840,400
41.588462
80
0.493362
false
3.092153
false
false
false
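The pyang syntax.py module above exposes add_arg_type() for extension plugins. A small illustrative sketch follows; the "percentage" type name and its pattern are made up. Note that, despite the parameter name "regexp", the entries already stored in arg_type_map are callables returning True/False, so a validator function is what should be registered.

```python
# Illustrative only: registering a custom argument type the way an extension
# plugin would.  The "percentage" type and its pattern are invented here.
import re

from pyang import syntax

re_percentage = re.compile(r"^(100|[1-9]?[0-9])$")
syntax.add_arg_type("percentage",
                    lambda s: re_percentage.search(s) is not None)

# The stored validator can then be invoked like the built-in ones:
syntax.arg_type_map["percentage"]("42")   # True
syntax.arg_type_map["percentage"]("142")  # False
```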
ngokevin/zamboni
mkt/operators/tests/test_authorization.py
1
5750
from nose.tools import ok_ from rest_framework.generics import GenericAPIView from django.contrib.auth.models import AnonymousUser from amo.tests import TestCase from mkt.access.middleware import ACLMiddleware from mkt.carriers import CARRIER_MAP as CARRIERS from mkt.feed.constants import FEED_TYPE_SHELF from mkt.feed.tests.test_models import FeedTestMixin from mkt.operators.authorization import (OperatorAuthorization, OperatorShelfAuthorization) from mkt.operators.models import OperatorPermission from mkt.regions import REGIONS_DICT as REGIONS from mkt.site.fixtures import fixture from mkt.users.models import UserProfile from test_utils import RequestFactory class BaseTestOperatorAuthorization(FeedTestMixin, TestCase): fixtures = fixture('user_2519') + FeedTestMixin.fixtures def setUp(self): super(BaseTestOperatorAuthorization, self).setUp() self.auth = self.auth_class() self.user = UserProfile.objects.get(pk=2519) self.view = GenericAPIView() def make_admin(self): self.grant_permission(self.user, 'OperatorDashboard:*') def give_objpermission(self, carrier, region): carrier_id = CARRIERS[carrier].id region_id = REGIONS[region].id OperatorPermission.objects.create(user=self.user, region=region_id, carrier=carrier_id) def is_authorized(self, verb, anon=False, carrier='telefonica', region='br'): request = self.request(verb, anon=anon, carrier=carrier, region=region) return self.auth.has_permission(request, self.view) def is_object_authorized(self, verb, obj, anon=False, carrier='telefonica', region='br'): request = self.request(verb, anon=anon, carrier=carrier, region=region) return self.auth.has_object_permission(request, self.view, obj) def request(self, verb, anon=False, **kwargs): request = getattr(RequestFactory(), verb.lower())('/', kwargs) request.user = AnonymousUser() if anon else self.user ACLMiddleware().process_request(request) return request class TestOperatorAuthorization(BaseTestOperatorAuthorization): auth_class = OperatorAuthorization def test_safe(self): ok_(self.is_authorized('GET', anon=True)) ok_(self.is_authorized('GET')) def test_safe_permission(self): self.make_admin() ok_(self.is_authorized('GET')) def test_safe_objpermission_correct(self): self.give_objpermission('telefonica', 'br') ok_(self.is_authorized('GET', carrier='telefonica', region='br')) def test_safe_objpermission_mismatch(self): self.give_objpermission('telefonica', 'br') ok_(self.is_authorized('GET', carrier='america_movil', region='fr')) def test_unsafe(self): ok_(not self.is_authorized('POST', anon=True)) ok_(not self.is_authorized('POST')) def test_unsafe_permission(self): self.make_admin() ok_(self.is_authorized('POST')) def test_unsafe_objpermission_correct(self): self.give_objpermission('telefonica', 'br') ok_(self.is_authorized('POST')) def test_unsafe_objpermission_mismatch(self): self.give_objpermission('telefonica', 'br') ok_(not self.is_authorized('POST', carrier='america_movil', region='fr')) class TestOperatorShelfAuthorization(BaseTestOperatorAuthorization): auth_class = OperatorShelfAuthorization def setUp(self): super(TestOperatorShelfAuthorization, self).setUp() self.feed_item = self.feed_item_factory(carrier=1, region=7, # TEF/BR item_type=FEED_TYPE_SHELF) self.shelf = self.feed_item.shelf def test_safe_object(self): ok_(self.is_object_authorized('GET', self.feed_item, anon=True)) ok_(self.is_object_authorized('GET', self.shelf, anon=True)) ok_(self.is_object_authorized('GET', self.feed_item)) ok_(self.is_object_authorized('GET', self.shelf)) self.make_admin() 
ok_(self.is_object_authorized('GET', self.feed_item)) ok_(self.is_object_authorized('GET', self.shelf)) def test_safe_object_objpermission_correct(self): self.give_objpermission('telefonica', 'br') ok_(self.is_object_authorized('GET', self.feed_item)) ok_(self.is_object_authorized('GET', self.shelf)) def test_safe_object_objpermission_mismatch(self): self.give_objpermission('america_movil', 'fr') ok_(self.is_object_authorized('GET', self.feed_item)) ok_(self.is_object_authorized('GET', self.shelf)) def test_unsafe_object(self): ok_(not self.is_object_authorized('POST', self.feed_item, anon=True)) ok_(not self.is_object_authorized('POST', self.shelf, anon=True)) ok_(not self.is_object_authorized('POST', self.feed_item)) ok_(not self.is_object_authorized('POST', self.shelf)) self.make_admin() ok_(self.is_object_authorized('POST', self.feed_item)) ok_(self.is_object_authorized('POST', self.shelf)) def test_unsafe_object_objpermission_correct(self): self.give_objpermission('telefonica', 'br') ok_(self.is_object_authorized('POST', self.feed_item)) ok_(self.is_object_authorized('POST', self.shelf)) def test_unsafe_object_objpermission_mismatch(self): self.give_objpermission('america_movil', 'fr') ok_(not self.is_object_authorized('POST', self.feed_item)) ok_(not self.is_object_authorized('POST', self.shelf))
bsd-3-clause
-7,164,898,191,117,203,000
39.20979
79
0.652522
false
3.787879
true
false
false
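The tests above describe the behaviour expected of mkt's operator authorization classes: safe (read-only) requests always pass, while writes require either the 'OperatorDashboard:*' admin permission or a matching carrier/region OperatorPermission. The sketch below shows only the general shape such a DRF permission class could take under those rules; it is not the actual implementation, and the two helper functions are placeholders for the real ACL and database lookups.

```python
# General shape of the permission classes these tests exercise -- NOT the
# actual mkt.operators implementation.
from django.contrib.auth.models import AnonymousUser
from rest_framework.permissions import SAFE_METHODS, BasePermission


def _is_operator_admin(user):
    # Placeholder for the 'OperatorDashboard:*' check used by make_admin().
    return False


def _has_operator_permission(user, carrier_id, region_id):
    # Placeholder for an OperatorPermission.objects.filter(...) existence check.
    return False


class OperatorAuthorizationSketch(BasePermission):
    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return True
        if isinstance(request.user, AnonymousUser):
            return False
        carrier_id = request.GET.get('carrier')
        region_id = request.GET.get('region')
        return (_is_operator_admin(request.user) or
                _has_operator_permission(request.user, carrier_id, region_id))
```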
obedmr/MPIaaS
app/echoserv.py
1
1498
#!/usr/bin/env python from twisted.internet.protocol import Protocol, Factory from twisted.internet import reactor import twisted.internet.error import sys import ConfigParser CONFIG_CONF = "setup.conf" PORT=8000 class Echo(Protocol): def dataReceived(self, data): """ As soon as any data is received, write it back. """ lines = data.split('\n') for line in lines: if "PORT:" in line: print line port = line.split(":")[1].strip() if "SERVER_IP:" in line: print line server_ip = line.split(":")[1].strip() if "LOCAL_IP:" in line: print line client_ip = line.split(":")[1].strip() parser = ConfigParser.SafeConfigParser() section = 'CLIENTS_' + client_ip parser.add_section(section) parser.set(section, 'ip',str(client_ip)) parser.set(section, 'port',str(port)) parser.write(sys.stdout) file_conf = open(CONFIG_CONF,'a') parser.write(file_conf) file_conf.close() self.transport.write(data) def main(): try: f = Factory() f.protocol = Echo reactor.listenTCP(PORT, f) reactor.run() except twisted.internet.error.CannotListenError, ex: print "Port is %d busy: %s" % (PORT, ex) print "Run ./mpiaas_runner.py --killserver" sys.exit(1) if __name__ == '__main__': main()
apache-2.0
-7,503,330,617,456,163,000
25.280702
56
0.55474
false
3.8509
false
false
false
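echoserv.py above listens on port 8000, parses PORT/SERVER_IP/LOCAL_IP lines from whatever a client sends, records them in setup.conf, and echoes the data back. A minimal client sketch follows; the host and the reported values are placeholders.

```python
# Minimal client sketch for the registration/echo server above.
import socket

message = b"PORT: 9000\nSERVER_IP: 192.168.0.10\nLOCAL_IP: 192.168.0.42\n"

sock = socket.create_connection(("127.0.0.1", 8000))
try:
    sock.sendall(message)
    print(sock.recv(4096))  # the same bytes, echoed back by Echo.dataReceived
finally:
    sock.close()
```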
SimoneLucia/EmbASP-Python
languages/asp/answer_set.py
1
1221
from languages.asp.asp_mapper import ASPMapper


class AnserSet(object):
    """A collection of data representing a generic Answer Set"""

    def __init__(self, value, weightMap=None):
        self.__value = value  # Where the answer set data is stored
        # Where the weights of the answer set are stored (the dict is created
        # per instance to avoid sharing a mutable default argument)
        self.__weight_map = weightMap if weightMap is not None else dict()
        self.__atoms = set()  # Where the answer set's atoms are stored

    def get_answer_set(self):
        """Return the current __value data.

        The method returns the answer set as a list of strings.
        """
        return self.__value

    def get_atoms(self):
        """Return the atoms stored in __atoms.

        The method returns a set of objects filled with the atoms' data.
        """
        if not self.__atoms:
            mapper = ASPMapper.get_instance()
            for atom in self.__value:
                obj = mapper.get_object(atom)
                if obj is not None:
                    self.__atoms.add(obj)
        return self.__atoms

    def get_weights(self):
        """Return the weight_map"""
        return self.__weight_map

    def __str__(self):
        """Overload the string method"""
        return str(self.__value)
mit
-8,103,394,283,243,837,000
31.131579
83
0.564292
false
4.505535
false
false
false
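A short usage sketch for the AnserSet class above; the atom strings and the weight map are invented example values, and get_atoms() would additionally require classes to be registered with ASPMapper, so only the raw accessors are shown.

```python
# Invented example values for illustration.
answer_set = AnserSet(["node(1)", "node(2)", "colored(1,red)"], {(0, 1): 3})
print(answer_set)                   # __str__ prints the raw atom list
print(answer_set.get_answer_set())  # ['node(1)', 'node(2)', 'colored(1,red)']
print(answer_set.get_weights())     # {(0, 1): 3}
```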
nkoep/pymanopt
pymanopt/manifolds/psd.py
1
15204
import warnings import numpy as np from numpy import linalg as la, random as rnd from scipy.linalg import expm # Workaround for SciPy bug: https://github.com/scipy/scipy/pull/8082 try: from scipy.linalg import solve_continuous_lyapunov as lyap except ImportError: from scipy.linalg import solve_lyapunov as lyap from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold, Manifold from pymanopt.tools.multi import multilog, multiprod, multisym, multitransp class _RetrAsExpMixin: """Mixin class which defers calls to the exponential map to the retraction and issues a warning. """ def exp(self, Y, U): warnings.warn( "Exponential map for manifold '{:s}' not implemented yet. Using " "retraction instead.".format(self._get_class_name()), RuntimeWarning) return self.retr(Y, U) class SymmetricPositiveDefinite(EuclideanEmbeddedSubmanifold): """Manifold of (n x n)^k symmetric positive definite matrices, based on the geometry discussed in Chapter 6 of Positive Definite Matrices (Bhatia 2007). Some of the implementation is based on sympositivedefinitefactory.m from the Manopt MATLAB package. Also see "Conic geometric optimisation on the manifold of positive definite matrices" (Sra & Hosseini 2013) for more details. """ def __init__(self, n, k=1): self._n = n self._k = k if k == 1: name = ("Manifold of positive definite ({} x {}) matrices").format( n, n) else: name = "Product manifold of {} ({} x {}) matrices".format(k, n, n) dimension = int(k * n * (n + 1) / 2) super().__init__(name, dimension) @property def typicaldist(self): return np.sqrt(self.dim) def dist(self, x, y): # Adapted from equation 6.13 of "Positive definite matrices". The # Cholesky decomposition gives the same result as matrix sqrt. There # may be more efficient ways to compute this. c = la.cholesky(x) c_inv = la.inv(c) logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)), pos_def=True) return la.norm(logm) def inner(self, x, u, v): return np.tensordot(la.solve(x, u), la.solve(x, v), axes=x.ndim) def proj(self, X, G): return multisym(G) def egrad2rgrad(self, x, u): # TODO: Check that this is correct return multiprod(multiprod(x, multisym(u)), x) def ehess2rhess(self, x, egrad, ehess, u): # TODO: Check that this is correct return (multiprod(multiprod(x, multisym(ehess)), x) + multisym(multiprod(multiprod(u, multisym(egrad)), x))) def norm(self, x, u): # This implementation is as fast as np.linalg.solve_triangular and is # more stable, as the above solver tends to output non positive # definite results. c = la.cholesky(x) c_inv = la.inv(c) return la.norm(multiprod(multiprod(c_inv, u), multitransp(c_inv))) def rand(self): # The way this is done is arbitrary. I think the space of p.d. # matrices would have infinite measure w.r.t. the Riemannian metric # (cf. integral 0-inf [ln(x)] dx = inf) so impossible to have a # 'uniform' distribution. # Generate eigenvalues between 1 and 2 d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1) # Generate an orthogonal matrix. Annoyingly qr decomp isn't # vectorized so need to use a for loop. Could be done using # svd but this is slower for bigger matrices. 
u = np.zeros((self._k, self._n, self._n)) for i in range(self._k): u[i], r = la.qr(rnd.randn(self._n, self._n)) if self._k == 1: return multiprod(u, d * multitransp(u))[0] return multiprod(u, d * multitransp(u)) def randvec(self, x): k = self._k n = self._n if k == 1: u = multisym(rnd.randn(n, n)) else: u = multisym(rnd.randn(k, n, n)) return u / self.norm(x, u) def transp(self, x1, x2, d): return d def exp(self, x, u): # TODO: Check which method is faster depending on n, k. x_inv_u = la.solve(x, u) if self._k > 1: e = np.zeros(np.shape(x)) for i in range(self._k): e[i] = expm(x_inv_u[i]) else: e = expm(x_inv_u) return multiprod(x, e) # This alternative implementation is sometimes faster though less # stable. It can return a matrix with small negative determinant. # c = la.cholesky(x) # c_inv = la.inv(c) # e = multiexp(multiprod(multiprod(c_inv, u), multitransp(c_inv)), # sym=True) # return multiprod(multiprod(c, e), multitransp(c)) retr = exp def log(self, x, y): c = la.cholesky(x) c_inv = la.inv(c) logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)), pos_def=True) return multiprod(multiprod(c, logm), multitransp(c)) def zerovec(self, x): k = self._k n = self._n if k == 1: return np.zeros((k, n, n)) return np.zeros((n, n)) # TODO(nkoep): This could either stay in here (seeing how it's a manifold of # psd matrices, or in fixed_rank. Alternatively, move this one and # the next class to a dedicated 'psd_fixed_rank' module. class _PSDFixedRank(Manifold, _RetrAsExpMixin): def __init__(self, n, k, name, dimension): self._n = n self._k = k super().__init__(name, dimension) @property def typicaldist(self): return 10 + self._k def inner(self, Y, U, V): # Euclidean metric on the total space. return float(np.tensordot(U, V)) def norm(self, Y, U): return la.norm(U, "fro") def dist(self, U, V): raise NotImplementedError( "The manifold '{:s}' currently provides no implementation of the " "'dist' method".format(self._get_class_name())) def proj(self, Y, H): # Projection onto the horizontal space YtY = Y.T.dot(Y) AS = Y.T.dot(H) - H.T.dot(Y) Omega = lyap(YtY, AS) return H - Y.dot(Omega) def egrad2rgrad(self, Y, egrad): return egrad def ehess2rhess(self, Y, egrad, ehess, U): return self.proj(Y, ehess) def retr(self, Y, U): return Y + U def rand(self): return rnd.randn(self._n, self._k) def randvec(self, Y): H = self.rand() P = self.proj(Y, H) return self._normalize(P) def transp(self, Y, Z, U): return self.proj(Z, U) def _normalize(self, Y): return Y / self.norm(None, Y) def zerovec(self, X): return np.zeros((self._n, self._k)) class PSDFixedRank(_PSDFixedRank): """ Manifold of n-by-n symmetric positive semidefinite matrices of rank k. A point X on the manifold is parameterized as YY^T where Y is a matrix of size nxk. As such, X is symmetric, positive semidefinite. We restrict to full-rank Y's, such that X has rank exactly k. The point X is numerically represented by Y (this is more efficient than working with X, which may be big). Tangent vectors are represented as matrices of the same size as Y, call them Ydot, so that Xdot = Y Ydot' + Ydot Y. The metric is the canonical Euclidean metric on Y. Since for any orthogonal Q of size k, it holds that (YQ)(YQ)' = YY', we "group" all matrices of the form YQ in an equivalence class. The set of equivalence classes is a Riemannian quotient manifold, implemented here. Notice that this manifold is not complete: if optimization leads Y to be rank-deficient, the geometry will break down. 
Hence, this geometry should only be used if it is expected that the points of interest will have rank exactly k. Reduce k if that is not the case. An alternative, complete, geometry for positive semidefinite matrices of rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank", SIAM Journal on Matrix Analysis and Applications. The geometry implemented here is the simplest case of the 2010 paper: M. Journee, P.-A. Absil, F. Bach and R. Sepulchre, "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices". Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf """ def __init__(self, n, k): name = ("YY' quotient manifold of {:d}x{:d} psd matrices of " "rank {:d}".format(n, n, k)) dimension = int(k * n - k * (k - 1) / 2) super().__init__(n, k, name, dimension) class PSDFixedRankComplex(_PSDFixedRank): """ Manifold of n x n complex Hermitian pos. semidefinite matrices of rank k. Manifold of n-by-n complex Hermitian positive semidefinite matrices of fixed rank k. This follows the quotient geometry described in Sarod Yatawatta's 2013 paper: "Radio interferometric calibration using a Riemannian manifold", ICASSP. Paper link: http://dx.doi.org/10.1109/ICASSP.2013.6638382. A point X on the manifold M is parameterized as YY^*, where Y is a complex matrix of size nxk of full rank. For any point Y on the manifold M, given any kxk complex unitary matrix U, we say Y*U is equivalent to Y, i.e., YY^* does not change. Therefore, M is the set of equivalence classes and is a Riemannian quotient manifold C^{nk}/U(k) where C^{nk} is the set of all complex matrix of size nxk of full rank. The metric is the usual real-trace inner product, that is, it is the usual metric for the complex plane identified with R^2. Notice that this manifold is not complete: if optimization leads Y to be rank-deficient, the geometry will break down. Hence, this geometry should only be used if it is expected that the points of interest will have rank exactly k. Reduce k if that is not the case. """ def __init__(self, n, k): name = ("YY' quotient manifold of Hermitian {:d}x{:d} complex " "matrices of rank {:d}".format(n, n, k)) dimension = 2 * k * n - k * k super().__init__(n, k, name, dimension) def inner(self, Y, U, V): return 2 * float(np.tensordot(U, V).real) def norm(self, Y, U): return np.sqrt(self.inner(Y, U, U)) def dist(self, U, V): S, _, D = la.svd(V.T.conj().dot(U)) E = U - V.dot(S).dot(D) return self.inner(None, E, E) / 2 def rand(self): rand_ = super().rand return rand_() + 1j * rand_() class Elliptope(Manifold, _RetrAsExpMixin): """ Manifold of n-by-n psd matrices of rank k with unit diagonal elements. A point X on the manifold is parameterized as YY^T where Y is a matrix of size nxk. As such, X is symmetric, positive semidefinite. We restrict to full-rank Y's, such that X has rank exactly k. The point X is numerically represented by Y (this is more efficient than working with X, which may be big). Tangent vectors are represented as matrices of the same size as Y, call them Ydot, so that Xdot = Y Ydot' + Ydot Y and diag(Xdot) == 0. The metric is the canonical Euclidean metric on Y. The diagonal constraints on X (X(i, i) == 1 for all i) translate to unit-norm constraints on the rows of Y: norm(Y(i, :)) == 1 for all i. The set of such Y's forms the oblique manifold. But because for any orthogonal Q of size k, it holds that (YQ)(YQ)' = YY', we "group" all matrices of the form YQ in an equivalence class. 
The set of equivalence classes is a Riemannian quotient manifold, implemented here. Note that this geometry formally breaks down at rank-deficient Y's. This does not appear to be a major issue in practice when optimization algorithms converge to rank-deficient Y's, but convergence theorems no longer hold. As an alternative, you may use the oblique manifold (it has larger dimension, but does not break down at rank drop.) The geometry is taken from the 2010 paper: M. Journee, P.-A. Absil, F. Bach and R. Sepulchre, "Low-Rank Optimization on the Cone of Positive Semidefinite Matrices". Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf """ def __init__(self, n, k): self._n = n self._k = k name = ("YY' quotient manifold of {:d}x{:d} psd matrices of rank {:d} " "with diagonal elements being 1".format(n, n, k)) dimension = int(n * (k - 1) - k * (k - 1) / 2) super().__init__(name, dimension) @property def typicaldist(self): return 10 * self._k def inner(self, Y, U, V): return float(np.tensordot(U, V)) def dist(self, U, V): raise NotImplementedError( "The manifold '{:s}' currently provides no implementation of the " "'dist' method".format(self._get_class_name())) def norm(self, Y, U): return np.sqrt(self.inner(Y, U, U)) # Projection onto the tangent space, i.e., on the tangent space of # ||Y[i, :]||_2 = 1 def proj(self, Y, H): eta = self._project_rows(Y, H) # Projection onto the horizontal space YtY = Y.T.dot(Y) AS = Y.T.dot(eta) - H.T.dot(Y) Omega = lyap(YtY, -AS) return eta - Y.dot((Omega - Omega.T) / 2) def retr(self, Y, U): return self._normalize_rows(Y + U) # Euclidean gradient to Riemannian gradient conversion. We only need the # ambient space projection: the remainder of the projection function is not # necessary because the Euclidean gradient must already be orthogonal to # the vertical space. def egrad2rgrad(self, Y, egrad): return self._project_rows(Y, egrad) def ehess2rhess(self, Y, egrad, ehess, U): scaling_grad = (egrad * Y).sum(axis=1) hess = ehess - U * scaling_grad[:, np.newaxis] scaling_hess = (U * egrad + Y * ehess).sum(axis=1) hess -= Y * scaling_hess[:, np.newaxis] return self.proj(Y, hess) def rand(self): return self._normalize_rows(rnd.randn(self._n, self._k)) def randvec(self, Y): H = self.proj(Y, self.rand()) return H / self.norm(Y, H) def transp(self, Y, Z, U): return self.proj(Z, U) def _normalize_rows(self, Y): """Return an l2-row-normalized copy of the matrix Y.""" return Y / la.norm(Y, axis=1)[:, np.newaxis] # Orthogonal projection of each row of H to the tangent space at the # corresponding row of X, seen as a point on a sphere. def _project_rows(self, Y, H): # Compute the inner product between each vector H[i, :] with its root # point Y[i, :], i.e., Y[i, :].T * H[i, :]. Returns a row vector. inners = (Y * H).sum(axis=1) return H - Y * inners[:, np.newaxis] def zerovec(self, X): return np.zeros((self._n, self._k))
bsd-3-clause
3,308,188,274,906,176,000
36.173594
79
0.615496
false
3.443715
false
false
false
UrLab/DocHub
www/rest_urls.py
1
1862
from rest_framework.routers import APIRootView, DefaultRouter

import catalog.rest
import documents.rest
import notifications.rest
import search.rest
import telepathy.rest
import users.rest
import www.rest


class DochubAPI(APIRootView):
    """
    This is the API of DocHub.

    You are free to use it to crawl DocHub, write your own frontend
    or even make a copy of our documents.
    But please, if you do, respect these rules:

    * Do not hit the server too hard. If you degrade the service for other users, we will ban you.
    * Respect the privacy of the users
    * If you scrape and reuse our content, please credit DocHub and the original uploader.

    This whole API is auth protected.
    To be able to use it without your session cookie,
    use your personal token from <a href="/api/me">/api/me</a>
    ([doc](http://www.django-rest-framework.org/api-guide/authentication/#tokenauthentication))
    """
    pass


class Router(DefaultRouter):
    APIRootView = DochubAPI


router = Router()

router.register(r'users', users.rest.UserViewSet)
router.register(r'courses', catalog.rest.CourseViewSet)
router.register(r'categories', catalog.rest.CategoryViewSet)
router.register(r'threads', telepathy.rest.ThreadViewSet)
router.register(r'messages', telepathy.rest.MessageViewSet)
router.register(r'documents', documents.rest.DocumentViewSet)
router.register(r'search/courses', search.rest.CourseSearchViewSet, basename="search-courses")
router.register(r'feed', www.rest.FeedViewSet, basename="feed")
router.register(r'me', users.rest.Me, basename="users-me")
router.register(r'notifications', notifications.rest.NotificationsViewSet, basename="notifications")
router.register(r'me/actions', www.rest.SelfFeedViewSet, basename="user-actions")
router.register(r'tree', catalog.rest.Tree, basename="catalog-tree")

urlpatterns = router.urls
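# --- Illustrative client-side sketch (not part of the original module) ---
# How a client might call this API with the personal token mentioned in the
# docstring above, using DRF's TokenAuthentication header scheme
# ("Authorization: Token <key>"). The host name and token value are
# placeholders; the "courses/" endpoint is one of the routes registered above.
import requests

API_ROOT = "https://dochub.example.org/api/"   # placeholder host
TOKEN = "<your-personal-token>"                # obtained from /api/me

response = requests.get(
    API_ROOT + "courses/",
    headers={"Authorization": "Token " + TOKEN},
)
response.raise_for_status()
print(response.json())  # shape depends on the project's pagination settings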
agpl-3.0
402,545,330,675,040,000
34.807692
100
0.762084
false
3.643836
false
false
false
pombredanne/discern
examples/problem_grader/grader/models.py
1
5156
from django.db import models from django.contrib.auth.models import User from django.forms.models import model_to_dict from django.db.models.signals import post_save, pre_save import random import string from django.conf import settings import requests import json import logging log= logging.getLogger(__name__) class Rubric(models.Model): """ The rubric object is a way to locally store data about rubric options. Each rubric is associated with a problem object stored on the API side. """ #Each rubric is specific to a problem and a user. associated_problem = models.IntegerField() user = models.ForeignKey(User) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def get_scores(self): """ Calculate the final score for a given rubric. """ scores = [] all_scores = [] final_score=0 max_score = 0 options = self.get_rubric_dict() for option in options: #Add to all_scores for each of the scores all_scores.append(option['option_points']) #If the student was marked as correct for a given option, add it to the score if option['selected']: scores.append(option['option_points']) if len(scores)>0: final_score = sum(scores) if len(all_scores)>0: max_score = sum(all_scores) return { 'score' : final_score, 'max_score' : max_score } def get_rubric_dict(self): """ Get the rubric in dictionary form. """ options = [] #Bundle up all of the rubric options option_set = self.rubricoption_set.all().order_by('id') for option in option_set: options.append(model_to_dict(option)) return options class RubricOption(models.Model): """ Each rubric has multiple options """ #Associate options with rubrics rubric = models.ForeignKey(Rubric) #Number of points the rubric option is worth option_points = models.IntegerField() #Text to show to users for this option option_text = models.TextField() #Whether or not this option is selected (ie marked correct) selected = models.BooleanField(default=False) class UserProfile(models.Model): """ Every user has a profile. Used to store additional fields. """ user = models.OneToOneField(User) #Api key api_key = models.TextField(default="") #Api username api_user = models.TextField(default="") #whether or not an api user has been created api_user_created = models.BooleanField(default=False) def get_api_auth(self): """ Returns the api authentication dictionary for the given user """ return { 'username' : self.api_user, 'api_key' : self.api_key } def create_user_profile(sender, instance, created, **kwargs): """ Creates a user profile based on a signal from User when it is created """ #Create a userprofile if the user has just been created, don't if not. if created: profile, created = UserProfile.objects.get_or_create(user=instance) else: return #If a userprofile was not created (gotten instead), then don't make an api user if not created: return #Create a random password for the api user random_pass = ''.join([random.choice(string.digits + string.letters) for i in range(0, 15)]) #Data we will post to the api to make a user data = { 'username' : instance.username, 'password' : random_pass, 'email' : instance.email } headers = {'content-type': 'application/json'} #Now, let's try to get the schema for the create user model. 
create_user_url = settings.FULL_API_START + "createuser/" counter = 0 status_code = 400 #Try to create the user at the api while status_code==400 and counter<2 and not instance.profile.api_user_created: try: #Post our information to try to create a user response = requests.post(create_user_url, data=json.dumps(data),headers=headers) status_code = response.status_code #If a user has been created, store the api key locally if status_code==201: instance.profile.api_user_created = True response_data = json.loads(response.content) instance.profile.api_key = response_data['api_key'] instance.profile.api_user = data['username'] instance.profile.save() except: log.exception("Could not create an API user!") instance.profile.save() counter+=1 #If we could not create a user in the first pass through the loop, add to the username to try to make it unique data['username'] += random.choice(string.digits + string.letters) post_save.connect(create_user_profile, sender=User) #Maps the get_profile() function of a user to an attribute profile User.profile = property(lambda u: u.get_profile())
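# --- Illustrative usage sketch (not part of the original module) ---
# Once the post_save handler above has provisioned an API user, the stored
# credentials can be attached to calls against the grading API. Only
# get_api_auth() and settings.FULL_API_START come from this module; the
# "problems/" endpoint below is a hypothetical example.
import requests
from django.conf import settings

def fetch_problems(user):
    """Query the external grading API on behalf of a local user."""
    auth = user.profile.get_api_auth()          # {'username': ..., 'api_key': ...}
    response = requests.get(
        settings.FULL_API_START + "problems/",  # hypothetical endpoint
        params=auth,
    )
    response.raise_for_status()
    return response.json()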
agpl-3.0
7,535,211,253,820,330,000
32.480519
119
0.632661
false
4.1248
false
false
false
mabotech/mabo.io
py/AK/test/redis_lua000.py
1
1801
# -*- coding: utf-8 -*-

"""
redis lua
redis eval, notify in lua script
"""

import time

import redis


def main(key, val, key2, val2):

    # connection pool
    r = redis.Redis(host='localhost', port=6379, db=5)

    d = {"a":"v1"}

    """
    eval("lua script", "number of keys", keys[], argv[])
    KEYS[1]
    ARGV[1]

    compare the incoming value with the stored one,
    update the stored value only when it changes,
    create a job to update the db when the value changes,
    set the heartbeat / pre tag
    """

    lua_code = """if redis.call("EXISTS", KEYS[1]) == 1 then
        -- redis.call("SET", "ST", ARGV[3])
        -- redis.call("LPUSH", "c1","chan1")
        -- redis.call("PUBLISH", "c1","new")
        --
        local payload = redis.call("GET", KEYS[1])

        if payload == ARGV[1] then
            return "same"
        else
            redis.call("SET", KEYS[1],ARGV[1])
            redis.call("SET", KEYS[2],ARGV[2])
            redis.call("LPUSH", "c1","chan2")
            return payload -- return old val
        end
    else
        redis.call("SET", KEYS[1],ARGV[1])
        redis.call("SET", KEYS[2],ARGV[2])
        redis.call("LPUSH", "c1","chan2")
        return nil
    end"""
    #.format(**d)

    #print(lua_code)

    #benchmark
    """
    0.22 ms
    4545 times/second
    """
    t1 = time.time()

    stamp = t1*1000
    val2 = t1*1000

    n = 1
    for i in xrange(0, n):

        v = r.eval(lua_code, 2, key, key2, val, val2, stamp)

    t2 = time.time()

    t = (t2-t1)*1000/n
    print("%sms" %(t))
    #print(1000/t)

    print(v)

    h = r.script_load(lua_code)
    print h

    #print dir(r)


if __name__ == "__main__":

    key = "y:a:c"
    val = "10.20"

    key2 = "y:a:c_st"
    val2 = time.time()

    main(key, val, key2, val2)
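# --- Illustrative usage sketch (not part of the original file) ---
# script_load() above returns the script's SHA1; the same script can then be
# re-run with evalsha(), which avoids resending the script body on every call.
# redis-py also offers register_script(), which wraps this and falls back to
# EVAL when the SHA is not yet cached on the server. The key name below reuses
# the one from __main__ above; the tiny script here is only an example.
import redis

r = redis.Redis(host='localhost', port=6379, db=5)

lua = 'return redis.call("GET", KEYS[1])'
sha = r.script_load(lua)

print(r.evalsha(sha, 1, "y:a:c"))     # same call shape as r.eval(lua, 1, "y:a:c")

script = r.register_script(lua)       # cached wrapper with NOSCRIPT fallback
print(script(keys=["y:a:c"], args=[]))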
mit
-3,571,517,743,080,625,000
17.770833
61
0.481954
false
3.052542
false
false
false
levilucio/SyVOLT
t_core/HTopClass2TableNAC0.py
1
8862
from core.himesis import Himesis, HimesisPreConditionPatternNAC import cPickle as pickle from uuid import UUID class HTopClass2TableNAC0(HimesisPreConditionPatternNAC): def __init__(self, LHS): """ Creates the himesis graph representing the AToM3 model HTopClass2TableNAC0. """ # Flag this instance as compiled now self.is_compiled = True super(HTopClass2TableNAC0, self).__init__(name='HTopClass2TableNAC0', num_nodes=3, edges=[], LHS=LHS) # Add the edges self.add_edges([(1, 0), (0, 2)]) # Set the graph attributes self["mm__"] = pickle.loads("""(lp1 S'MT_pre__CD2RDBMSMetaModel' p2 aS'MoTifRule' p3 a.""") self["MT_constraint__"] = """#=============================================================================== # This code is executed after the nodes in the NAC have been matched. # You can access a matched node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # The given constraint must evaluate to a boolean expression: # returning True forbids the rule from being applied, # returning False enables the rule to be applied. #=============================================================================== return True """ self["name"] = """""" self["GUID__"] = UUID('d74c9eae-e470-4aa6-8817-2e15a1b64aab') # Set the node attributes self.vs[0]["MT_subtypeMatching__"] = False self.vs[0]["MT_label__"] = """3""" self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1 .""") self.vs[0]["mm__"] = """MT_pre__Parent""" self.vs[0]["MT_dirty__"] = False self.vs[0]["GUID__"] = UUID('94914a38-3999-44e8-8ecc-1e356a6b3e23') self.vs[1]["MT_subtypeMatching__"] = False self.vs[1]["MT_pre__is_persistent"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[1]["MT_label__"] = """1""" self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1 .""") self.vs[1]["mm__"] = """MT_pre__Clazz""" self.vs[1]["MT_pre__name"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[1]["MT_dirty__"] = False self.vs[1]["GUID__"] = UUID('a2616a97-3c66-4aa2-928f-52a37b14147b') self.vs[2]["MT_subtypeMatching__"] = False self.vs[2]["MT_pre__is_persistent"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. 
# The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[2]["MT_label__"] = """2""" self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1 .""") self.vs[2]["mm__"] = """MT_pre__Clazz""" self.vs[2]["MT_pre__name"] = """ #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True """ self.vs[2]["MT_dirty__"] = False self.vs[2]["GUID__"] = UUID('4a053f4e-83f0-474b-af5a-6e2e58e5ea12') # Load the bridge between this NAC and its LHS from HTopClass2TableNAC0Bridge import HTopClass2TableNAC0Bridge self.bridge = HTopClass2TableNAC0Bridge() def eval_is_persistent1(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_name1(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_is_persistent2(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. #=============================================================================== return True def eval_name2(self, attr_value, this): #=============================================================================== # This code is executed when evaluating if a node shall be matched by this rule. # You can access the value of the current node's attribute value by: attr_value. # You can access any attribute x of this node by: this['x']. # If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead. # The given constraint must evaluate to a boolean expression. 
#=============================================================================== return True def constraint(self, PreNode, graph): """ Executable constraint code. @param PreNode: Function taking an integer as parameter and returns the node corresponding to that label. """ #=============================================================================== # This code is executed after the nodes in the NAC have been matched. # You can access a matched node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # The given constraint must evaluate to a boolean expression: # returning True forbids the rule from being applied, # returning False enables the rule to be applied. #=============================================================================== return True
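# --- Illustrative sketch (not part of the generated file) ---
# The constraint() hook above follows the convention documented in its body:
# returning True forbids the rule from being applied, returning False lets it
# apply. A hand-written specialization could inspect the matched nodes through
# PreNode, e.g. to forbid the match when the two Clazz nodes carry the same
# name. The subclass and the attribute key 'name' are hypothetical; the exact
# key depends on how the metamodel encodes attributes on matched nodes.
class HTopClass2TableNAC0SameName(HTopClass2TableNAC0):
    def constraint(self, PreNode, graph):
        # Labels '1' and '2' are the two Clazz nodes matched by this NAC.
        return PreNode('1')['name'] == PreNode('2')['name']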
mit
731,728,089,143,411,300
47.233333
117
0.51072
false
4.568041
false
false
false