repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
montaggroup/montag-token-redeemer
|
web2py/applications/token_redeemer/languages/pl-pl.py
|
Python
|
gpl-3.0
| 3,788
| 0.022703
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele',
'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste',
'Change Password': 'Change Password',
'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktualne \xc5\xbc\xc4\x85danie',
'Current response': 'Aktualna odpowied\xc5\xba',
'Current session': 'Aktualna sesja',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Usu\xc5\x84:',
'Edit': 'Edit',
'Edit Profile': 'Edit Profile',
'Edit This App': 'Edit This App',
'Edit current record': 'Edytuj aktualny rekord',
'Hello World': 'Witaj \xc5\x9awiecie',
'Import/Export': 'Importuj/eksportuj',
'Index': 'Index',
'Internal State': 'Stan wewn\xc4\x99trzny',
'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie',
'Layout': 'Layout',
'Login': 'Zaloguj',
'Logout': 'Logout',
'Lost Password': 'Przypomnij has\xc5\x82o',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'New Record': 'Nowy rekord',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Powered by': 'Powered by',
'Query:': 'Zapytanie:',
'Register': 'Zarejestruj',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wybrane wiersze',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'Update:': 'Uaktualnij:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to buil
|
d more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...
|
) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.',
'View': 'View',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Witaj w web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w',
'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego',
'customize me!': 'dostosuj mnie!',
'data uploaded': 'dane wys\xc5\x82ane',
'database': 'baza danych',
'database %s select': 'wyb\xc3\xb3r z bazy danych %s',
'db': 'baza danych',
'design': 'projektuj',
'done!': 'zrobione!',
'edit profile': 'edit profile',
'export as csv file': 'eksportuj jako plik csv',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony',
'next 100 rows': 'nast\xc4\x99pne 100 wierszy',
'or import from csv file': 'lub zaimportuj z pliku csv',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'record',
'record does not exist': 'rekord nie istnieje',
'record id': 'id rekordu',
'register': 'register',
'selected': 'wybranych',
'state': 'stan',
'table': 'tabela',
'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv',
}
|
ArnossArnossi/checkmate
|
checkmate/contrib/plugins/python/metrics/issues_data.py
|
Python
|
agpl-3.0
| 813
| 0.00246
|
# -*- coding: utf-8 -*-
"""
This file is part of checkmate, a meta code checker written in Python.
Copyright (C) 2015 Andreas Dewes, QuantifiedCode UG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without e
|
ven the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/
|
>.
"""
issues_data = {
}
|
Fendoe/open-hackathon-o
|
open-hackathon-server/src/hackathon/util.py
|
Python
|
mit
| 5,753
| 0.001912
|
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import importlib
import json
import os
from datetime import datetime
from hackathon.log import log
try:
from config import Config
except ImportError:
from config_sample import Config
__all__ = [
"get_config",
"safe_get_config",
"get_class",
"load_template",
"call",
"get_now",
"Utility"
]
def get_config(key):
"""Get configured value from configuration file according to specified key
:type key: str or unicode
:param key: the search key, separate section with '.'. For example: "mysql.connection"
:Example:
get_config("mysql.connection")
:return configured value if specified key exists else None
:rtype str or unicode or dict
"""
ret = Config
for arg in key.split("."):
if arg in ret and isinstance(ret, dict):
ret = ret[arg]
else:
return None
return ret
def safe_get_config(key, default_value):
"""Get configured value from configuration file according to specified key and a default value
:type key: str | unicode
:param key: the search key, separate section with '.'. For example: "mysql.connection"
:type default_value: object
:param default_value: the default value if specified key cannot be found in configuration file
:Example:
safe_get_config("mysql.connection", "mysql://root:root@localhost:3306/db")
:return configured value if specified key exists else the default value
:rtype str or unicode or dict
"""
r = get_config(key)
return r if r else default_value
def get_class(kls):
"""Get the class object by it's name
:type kls: str or unicode
:param kls: the the full name, including module name of class name , of a class obj
:return the class object
:rtype classobj
:Example:
get_class("hackathon.user.UserManager")
:raise ModuleException if module cannot be imported
"""
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def load_template(url):
"""Load hackathon template from file into a dict
:type url: str|unicode
:param url: the absolute path of the template.
:return dict indicates a hackathon template
:rtype dict
"""
try:
template = json.load(file(url))
except Exception as e:
log.error(e)
return None
return template
def call(mdl_cls_func, cls_args, func_args):
# todo refactoring the call method to use standard hackathon_scheduler
mdl_name = mdl_cls_func[0]
cls_name = mdl_cls_func[1]
func_name = mdl_cls_func[2]
log.debug('call: mdl_name [%s], cls_name [%s], func_name [%s]' % (mdl_name, cls_name, func_name))
mdl = importlib.import_module(mdl_name)
cls = getattr(mdl, cls_name)
func = getattr(cls(*cls_args), func_name)
func(*func_args)
def get_now():
"""Return the current local date and time without tzinfo"""
return datetime.utcnow() # tzinfo=None
class Utility(object):
"""An utility class for those commonly used methods"""
def get_now(self):
"""Return the current local date and time without tzinfo"""
return get_now()
def convert(self, value):
"""Convert unicode string to str"""
if isinstance(value, dict):
return {self.convert(key): self.convert(value) for key, value in value.iteritems()}
elif isinstance(value, list):
return [self.convert(element) for element in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def get_config(self, key):
"""Get configured value from configuration file according to specified key
.. seealso:: get_config outside Utility class
"""
return get_config(key)
def safe_get_config(self, key, default_value
|
):
"""Get configured value from configuration file according to specified key and a default value
.. seealso:: safe_get_config out
|
side Utility class
"""
return safe_get_config(key, default_value)
def mkdir_safe(self, path):
"""Create a directory if it doesn't exist
:return the directory path
"""
if path and not (os.path.exists(path)):
os.makedirs(path)
return path
|
AlbertoPeon/invenio
|
modules/bibformat/lib/elements/bfe_references.py
|
Python
|
gpl-2.0
| 3,979
| 0.012315
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints references
"""
__revision__ = "$Id$"
def format_element(bfo, reference_prefix, reference_suffix):
"""
Prints the references of this record
@param reference_prefix: a prefix displayed before each reference
@param reference_suffix: a suffix d
|
isplayed after each reference
|
"""
from invenio.config import CFG_SITE_URL, CFG_ADS_SITE
from invenio.search_engine import get_mysql_recid_from_aleph_sysno, \
print_record
if CFG_ADS_SITE:
## FIXME: store external sysno into 999 $e, not into 999 $r
# do not escape field values for now because of things like A&A in
# 999 $r that are going to be resolved further down:
references = bfo.fields("999C5", escape=0)
else:
references = bfo.fields("999C5", escape=1)
out = ""
for reference in references:
ref_out = ''
if reference.has_key('o'):
if out != "":
ref_out = '</li>'
ref_out += "<li><small>"+ reference['o']+ "</small> "
if reference.has_key('m'):
ref_out += "<small>"+ reference['m']+ "</small> "
if reference.has_key('r'):
if CFG_ADS_SITE:
# 999 $r contains external sysno to be resolved:
recid_to_display = get_mysql_recid_from_aleph_sysno(reference['r'])
if recid_to_display:
ref_out += print_record(recid_to_display, 'hs')
else:
ref_out += '<small>' + reference['r'] + ' (not in ADS)</small>'
else:
ref_out += '<small> [<a href="'+CFG_SITE_URL+'/search?f=reportnumber&p='+ \
reference['r']+ \
'&ln=' + bfo.lang + \
'">'+ reference['r']+ "</a>] </small> <br />"
if reference.has_key('t'):
ejournal = bfo.kb("ejournals", reference.get('t', ""))
if ejournal != "":
ref_out += ' <small> <a href="https://cds.cern.ch/ejournals.py?publication='\
+ reference['t'].replace(" ", "+") \
+"&volume="+reference.get('v', "")+"&year="+\
reference.get('y', "")+"&page="+\
reference.get('p',"").split("-")[0]+'">'
ref_out += reference['t']+": "+reference.get('v', "")+\
" ("+reference.get('y', "")+") "
ref_out += reference.get('p', "")+"</a> </small> <br />"
else:
ref_out += " <small> "+reference['t']+ reference.get('v', "")+\
reference.get('y',"")+ reference.get('p',"")+ \
" </small> <br />"
if reference_prefix is not None and ref_out != '':
ref_out = reference_prefix + ref_out
if reference_suffix is not None and ref_out != '':
ref_out += reference_suffix
out += ref_out
if out != '':
out += '</li>'
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
Morgan-Stanley/treadmill
|
lib/python/treadmill/sproc/cleanup.py
|
Python
|
apache-2.0
| 1,571
| 0
|
"""Runs the Treadmill container cleanup job.
"""
from __future__ import absolute_import
from __fut
|
ure__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import appenv
from treadmill import cleanu
|
p
from treadmill import cli
from treadmill import utils
def init():
"""Top level command handler."""
@click.group(name='cleanup')
def cleanup_grp():
"""Cleanup click group."""
@cleanup_grp.command('watcher')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
def cleanup_watcher(approot):
"""Start cleanup watcher."""
tm_env = appenv.AppEnvironment(root=approot)
cleaner = cleanup.Cleanup(tm_env)
cleaner.run()
@cleanup_grp.command('instance')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
@click.option('--runtime-param', type=cli.LIST, required=False)
@click.argument('instance', nargs=1)
def cleanup_instance(approot, runtime, instance, runtime_param):
"""Actually do the cleanup of the instance.
"""
param = utils.equals_list2dict(runtime_param or [])
tm_env = appenv.AppEnvironment(root=approot)
cleaner = cleanup.Cleanup(tm_env)
cleaner.invoke(runtime, instance, param)
del cleanup_watcher
del cleanup_instance
return cleanup_grp
|
whyflyru/django-seo
|
djangoseo/version.py
|
Python
|
bsd-3-clause
| 54
| 0
|
#
|
-*- coding:
|
utf-8 -*-
__version__ = '2.6.0+whyfly.3'
|
leopoul/mupy
|
muparse/migrations/0001_initial.py
|
Python
|
apache-2.0
| 7,178
| 0.00822
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NodeGroup'
db.create_table('muparse_nodegroup', (
('url', self.gf('django.db.models.fields.CharField')(max_length=512)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('muparse', ['NodeGroup'])
# Adding model 'GraphCategory'
db.create_table('muparse_graphcategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
))
db.send_create_signal('muparse', ['GraphCategory'])
# Adding model 'Graph'
db.create_table('muparse_graph', (
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['muparse.GraphCategory'])),
('slug', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.Slu
|
gField')(max_length=255, db_index=True)),
))
db.send_create_signal('muparse', ['Graph'])
# Addi
|
ng model 'Node'
db.create_table('muparse_node', (
('url', self.gf('django.db.models.fields.CharField')(max_length=512)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['muparse.NodeGroup'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
))
db.send_create_signal('muparse', ['Node'])
# Adding model 'NodeGraphs'
db.create_table('muparse_nodegraphs', (
('node', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['muparse.Node'])),
('graph', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['muparse.Graph'])),
('pageurl', self.gf('django.db.models.fields.CharField')(max_length=512)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('baseurl', self.gf('django.db.models.fields.CharField')(max_length=512)),
))
db.send_create_signal('muparse', ['NodeGraphs'])
# Adding model 'SavedSearch'
db.create_table('muparse_savedsearch', (
('display_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('muparse', ['SavedSearch'])
# Adding M2M table for field graphs on 'SavedSearch'
db.create_table('muparse_savedsearch_graphs', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('savedsearch', models.ForeignKey(orm['muparse.savedsearch'], null=False)),
('nodegraphs', models.ForeignKey(orm['muparse.nodegraphs'], null=False))
))
db.create_unique('muparse_savedsearch_graphs', ['savedsearch_id', 'nodegraphs_id'])
def backwards(self, orm):
# Deleting model 'NodeGroup'
db.delete_table('muparse_nodegroup')
# Deleting model 'GraphCategory'
db.delete_table('muparse_graphcategory')
# Deleting model 'Graph'
db.delete_table('muparse_graph')
# Deleting model 'Node'
db.delete_table('muparse_node')
# Deleting model 'NodeGraphs'
db.delete_table('muparse_nodegraphs')
# Deleting model 'SavedSearch'
db.delete_table('muparse_savedsearch')
# Removing M2M table for field graphs on 'SavedSearch'
db.delete_table('muparse_savedsearch_graphs')
models = {
'muparse.graph': {
'Meta': {'object_name': 'Graph'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.GraphCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'muparse.graphcategory': {
'Meta': {'object_name': 'GraphCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'muparse.node': {
'Meta': {'object_name': 'Node'},
'graphs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['muparse.Graph']", 'null': 'True', 'through': "orm['muparse.NodeGraphs']", 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.NodeGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'muparse.nodegraphs': {
'Meta': {'object_name': 'NodeGraphs'},
'baseurl': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'graph': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.Graph']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['muparse.Node']"}),
'pageurl': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'muparse.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'muparse.savedsearch': {
'Meta': {'object_name': 'SavedSearch'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'graphs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['muparse.NodeGraphs']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['muparse']
|
PrivacyScore/PrivacyScore
|
privacyscore/api/views.py
|
Python
|
gpl-3.0
| 7,028
| 0.000711
|
import re
from django.db import transaction
from django.db.models import Count, Q
from django.utils.translation import ugettext_lazy as _
from rest_framework.decorators import api_view
from rest_framework.exceptions import NotFound, ParseError
from rest_framework.request import Request
from rest_framework.response import Response
from privacyscore.backend.models import ScanList, ListColumnValue, Site, \
Scan, ScanResult
from privacyscore.utils import normalize_url
# TODO: Improve, add missing functionality
# TODO: Count views of sites and scan lists
@api_view(['GET'])
def get_scan_lists(request: Request) -> Response:
"""Get lists."""
scan_lists = ScanList.objects.annotate(sites__count=Count('sites')).filter(
editable=False,
private=False,
)
return Response([l.as_dict() for l in scan_lists])
@api_view(['GET'])
def get_scan_list_by_token(request: Request, token: str) -> Response:
"""Get a list by its token."""
try:
l = ScanList.objects.get(token=token)
return Response(l.as_dict())
except ScanList.DoesNotExist:
raise NotFound
@api_view(['POST'])
def save_scan_list(request: Request) -> Response:
"""Save a new list."""
try:
with transaction.atomic():
scan_list = ScanList.objects.create(
name=request.data['listname'],
description=request.data['description'],
private=bool(request.data['isprivate']),
user=request.user if request.user.is_authenticated else None)
scan_list.save_tags(request.data['tags'])
# save columns
scan_list.save_columns(request.data['columns'])
return Response({
'list_id': scan_list.pk,
'token': scan_list.token
}, status=201)
except KeyError:
raise ParseError
@api_view(['POST'])
def update_scan_list(request: Request, scan_list_id: int) -> Response:
"""Update an existing list."""
try:
# TODO: Check if list is editable (and by current user)
scan_list = ScanList.objects.get(pk=scan_list_id,
token=request.data['token'])
scan_list.name = request.data['listname']
scan_list.description = request.data['description']
scan_list.private = request.data['isprivate']
# save tags
scan_list.save_tags(request.data['tags'])
# save columns
scan_list.save_columns(request.data['columns'])
scan_list.save()
return Response({
'type': 'success',
'message': 'ok',
})
except KeyError as e:
raise ParseError
except ScanList.DoesNotExist:
raise NotFound
@api_view(['DELETE'])
def delete_scan_list(request: Request, token: str) -> Response:
"""Update an existing list."""
# TODO: Access control (Or is token sufficient)?
try:
scan_list = ScanList.objects.get(token=token)
# all related objects CASCADE automatically.
scan_list.delete()
return Response({
'type': 'success',
'message': 'ok',
})
except KeyError as e:
raise ParseError
except ScanList.DoesNotExist:
raise NotFound
# TODO: Why POST?
# TODO: Add a filter option to get_lists and get rid of this search method
@api_view(['POST'])
def search_scan_lists(request: Request) -> Response:
"""Search for lists."""
# TODO: Access control
try:
search_text = request.data['searchtext']
scan_lists = ScanList.objects.filter(
Q(name__icontains=search_text) |
Q(description__icontains=search_text) |
Q(tags__name__icontains=search_text)).distinct()
return Response([l.as_dict() for l in scan_lists])
except KeyError:
raise ParseError
@api_view(['POST'])
def scan_scan_list(request: Request, scan_list_id: int) -> Response:
""
|
"Schedule a scan for the list."""
try:
s
|
can_list = ScanList.objects.get(pk=scan_list_id)
# This always succeeds as rate limit check is done per-site
scan_list.scan()
return Response({
'type': 'success',
'message': 'ok',
})
except ScanList.DoesNotExist:
raise NotFound
@api_view(['POST'])
def save_site(request: Request) -> Response:
"""Save all sites for a list."""
try:
# TODO: Check if user is allowed to add this site to the list and if
# the list is editable at all
scan_list = ScanList.objects.get(pk=request.data['listid'])
# get columns
columns = scan_list.columns.order_by('sort_key')
columns_count = len(columns)
with transaction.atomic():
# delete all sites which previously existed.
scan_list.sites.through.objects.filter(scanlist=scan_list).delete()
# delete all column values for this list
ListColumnValue.objects.filter(column__scan_list=scan_list).delete()
for site in request.data['sites']:
if not site['url']:
continue
url = normalize_url(site['url'])
if not url.startswith("http"):
# non-http url supplied; not supported
continue
site_object = Site.objects.get_or_create(url=url)[0]
if site_object in scan_list.sites.all():
# redundant site, already added to list
continue
site_object.scan_lists.add(scan_list)
# TODO: Remove empty columns in frontend to prevent count
# mismatch (as empty columns are filtered before so it is not
# clear which column values belong to which column.
# workaround: remove all empty values (so values are required
# for all sites in every used column
site['column_values'] = [v for v in site['column_values'] if v]
# Save column values
if len(site['column_values']) != columns_count:
raise ParseError(
'number of columns in site does not match number of '
'columns in list.')
for i, column in enumerate(site['column_values']):
ListColumnValue.objects.create(
column=columns[i], site=site_object, value=column)
return Response({
'type': 'success',
'message': 'ok',
})
except KeyError:
raise ParseError
except ScanList.DoesNotExist:
raise NotFound
@api_view(['GET'])
def scan_result(request: Request, scan_id: int) -> Response:
"""Get a scan result by its id."""
try:
scan = Scan.objects.get(pk=scan_id)
return Response(scan.result.result)
except Scan.DoesNotExist:
raise NotFound
except ScanResult.DoesNotExist:
raise NotFound('scan not finished')
|
rbian/virt-test
|
virttest/staging/utils_memory.py
|
Python
|
gpl-2.0
| 8,312
| 0.001444
|
import re
import glob
import math
import logging
import os
from autotest.client import utils
from autotest.client.shared import error
# Returns total memory in kb
def read_from_meminfo(key):
cmd_result = utils.run('grep %s /proc/meminfo' % key, verbose=False)
meminfo = cmd_result.stdout
return int(re.search(r'\d+', meminfo).group(0))
def memtotal():
return read_from_meminfo('MemTotal')
def freememtotal():
return read_from_meminfo('MemFree')
def rounded_memtotal():
# Get total of all physical mem, in kbytes
usable_kbytes = memtotal()
# usable_kbytes is system's usable DRAM in kbytes,
# as reported by memtotal() from device /proc/meminfo memtotal
# after Linux deducts 1.5% to 5.1% for system table overhead
# Undo the unknown actual deduction by rounding up
# to next small multiple of a big power-of-two
# eg 12GB - 5.1% gets rounded back up to 12GB
mindeduct = 0.015 # 1.5 percent
maxdeduct = 0.055 # 5.5 percent
# deduction range 1.5% .. 5.5% supports physical mem sizes
# 6GB .. 12GB in steps of .5GB
# 12GB .. 24GB in steps of 1 GB
# 24GB .. 48GB in steps of 2 GB ...
# Finer granularity in physical mem sizes would require
# tighter spread between min and max possible deductions
# increase mem size by at least min deduction, without rounding
min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
# increase mem size further by 2**n rounding, by 0..roundKb or more
round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
# find least binary roundup 2**n that covers worst-cast roundKb
mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
# have round_kbytes <= mod2n < round_kbytes*2
# round min_kbytes up to next multiple of mod2n
phys_kbytes = min_kbytes + mod2n - 1
phys_kbytes = phys_kbytes - (phys_kbytes % mod2n) # clear low bits
return phys_kbytes
def numa_nodes():
node_paths = glob.glob('/sys/devices/system/node/node*')
nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
return (sorted(nodes))
def node_size():
nodes = max(len(numa_nodes()), 1)
return ((memtotal() * 1024) / nodes)
def get_huge_page_size():
return read_from_meminfo('Hugepagesize')
def get_num_huge_pages():
return read_from_meminfo('HugePages_Total')
def get_num_huge_pages_free():
return read_from_meminfo('HugePages_Free')
def get_num_huge_pages_rsvd():
return read_from_meminfo('HugePages_Rsvd')
def get_num_anon_huge_pages(pid):
return read_from_smaps(pid, 'AnonHugePages')
def get_transparent_hugepage():
UPSTREAM_THP_PATH = "/sys/kernel/mm/transparent_hugepage"
RH_THP_PATH = "/sys/kernel/mm/redhat_transparent_hugepage"
if os.path.isdir(UPSTREAM_THP_PATH):
thp_path = UPSTREAM_THP_PATH
elif os.path.isdir(RH_THP_PATH):
thp_path = RH_THP_PATH
else:
raise error.TestFail("transparent hugepage Not supported")
out = utils.system_output('cat %s/enabled' % thp_path)
if out[0] == "[always]":
return 'always'
elif out[1] == "[madvise]":
return 'madvise'
else:
return 'never'
def set_num_huge_pages(num):
utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
def set_transparent_hugepage(sflag):
"""
sflag only can be set always, madvise or never.
"""
flags = ['always', 'madvise', 'never']
if sflag not in flags:
raise error.TestFail("specify wrong parameter")
UPSTREAM_THP_PATH = "/sys/kernel/mm/transparent_hugepage"
RH_THP_PATH = "/sys/kernel/mm/redhat_transparent_hugepage"
if os.path.isdir(UPSTREAM_THP_PATH):
thp_path = UPSTREAM_THP_PATH
elif os.path.isdir(RH_THP_PATH):
thp_path = RH_THP_PATH
else:
raise error.TestFail("transparent hugepage Not supported")
ret = os.system("echo %s > %s/enabled" % (sflag, thp_path))
if ret != 0:
raise error.TestFail("setting transparent_hugepage failed")
def drop_caches():
"""Writes back all dirty pages to disk and clears all the caches."""
utils.run("sync", verbose=False)
# We ignore failures here as this will fail on 2.6.11 kernels.
utils.run("echo 3 > /proc/sys/vm/drop_caches", ignore_status=True,
verbose=False)
def read_from_vmstat(key):
"""
Get specific item value from vmstat
:param key: The item you want to check from vmstat
:type key: String
:return: The value of the item
:rtype: int
"""
vmstat = open("/proc/vmstat")
vmstat_info = vmstat.read()
vmstat.close()
return int(re.findall("%s\s+(\d+)" % key, vmstat_info)[0])
def read_from_smaps(pid, key):
"""
Get specific item value from the smaps of a process include all sections.
:param pid: Process id
:type pid: String
:param key: The item you want to check from smaps
:type key: String
:return: The value of the item in kb
:rtype: int
"""
smaps = open("/proc/%s/smaps" % pid)
smaps_info = smaps.read()
smaps.close()
memory_size = 0
for each_number in re.findall("%s:\s+(\d+)" % key, smaps_info):
memory_size += int(each_number)
return memory_size
def read_from_numa_maps(pid, key):
    """
    Get the process numa related info from numa_maps. This function
    only use to get the numbers like anon=1.

    :param pid: Process id
    :type pid: String
    :param key: The item you want to check from numa_maps
    :type key: String
    :return: A dict using the address as the keys
    :rtype: dict
    """
    # Context manager guarantees the file is closed on any exit path.
    with open("/proc/%s/numa_maps" % pid) as numa_maps:
        numa_map_info = numa_maps.read()
    # Each matching line starts with the mapping address, followed by
    # "key=value" (or "key:value") pairs; values stay strings as before.
    numa_pattern = r"(^[\dabcdfe]+)\s+.*%s[=:](\d+)" % key
    return dict(re.findall(numa_pattern, numa_map_info, re.M))
def get_buddy_info(chunk_sizes, nodes="all", zones="all"):
"""
Get the fragement status of the host. It use the same method
to get the page size in buddyinfo.
2^chunk_size * page_size
The chunk_sizes can be string make up by all orders that you want to check
splited with blank or a mathematical expression with '>', '<' or '='.
For example:
The input of chunk_size could be: "0 2 4"
And the return will be: {'0': 3, '2': 286, '4': 687}
if you are using expression: ">=9"
the return will be: {'9': 63, '10': 225}
:param chunk_size: The order number shows in buddyinfo. This is not
the real page size.
:type chunk_size: string
:param nodes: The numa node that you want to check. Default value is all
:type nodes: string
:param zones: The memory zone that you want to check. Default value is all
:type zones: string
:return: A dict using the chunk_size as the keys
:rtype: dict
"""
buddy_info = open("/proc/buddyinfo")
buddy_in
|
fo_content = buddy_info.read()
buddy_info.close()
re_buddyinfo = "Node\s+"
if nodes == "all":
re_buddyinfo += "(\d+)"
else:
re_buddyinfo += "(%s)" % "|".join(nodes.split())
if not re.findall(re_buddyinfo, buddy_info_content):
logging.warn("Can not find Nodes %s" % nodes)
return None
re_buddyinfo += ".*?zone\s+"
if zones ==
|
"all":
re_buddyinfo += "(\w+)"
else:
re_buddyinfo += "(%s)" % "|".join(zones.split())
if not re.findall(re_buddyinfo, buddy_info_content):
logging.warn("Can not find zones %s" % zones)
return None
re_buddyinfo += "\s+([\s\d]+)"
buddy_list = re.findall(re_buddyinfo, buddy_info_content)
if re.findall("[<>=]", chunk_sizes) and buddy_list:
size_list = range(len(buddy_list[-1][-1].strip().split()))
chunk_sizes = [str(_) for _ in size_list if eval("%s %s" % (_,
chunk_sizes))]
chunk_sizes = ' '.join(chunk_sizes)
buddyinfo_dict = {}
for chunk_size in chunk_sizes.split():
buddyinfo_dict[chunk_size] = 0
for _, _, chunk_info in buddy_list:
chunk_info
|
TomAugspurger/pandas
|
pandas/tests/indexes/interval/test_astype.py
|
Python
|
bsd-3-clause
| 7,740
| 0.000904
|
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype
from pandas import (
CategoricalIndex,
Index,
IntervalIndex,
NaT,
Timedelta,
Timestamp,
interval_range,
)
import pandas._testing as tm
class Base:
    """Tests common to IntervalIndex with any subtype"""

    def test_astype_idempotent(self, index):
        # Casting to the generic "interval" string or to the index's own
        # dtype must round-trip to an equal index.
        result = index.astype("interval")
        tm.assert_index_equal(result, index)
        result = index.astype(index.dtype)
        tm.assert_index_equal(result, index)

    def test_astype_object(self, index):
        # Object cast keeps the values but the result no longer compares
        # equal to the interval-dtyped original.
        result = index.astype(object)
        expected = Index(index.values, dtype="object")
        tm.assert_index_equal(result, expected)
        assert not result.equals(index)

    def test_astype_category(self, index):
        # The "category" string and a default CategoricalDtype are equivalent.
        result = index.astype("category")
        expected = CategoricalIndex(index.values)
        tm.assert_index_equal(result, expected)
        result = index.astype(CategoricalDtype())
        tm.assert_index_equal(result, expected)
        # non-default params
        categories = index.dropna().unique().values[:-1]
        dtype = CategoricalDtype(categories=categories, ordered=True)
        result = index.astype(dtype)
        expected = CategoricalIndex(index.values, categories=categories, ordered=True)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "dtype",
        [
            "int64",
            "uint64",
            "float64",
            "complex128",
            "period[M]",
            "timedelta64",
            "timedelta64[ns]",
            "datetime64",
            "datetime64[ns]",
            "datetime64[ns, US/Eastern]",
        ],
    )
    def test_astype_cannot_cast(self, index, dtype):
        # Direct casts to any non-interval dtype must raise TypeError.
        msg = "Cannot cast IntervalIndex to dtype"
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)

    def test_astype_invalid_dtype(self, index):
        # Unknown dtype strings surface numpy's "not understood" TypeError.
        msg = "data type [\"']fake_dtype[\"'] not understood"
        with pytest.raises(TypeError, match=msg):
            index.astype("fake_dtype")
class TestIntSubtype(Base):
    """Tests specific to IntervalIndex with integer-like subtype"""

    # One signed and one unsigned fixture, with different closed-ness.
    indexes = [
        IntervalIndex.from_breaks(np.arange(-10, 11, dtype="int64")),
        IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), closed="left"),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize(
        "subtype", ["float64", "datetime64[ns]", "timedelta64[ns]"]
    )
    def test_subtype_conversion(self, index, subtype):
        # Casting converts both endpoints while preserving closed-ness.
        dtype = IntervalDtype(subtype)
        result = index.astype(dtype)
        expected = IntervalIndex.from_arrays(
            index.left.astype(subtype), index.right.astype(subtype), closed=index.closed
        )
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "subtype_start, subtype_end", [("int64", "uint64"), ("uint64", "int64")]
    )
    def test_subtype_integer(self, subtype_start, subtype_end):
        # Signed <-> unsigned conversions with non-negative breaks succeed.
        index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start))
        dtype = IntervalDtype(subtype_end)
        result = index.astype(dtype)
        expected = IntervalIndex.from_arrays(
            index.left.astype(subtype_end),
            index.right.astype(subtype_end),
            closed=index.closed,
        )
        tm.assert_index_equal(result, expected)

    @pytest.mark.xfail(reason="GH#15832")
    def test_subtype_integer_errors(self):
        # int64 -> uint64 fails with negative values
        index = interval_range(-10, 10)
        dtype = IntervalDtype("uint64")
        with pytest.raises(ValueError):
            index.astype(dtype)
class TestFloatSubtype(Base):
    """Tests specific to IntervalIndex with float subtype"""

    # One NaN-free fixture and one containing an NA interval.
    indexes = [
        interval_range(-10.0, 10.0, closed="neither"),
        IntervalIndex.from_arrays(
            [-1.5, np.nan, 0.0, 0.0, 1.5], [-0.5, np.nan, 1.0, 1.0, 3.0], closed="both"
        ),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize("subtype", ["int64", "uint64"])
    def test_subtype_integer(self, subtype):
        index = interval_range(0.0, 10.0)
        dtype = IntervalDtype(subtype)
        result = index.astype(dtype)
        expected = IntervalIndex.from_arrays(
            index.left.astype(subtype), index.right.astype(subtype), closed=index.closed
        )
        tm.assert_index_equal(result, expected)

        # raises with NA
        msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
        with pytest.raises(ValueError, match=msg):
            index.insert(0, np.nan).astype(dtype)

    @pytest.mark.xfail(reason="GH#15832")
    def test_subtype_integer_errors(self):
        # float64 -> uint64 fails with negative values
        index = interval_range(-10.0, 10.0)
        dtype = IntervalDtype("uint64")
        with pytest.raises(ValueError):
            index.astype(dtype)

        # float64 -> integer-like fails with non-integer valued floats
        index = interval_range(0.0, 10.0, freq=0.25)
        dtype = IntervalDtype("int64")
        with pytest.raises(ValueError):
            index.astype(dtype)

        dtype = IntervalDtype("uint64")
        with pytest.raises(ValueError):
            index.astype(dtype)

    @pytest.mark.parametrize("subtype", ["datetime64[ns]", "timedelta64[ns]"])
    def test_subtype_datetimelike(self, index, subtype):
        # Float intervals cannot become datetime-like intervals.
        dtype = IntervalDtype(subtype)
        msg = "Cannot convert .* to .*; subtypes are incompatible"
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)
class TestDatetimelikeSubtype(Base):
    """Tests specific to IntervalIndex with datetime-like subtype"""

    # Naive, NaT-containing, tz-aware, and timedelta fixtures.
    indexes = [
        interval_range(Timestamp("2018-01-01"), periods=10, closed="neither"),
        interval_range(Timestamp("2018-01-01"), periods=10).insert(2, NaT),
        interval_range(Timestamp("2018-01-01", tz="US/Eastern"), periods=10),
        interval_range(Timedelta("0 days"), periods=10, closed="both"),
        interval_range(Timedelta("0 days"), periods=10).insert(2, NaT),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize("subtype", ["int64", "uint64"])
    def test_subtype_integer(self, index, subtype):
        # Datetime-like endpoints cast to their integer (ns) representation.
        dtype = IntervalDtype(subtype)
        result = index.astype(dtype)
        expected = IntervalIndex.from_arrays(
            index.left.astype(subtype), index.right.astype(subtype), closed=index.closed
        )
        tm.assert_index_equal(result, expected)

    def test_subtype_float(self, index):
        # Datetime-like intervals cannot become float intervals.
        dtype = IntervalDtype("float64")
        msg = "Cannot convert .* to .*; subtypes are incompatible"
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)

    def test_subtype_datetimelike(self):
        # datetime -> timedelta raises
        dtype = IntervalDtype("timedelta64[ns]")
        msg = "Cannot convert .* to .*; subtypes are incompatible"

        index = interval_range(Timestamp("2018-01-01"), periods=10)
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)

        index = interval_range(Timestamp("2018-01-01", tz="CET"), periods=10)
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)

        # timedelta -> datetime raises
        dtype = IntervalDtype("datetime64[ns]")
        index = interval_range(Timedelta("0 days"), periods=10)
        with pytest.raises(TypeError, match=msg):
            index.astype(dtype)
|
coreos/depot_tools
|
my_activity.py
|
Python
|
bsd-3-clause
| 36,618
| 0.009504
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get stats about your activity.
Example:
- my_activity.py for stats for the current week (last week on mondays).
- my_activity.py -Q for stats for last quarter.
- my_activity.py -Y for stats for this year.
- my_activity.py -b 4/5/12 for stats since 4/5/12.
- my_activity.py -b 4/5/12 -e 6/7/12 for stats between 4/5/12 and 6/7/12.
"""
# These services typically only provide a created time and a last modified time
# for each item for general queries. This is not enough to determine if there
# was activity in a given time period. So, we first query for all things created
# before end and modified after begin. Then, we get the details of each item and
# check those details to determine if there was activity in the given period.
# This means that query time scales mostly with (today() - begin).
import cookielib
import csv
import datetime
from datetime import datetime
from datetime import timedelta
from functools import partial
import json
import optparse
import os
import re
import subprocess
import sys
import urllib
import urllib2
import rietveld
from third_party import upload
# Imported later, once options are set.
webkitpy = None
try:
from dateutil.relativedelta import relativedelta # pylint: disable=F0401
except ImportError:
print 'python-dateutil package required'
exit(1)
# python-keyring provides easy access to the system keyring.
try:
import keyring # pylint: disable=W0611,F0401
except ImportError:
print 'Consider installing python-keyring'
def webkit_account(user):
    # Look up *user*'s WebKit committer entry via their chromium.org email.
    # Returns None when the webkitpy module was not loaded (no WebKit repo).
    if not webkitpy:
        return None
    committer_list = webkitpy.common.config.committers.CommitterList()
    email = user + "@chromium.org"
    return committer_list.account_by_email(email)
def user_to_webkit_email(user):
    # Map a chromium username to the primary email WebKit knows them by,
    # or None if they have no committer entry.
    account = webkit_account(user)
    if not account:
        return None
    return account.emails[0]
def user_to_webkit_owner_search(user):
    """Build git-log arguments matching commits authored by *user* in WebKit."""
    account = webkit_account(user)
    if not account:
        # Unknown contributor: fall back to the chromium.org address.
        return ['--author=%s@chromium.org' % user]
    search = ['--author=' + email for email in account.emails]
    # commit-bot is author for contributors who are not committers.
    search.append('--grep=Patch by ' + account.full_name)
    return search
def user_to_webkit_reviewer_search(user):
    """Build git-log arguments matching commits reviewed by *user* in WebKit."""
    committer_list = webkitpy.common.config.committers.CommitterList()
    account = committer_list.reviewer_by_email(user + "@chromium.org")
    # Non-reviewers never appear in "Reviewed by" lines.
    if not account:
        return []
    return ['--grep=Reviewed by ' + account.full_name]
# Rietveld code-review servers to query. 'shorturl' is the display form of
# issue links, 'supports_owner_modified_query' gates the cheaper
# owner+modified-after search, and 'requires_auth' forces a login first.
rietveld_instances = [
    {
        'url': 'codereview.chromium.org',
        'shorturl': 'crrev.com',
        'supports_owner_modified_query': True,
        'requires_auth': False,
        'email_domain': 'chromium.org',
    },
    {
        'url': 'chromereviews.googleplex.com',
        'shorturl': 'go/chromerev',
        'supports_owner_modified_query': True,
        'requires_auth': True,
        'email_domain': 'google.com',
    },
    {
        'url': 'codereview.appspot.com',
        'supports_owner_modified_query': True,
        'requires_auth': False,
        'email_domain': 'chromium.org',
    },
    {
        'url': 'breakpad.appspot.com',
        'supports_owner_modified_query': False,
        'requires_auth': False,
        'email_domain': 'chromium.org',
    },
]
# Gerrit servers: REST instances are described by 'url' (+ optional
# 'shorturl'), older SSH-query instances by 'host'/'port'.
gerrit_instances = [
    {
        'url': 'chromium-review.googlesource.com',
        'shorturl': 'crosreview.com',
    },
    # TODO(deymo): chrome-internal-review requires login credentials. Enable once
    # login support is added to this client. See crbug.com/281695.
    #{
    #  'url': 'chrome-internal-review.googlesource.com',
    #  'shorturl': 'crosreview.com/i',
    #},
    {
        'host': 'gerrit.chromium.org',
        'port': 29418,
    },
    {
        'host': 'gerrit-int.chromium.org',
        'port': 29419,
    },
]
# Google Code issue-tracker projects to query; 'shorturl' (when present)
# is the preferred display form for issue links.
google_code_projects = [
    {
        'name': 'chromium',
        'shorturl': 'crbug.com',
    },
    {
        'name': 'chromium-os',
        'shorturl': 'crosbug.com',
    },
    {
        'name': 'chrome-os-partner',
    },
    {
        'name': 'google-breakpad',
    },
    {
        'name': 'gyp',
    },
    {
        'name': 'skia',
    },
]
# Bugzilla servers; 'user_func' maps a chromium username to the account
# name the tracker knows (WebKit's Bugzilla uses committers' own emails).
bugzilla_instances = [
    {
        'search_url': 'http://bugs.webkit.org/buglist.cgi',
        'url': 'wkb.ug',
        'user_func': user_to_webkit_email,
    },
]
# Git repositories scanned directly with git-log. 'change_re' extracts the
# SVN revision from commit messages, 'review_re' the associated review id;
# the *_search_func entries build the author/reviewer query arguments.
git_instances = [
    {
        'option': 'webkit_repo',
        'change_re':
            r'git-svn-id: http://svn\.webkit\.org/repository/webkit/trunk@(\d*)',
        'change_url': 'trac.webkit.org/changeset',
        'review_re': r'https://bugs\.webkit\.org/show_bug\.cgi\?id\=(\d*)',
        'review_url': 'wkb.ug',
        'review_prop': 'webkit_bug_id',
        'owner_search_func': user_to_webkit_owner_search,
        'reviewer_search_func': user_to_webkit_reviewer_search,
    },
]
# Uses ClientLogin to authenticate the user for Google Code issue trackers.
def get_auth_token(email):
    """Fetch a ClientLogin 'Auth' token for code.google.com, or None.

    Tries up to three times, prompting for a password after a failed
    keyring lookup; on repeated failure issue data is simply skipped.
    """
    # KeyringCreds will use the system keyring on the first try, and prompt for
    # a password on the next ones.
    creds = upload.KeyringCreds('code.google.com', 'code.google.com', email)
    for _ in xrange(3):
        email, password = creds.GetUserCredentials()
        url = 'https://www.google.com/accounts/ClientLogin'
        data = urllib.urlencode({
            'Email': email,
            'Passwd': password,
            'service': 'code',
            'source': 'chrome-my-activity',
            'accountType': 'GOOGLE',
        })
        req = urllib2.Request(url, data=data, headers={'Accept': 'text/plain'})
        try:
            response = urllib2.urlopen(req)
            response_body = response.read()
            # Response body is newline-separated key=value pairs; the
            # 'Auth' entry carries the token we need.
            response_dict = dict(x.split('=')
                                 for x in response_body.split('\n') if x)
            return response_dict['Auth']
        except urllib2.HTTPError, e:
            print e

    print 'Unable to authenticate to code.google.com.'
    print 'Some issues may be missing.'
    return None
def username(email):
    """Keeps the username of an email address."""
    # Falsy inputs (None, "") are passed through unchanged.
    if not email:
        return email
    return email.split('@', 1)[0]
def datetime_to_midnight(date):
    """Returns *date* with all time-of-day fields zeroed (local midnight)."""
    return date.replace(hour=0, minute=0, second=0, microsecond=0)
def get_quarter_of(date):
    """Returns (begin, end) datetimes bounding the calendar quarter of *date*.

    Quarters begin in January, April, July and October.
    """
    # (month - 1) % 3 is the number of whole months since the quarter began.
    # The original (date.month % 3) - 1 evaluated to -1 for March, June,
    # September and December, pushing `begin` one month AFTER the date.
    begin = (datetime_to_midnight(date) -
             relativedelta(months=(date.month - 1) % 3, days=(date.day - 1)))
    return begin, begin + relativedelta(months=3)
def get_year_of(date):
    """Returns (begin, end) datetimes bounding the calendar year of *date*."""
    # Jan 1 of this year through Jan 1 of the next; equivalent to the
    # subtract-months/days arithmetic the file uses elsewhere.
    begin = datetime(date.year, 1, 1)
    return begin, datetime(date.year + 1, 1, 1)
def get_week_of(date):
    """Returns (begin, end) bounding the Monday-to-Monday week of *date*."""
    midnight = date.replace(hour=0, minute=0, second=0, microsecond=0)
    begin = midnight - timedelta(days=date.weekday())
    return begin, begin + timedelta(days=7)
def get_yes_or_no(msg):
    # Prompt until the user gives a recognizable answer; pressing enter
    # (empty response) defaults to "no".
    while True:
        response = raw_input(msg + ' yes/no [no] ')
        if response == 'y' or response == 'yes':
            return True
        elif not response or response == 'n' or response == 'no':
            return False
def datetime_from_gerrit(date_string):
    """Parses a Gerrit timestamp; the trailing three digits are literal zeros."""
    return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f000')
def datetime_from_rietveld(date_string):
    """Parses a Rietveld timestamp (microsecond precision)."""
    return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f')
def datetime_from_google_code(date_string):
    """Parses an ISO-8601, 'Z'-suffixed Google Code timestamp."""
    return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
class MyActivity(object):
def __init__(self, options):
self.options = options
self.modified_after = options.begin
self.modified_before = options.end
self.user = options.user
self.changes = []
self.reviews = []
self.issues = []
self.check_cookies()
self.google_code_auth_token = None
self.webkit_repo = options.webkit_repo
if self.webkit_repo:
self.setup_webkit_info()
# Check the codereview cookie jar to determine which Rietveld instances to
# authenticate to.
def check_cookies(self):
cookie_file = os.path.expanduser('~/.codereview_upload_cookies')
cookie_jar = cookielib.MozillaCookieJar(cookie_file)
if not os.path.exists(cookie_file):
exit(1)
try:
cookie_jar.load()
print 'Found cookie file: %s' % cookie_file
except (cookielib.LoadError, IOError):
exit(1)
filtered_instances = []
def has_cookie(instance):
for cookie in cookie_jar:
if cookie.name == 'SACSI
|
Stratoscale/yumcache
|
yumcache/downloader.py
|
Python
|
apache-2.0
| 5,695
| 0.003687
|
import requests
import logging
import urllib2
import traceback
import threading
from yumcache import growingblob
class Downloader(threading.Thread):
    """Background download of one URL, cached into the object store on success.

    The thread starts itself on construction. Callers poll done()/error()
    and may stream partial data via content() while the download runs.
    """

    # Sentinel used when the origin does not report a Content-Length.
    INVALID_LENGTH = -1

    def __init__(self, common, path, url):
        self._common = common
        self._path = path
        self._url = url
        self._content = None
        self._error = None
        self._length = None
        self._done = False
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()

    def content(self):
        # GrowingBlob with whatever has been received so far (or None).
        return self._content

    def error(self):
        # Formatted traceback of the fatal failure, if any.
        return self._error

    def length(self):
        # Expected byte count, or INVALID_LENGTH / None when unknown.
        return self._length

    def done(self):
        return self._done

    def run(self):
        try:
            self._work()
        except:
            logging.exception("Downloader thread exited with exception")
        finally:
            self._done = True

    def _work(self):
        # Strategy: learn the expected length via HEAD, then stream the
        # body; if HEAD fails entirely, fall back to one full GET.
        downloadedCompletly = False
        try:
            self._findLength()
        except:
            original = traceback.format_exc()
            try:
                self._completeDownload()
                downloadedCompletly = True
            except:
                logging.exception(
                    "Unable to find length of %(url)s, and even complete download failed. "
                    "Original Stack trace:\n%(originalException)s", dict(
                        url=self._url, originalException=original))
                self._common.objectStore.incrementNotFoundCount(self._path)
                raise
        if not downloadedCompletly:
            self._liveDownload()
        self._write()

    def _completeDownload(self):
        # Single-shot GET; the whole body arrives before content() is set.
        response = requests.get(self._url)
        self._content = growingblob.GrowingBlob()
        self._content.append(response.content)
        self._length = self._content.length()
        self._statusCode = response.status_code

    def _findLength(self):
        RETRIES = 10
        for retry in xrange(RETRIES):
            try:
                self._length = self._findLengthRetry(retry)
                break
            except:
                if retry == RETRIES - 1:
                    logging.exception("Unable to determine length of %(url)s", dict(url=self._url))
                    raise

    def _liveDownload(self):
        RETRIES = 5
        for retry in xrange(RETRIES):
            try:
                logging.info("Downloading '%(url)s'", dict(url=self._url))
                self._retryLiveDownload(retry)
                logging.info("Done Downloading '%(url)s'", dict(url=self._url))
                break
            except:
                logging.exception("While Downloading '%(url)s'", dict(url=self._url))
                if retry == RETRIES - 1:
                    logging.exception("Unable to download %(url)s", dict(url=self._url))
                    self._error = traceback.format_exc()
                    raise

    def _retryLiveDownload(self, retry):
        # Compression is disabled so received byte counts can be compared
        # with the advertised Content-Length.
        req = urllib2.Request(self._url, headers={'accept-encoding': ''})
        session = urllib2.urlopen(req, timeout=15)
        try:
            self._content = growingblob.GrowingBlob()
            while True:
                data = session.read(1024 * 16)
                if data == "":
                    if self._length != self.INVALID_LENGTH and self._content.length() != self._length:
                        raise Exception(
                            "Transfer interrupted of %(url)s: expected %(length)d bytes, "
                            "transferred only %(transferred)d bytes. retrying", dict(
                                url=self._url, length=self._length,
                                transferred=self._content.length()))
                    return
                self._content.append(data)
        finally:
            self._statusCode = session.getcode()
            session.close()

    def _write(self):
        try:
            if self._statusCode != 200:
                logging.info(
                    "Not writing cache for '%(url)s', statusCode was %(statusCode)d "
                    "(and not 200)", dict(url=self._url, statusCode=self._statusCode))
                return
            if self._length != self.INVALID_LENGTH and self._content.length() != self._length:
                raise Exception("Internal error %d != %d" % (self._content.length(), self._length))
            self._common.objectStore.write(self._path, self._content.content())
            logging.info("Done writing cache for '%(url)s'", dict(url=self._url))
        except:
            logging.exception("Unable to write cache for %(url)s", dict(url=self._url))
            self._error = traceback.format_exc()
            raise

    def _findLengthRetry(self, retry):
        length = self._findLengthUsingASingleHead(retry)
        # Re-issue the HEAD to guard against flaky / load-balanced origins
        # that answer with inconsistent lengths.
        MAKE_SURE_ATTEMPTS = 2
        for i in xrange(MAKE_SURE_ATTEMPTS):
            another = self._findLengthUsingASingleHead(retry)
            if length != another:
                raise Exception(
                    "Got two different length responses (%(first)d, %(second)d) for the "
                    "same url %(url)s", dict(
                        first=length, second=another, url=self._url))
        return length

    def _findLengthUsingASingleHead(self, retry):
        response = requests.head(
            self._url, allow_redirects=True, timeout=15, headers={'accept-encoding': ''})
        if response.status_code != 200:
            raise Exception(
                "Unable to find length, status code was: %(statusCode)d",
                dict(statusCode=response.status_code))
        return int(response.headers.get('content-length', self.INVALID_LENGTH))
|
dmilith/SublimeText3-dmilith
|
Packages/Debugger/modules/views/debugger_panel.py
|
Python
|
mit
| 3,906
| 0.027906
|
from __future__ import annotations
from ..typecheck import *
from ..import ui
from ..import dap
from . import css
from .breakpoints_panel import BreakpointsPanel
from .input_list_view import InputListView
if TYPE_CHECKING:
from ..debugger import Debugger
class DebuggerPanel(ui.div):
    """Toolbar (start/stop/step buttons) plus status and breakpoints panel."""

    # Callbacks wired up by the owner of this panel.
    on_settings: Callable[[], Any]
    on_start: Callable[[], Any]
    on_stop: Callable[[], Any]
    on_pause: Callable[[], Any]
    on_continue: Callable[[], Any]
    on_step_over: Callable[[], Any]
    on_step_out: Callable[[], Any]
    on_step_in: Callable[[], Any]

    def __init__(self, debugger: Debugger, on_navigate_to_source: Callable[[dap.SourceLocation], None]) -> None:
        super().__init__()
        self.debugger = debugger
        self.breakpoints = BreakpointsPanel(debugger.breakpoints, on_navigate_to_source)

        # Re-render whenever session state changes or the active session switches.
        self.debugger.on_session_state_updated.add(lambda session, state: self.dirty())
        self.debugger.on_session_active.add(self.on_selected_session)
        self.debugger.on_session_added.add(self.on_selected_session)
        self.last_active_adapter = None

    def on_selected_session(self, session: dap.Session):
        self.last_active_adapter = session.adapter_configuration
        self.dirty()

    def render(self) -> ui.div.Children:
        items = [
            DebuggerCommandButton(self.on_settings, ui.Images.shared.settings, 'Settings'),
            DebuggerCommandButton(self.on_start, ui.Images.shared.play, 'Start'),
        ]

        if self.debugger.is_stoppable():
            items.append(DebuggerCommandButton(self.on_stop, ui.Images.shared.stop, 'Stop'))
        else:
            items.append(DebuggerCommandButton(self.on_stop, ui.Images.shared.stop_disable, 'Stop (Disabled)'))

        if self.debugger.is_running():
            items.append(DebuggerCommandButton(self.on_pause, ui.Images.shared.pause, 'Pause'))
        elif self.debugger.is_paused():
            items.append(DebuggerCommandButton(self.on_continue, ui.Images.shared.resume, 'Continue'))
        else:
            items.append(DebuggerCommandButton(self.on_pause, ui.Images.shared.pause_disable, 'Pause (Disabled)'))

        if self.debugger.is_paused():
            items.extend([
                DebuggerCommandButton(self.on_step_over, ui.Images.shared.down, 'Step Over'),
                DebuggerCommandButton(self.on_step_out, ui.Images.shared.left, 'Step Out'),
                DebuggerCommandButton(self.on_step_in, ui.Images.shared.right, 'Step In'),
            ])
        else:
            items.extend([
                DebuggerCommandButton(self.on_step_over, ui.Images.shared.down_disable, 'Step Over (Disabled)'),
                DebuggerCommandButton(self.on_step_out, ui.Images.shared.left_disable, 'Step Out (Disabled)'),
                DebuggerCommandButton(self.on_step_in, ui.Images.shared.right_disable, 'Step In (Disabled)'),
            ])

        # looks like
        # current status
        # breakpoints ...
        if self.debugger.is_active:
            self.last_active_adapter = self.debugger.active.adapter_configuration or self.last_active_adapter

        panel_items: list[ui.div] = []

        if self.debugger.is_active:
            session = self.debugger.active
            status = session.status
            if status:
                panel_items.append(ui.div(height=css.row_height)[
                    ui.text(status, css=css.label_secondary)
                ])

        if self.last_active_adapter:
            settings = self.last_active_adapter.settings(self.debugger)
            for setting in settings:
                panel_items.append(InputListView(setting))

            div = self.last_active_adapter.ui(self.debugger)
            if div: panel_items.append(div)

        panel_items.append(self.breakpoints)

        return [
            ui.div()[
                ui.div(height=css.header_height)[items],
                ui.div(width=30 - css.rounded_panel.padding_width, height=1000, css=css.rounded_panel)[
                    panel_items
                ],
            ]
        ]
class DebuggerCommandButton (ui.span):
    """A single clickable toolbar icon that invokes *callback* when pressed."""

    def __init__(self, callback: Callable[[], Any], image: ui.Image, title: str) -> None:
        super().__init__()
        self.callback = callback
        self.title = title
        self.image = image

    def render(self) -> ui.span.Children:
        # Wrap the icon in a click handler, then pad it like its neighbors.
        button = ui.click(self.callback, title=self.title)[
            ui.icon(self.image),
        ]
        return [ui.span(css=css.padding)[button]]
|
PegasusWang/pyhome
|
test/_env.py
|
Python
|
mit
| 328
| 0
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Test bootstrap: force UTF-8 (Python 2) and put the project root on sys.path."""
import sys
from os.path import abspath, dirname, join

# Python 2 defaults to ascii; reload() re-exposes setdefaultencoding,
# which site.py deletes at startup. A UTF-8 interpreter skips this.
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8')

# Project root is the parent of this file's directory.
PREFIX = abspath(
    join(
        dirname(abspath(__file__)), '../'
    )
)
if PREFIX not in sys.path:
    sys.path.append(PREFIX)
|
ewandor/home-assistant
|
homeassistant/components/cloud/iot.py
|
Python
|
apache-2.0
| 7,667
| 0
|
"""Module to handle messages from Home Assistant cloud."""
import asyncio
import logging
from aiohttp import hdrs, client_exceptions, WSMsgType
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.components.alexa import smart_home
from homeassistant.util.decorator import Registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import auth_api
from .const import MESSAGE_EXPIRATION
# Registry of message handlers, keyed by the 'handler' field of incoming
# cloud messages (dispatched by async_handle_message below).
HANDLERS = Registry()

_LOGGER = logging.getLogger(__name__)

# Connection life-cycle states stored in CloudIoT.state.
STATE_CONNECTING = 'connecting'
STATE_CONNECTED = 'connected'
STATE_DISCONNECTED = 'disconnected'
class UnknownHandler(Exception):
    """Raised when an incoming message names a handler that is not registered."""
class CloudIoT:
    """Class to manage the IoT connection."""

    def __init__(self, cloud):
        """Initialize the CloudIoT class."""
        self.cloud = cloud
        # The WebSocket client
        self.client = None
        # Scheduled sleep task till next connection retry
        self.retry_task = None
        # Boolean to indicate if we wanted the connection to close
        self.close_requested = False
        # The current number of attempts to connect, impacts wait time
        self.tries = 0
        # Current state of the connection
        self.state = STATE_DISCONNECTED

    @asyncio.coroutine
    def connect(self):
        """Connect to the IoT broker and process messages until disconnect."""
        hass = self.cloud.hass

        if self.cloud.subscription_expired:
            # Try refreshing the token to see if it is still expired.
            yield from hass.async_add_job(auth_api.check_token, self.cloud)

            if self.cloud.subscription_expired:
                hass.components.persistent_notification.async_create(
                    MESSAGE_EXPIRATION, 'Subscription expired',
                    'cloud_subscription_expired')
                self.state = STATE_DISCONNECTED
                return

        if self.state == STATE_CONNECTED:
            raise RuntimeError('Already connected')

        @asyncio.coroutine
        def _handle_hass_stop(event):
            """Handle Home Assistant shutting down."""
            nonlocal remove_hass_stop_listener
            remove_hass_stop_listener = None
            yield from self.disconnect()

        self.state = STATE_CONNECTING
        self.close_requested = False
        remove_hass_stop_listener = hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, _handle_hass_stop)
        session = async_get_clientsession(self.cloud.hass)
        client = None
        disconnect_warn = None

        try:
            yield from hass.async_add_job(auth_api.check_token, self.cloud)

            self.client = client = yield from session.ws_connect(
                self.cloud.relayer, heartbeat=55, headers={
                    hdrs.AUTHORIZATION:
                        'Bearer {}'.format(self.cloud.id_token)
                })
            self.tries = 0

            _LOGGER.info('Connected')
            self.state = STATE_CONNECTED

            # Receive loop: each text frame is a JSON request that is
            # dispatched to a registered handler; its result (or an error
            # marker) is sent back tagged with the original msgid.
            while not client.closed:
                msg = yield from client.receive()

                if msg.type in (WSMsgType.ERROR, WSMsgType.CLOSED,
                                WSMsgType.CLOSING):
                    disconnect_warn = 'Connection cancelled.'
                    break

                elif msg.type != WSMsgType.TEXT:
                    disconnect_warn = 'Received non-Text message: {}'.format(
                        msg.type)
                    break

                try:
                    msg = msg.json()
                except ValueError:
                    disconnect_warn = 'Received invalid JSON.'
                    break

                _LOGGER.debug('Received message: %s', msg)

                response = {
                    'msgid': msg['msgid'],
                }
                try:
                    result = yield from async_handle_message(
                        hass, self.cloud, msg['handler'], msg['payload'])

                    # No response from handler
                    if result is None:
                        continue

                    response['payload'] = result

                except UnknownHandler:
                    response['error'] = 'unknown-handler'

                except Exception:  # pylint: disable=broad-except
                    _LOGGER.exception('Error handling message')
                    response['error'] = 'exception'

                _LOGGER.debug('Publishing message: %s', response)
                yield from client.send_json(response)

        except auth_api.CloudError:
            _LOGGER.warning('Unable to connect: Unable to refresh token.')

        except client_exceptions.WSServerHandshakeError as err:
            if err.code == 401:
                disconnect_warn = 'Invalid auth.'
                self.close_requested = True
                # Should we notify user?
            else:
                _LOGGER.warning('Unable to connect: %s', err)

        except client_exceptions.ClientError as err:
            _LOGGER.warning('Unable to connect: %s', err)

        except Exception:  # pylint: disable=broad-except
            if not self.close_requested:
                _LOGGER.exception('Unexpected error')

        finally:
            if disconnect_warn is not None:
                _LOGGER.warning('Connection closed: %s', disconnect_warn)

            if remove_hass_stop_listener is not None:
                remove_hass_stop_listener()

            if client is not None:
                self.client = None
                yield from client.close()

            if self.close_requested:
                self.state = STATE_DISCONNECTED

            else:
                # Unsolicited drop: back off, then schedule a reconnect.
                self.state = STATE_CONNECTING
                self.tries += 1

                try:
                    # Sleep 0, 5, 10, 15 … up to 30 seconds between retries
                    self.retry_task = hass.async_add_job(asyncio.sleep(
                        min(30, (self.tries - 1) * 5), loop=hass.loop))
                    yield from self.retry_task
                    self.retry_task = None
                    hass.async_add_job(self.connect())
                except asyncio.CancelledError:
                    # Happens if disconnect called
                    pass

    @asyncio.coroutine
    def disconnect(self):
        """Disconnect the client."""
        self.close_requested = True

        if self.client is not None:
            yield from self.client.close()
        elif self.retry_task is not None:
            self.retry_task.cancel()
@asyncio.coroutine
def async_handle_message(hass, cloud, handler_name, payload):
    """Handle incoming IoT message.

    Dispatches *payload* to the handler registered under *handler_name*;
    raises UnknownHandler when no such handler is registered.
    """
    handler = HANDLERS.get(handler_name)

    if handler is None:
        raise UnknownHandler()

    return (yield from handler(hass, cloud, payload))
@HANDLERS.register('alexa')
@asyncio.coroutine
def async_handle_alexa(hass, cloud, payload):
    """Handle an incoming IoT message for Alexa."""
    # Forward the smart-home directive to the Alexa integration.
    return (yield from smart_home.async_handle_message(hass,
                                                       cloud.alexa_config,
                                                       payload))
@HANDLERS.register('cloud')
@asyncio.coroutine
def async_handle_cloud(hass, cloud, payload):
    """Handle an incoming IoT message for cloud component.

    Only the 'logout' action is recognized; anything else is logged.
    """
    action = payload['action']

    if action == 'logout':
        yield from cloud.logout()
        _LOGGER.error('You have been logged out from Home Assistant cloud: %s',
                      payload['reason'])
    else:
        _LOGGER.warning('Received unknown cloud action: %s', action)

    return None
|
opengovt/ckan-agency-management-tool
|
decorators.py
|
Python
|
agpl-3.0
| 2,547
| 0
|
from functions import *
from settings import *
import urllib
def required_permission(permissions=[]):  # this is a handler level decorator
    """Only run the handler if the user's permission level is listed in
    *permissions*; otherwise render an error / no-access page."""
    def decorator(fn):
        def wrapper(self, *args, **kwargs):
            if not self.user.permissions:
                self.render("frontend/error404.html")
                return
            # this will overide superadmin, will still go through...
            if self.user.permissions not in permissions:
                self.render('frontend/no-access.html')
                return
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator
def csrf_protect(fn):
    '''So we can decorate any RequestHandler with #@csrf_protect'''
    def wrapper(self, *args, **kwargs):
        if not self.user:
            logging.info('no user')
            self.error(400)
            return
        # The user is known here (the guard above returned), so the inner
        # `if self.user:` re-check the original carried was dead code;
        # only the CSRF token needs verifying.
        if self.user.csrf_token == self.request.get('token'):
            return fn(self, *args, **kwargs)
        logging.info('wrong token')
        self.error(400)
        return
    return wrapper
def login_required(fn):
    '''So we can decorate any RequestHandler with #@admin_required'''
    def wrapper(self, *args, **kwargs):
        if not self.user:
            # Anonymous: bounce to the login page. With ?redirect the
            # query string is stripped from the return URI first.
            if self.request.get('redirect'):
                self.redirect(get_login_url(
                    self.request.uri[0:(self.request.uri.find('?'))],
                    "Please Log In"))
                return
            else:
                self.redirect(get_login_url(self.request.uri, "Please Log In"))
                return
        else:
            return fn(self, *args, **kwargs)
    return wrapper
def allowed_users(permissions=[]):  # this is a handler level decorator
    """Restrict the handler to users whose role is in *permissions*;
    anonymous visitors are redirected to the login page."""
    def decorator(fn):
        def wrapper(self, *args, **kwargs):
            if not self.user:
                # Anonymous: send to login, stripping the query string
                # from the return URI when ?redirect is present.
                if self.request.get('redirect'):
                    uri = self.request.uri.find('?')
                    self.redirect(get_login_url(
                        self.request.uri[0:(uri)], "Please Log In"))
                else:
                    self.redirect(get_login_url(
                        self.request.uri, "Please Log In"))
                return
            if self.user.role in permissions:
                return fn(self, *args, **kwargs)
            self.tv["error_PAGE"] = True
            self.error(404)
            self.render('frontend/error404.html')
            return
        return wrapper
    return decorator
|
invisiblek/python-for-android
|
python3-alpha/python3-src/Doc/includes/sqlite3/execute_3.py
|
Python
|
apache-2.0
| 212
| 0.009434
|
import sqlite3

# Open the example database and look up one person by name and age.
con = sqlite3.connect("mydb")
cur = con.cursor()

who = "Yeltsin"
age = 72

# Named-parameter style (:who, :age); locals() supplies the bindings.
cur.execute("select name_last, age from people where name_last=:who and age=:age",
            locals())
print(cur.fetchone())
|
nkgilley/home-assistant
|
homeassistant/components/ads/sensor.py
|
Python
|
apache-2.0
| 2,424
| 0.001238
|
"""Support for ADS sensors."""
import logging
import voluptuous as vol
from homeassistant.components import ads
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from . import CONF_ADS_FACTOR, CONF_ADS_TYPE, CONF_ADS_VAR, STATE_KEY_STATE, AdsEntity
_LOGGER = logging.getLogger(__name__)
# Fallback friendly name when the user does not configure one.
DEFAULT_NAME = "ADS sensor"
# Platform schema: the ADS variable name is required; the PLC data type
# defaults to INT and an optional positive-int factor scales raw values.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADS_VAR): cv.string,
        vol.Optional(CONF_ADS_FACTOR): cv.positive_int,
        vol.Optional(CONF_ADS_TYPE, default=ads.ADSTYPE_INT): vol.In(
            [
                ads.ADSTYPE_INT,
                ads.ADSTYPE_UINT,
                ads.ADSTYPE_BYTE,
                ads.ADSTYPE_DINT,
                ads.ADSTYPE_UDINT,
            ]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=""): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up an ADS sensor device."""
    # Shared hub connection created by the ads component at startup.
    ads_hub = hass.data.get(ads.DATA_ADS)
    add_entities(
        [
            AdsSensor(
                ads_hub,
                config[CONF_ADS_VAR],
                config[CONF_ADS_TYPE],
                config[CONF_NAME],
                config.get(CONF_UNIT_OF_MEASUREMENT),
                config.get(CONF_ADS_FACTOR),
            )
        ]
    )
class AdsSensor(AdsEntity):
    """Representation of an ADS sensor entity."""

    def __init__(self, ads_hub, ads_var, ads_type, name, unit_of_measurement, factor):
        """Initialize AdsSensor entity."""
        super().__init__(ads_hub, name, ads_var)
        self._unit_of_measurement = unit_of_measurement
        self._ads_type = ads_type
        # Optional integer divisor/multiplier applied by the hub callback.
        self._factor = factor

    async def async_added_to_hass(self):
        """Register device notification."""
        await self.async_initialize_device(
            self._ads_var,
            self._ads_hub.ADS_TYPEMAP[self._ads_type],
            STATE_KEY_STATE,
            self._factor,
        )

    @property
    def state(self):
        """Return the state of the device."""
        return self._state_dict[STATE_KEY_STATE]

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
|
tivek/conan
|
conans/test/model/options_test.py
|
Python
|
mit
| 12,830
| 0.001871
|
import unittest
from conans.model.options import OptionsValues, PackageOptions, Options, PackageOptionValues,\
option_undefined_msg
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestBufferConanOutput
from conans.errors import ConanException
class OptionsTest(unittest.TestCase):
    def setUp(self):
        # System under test: three options — a bool, an enumerated int and
        # a free-form string — pre-assigned the values the tests check.
        package_options = PackageOptions.loads("""{static: [True, False],
                    optimized: [2, 3, 4],
                    path: ANY}""")
        values = PackageOptionValues()
        values.add_option("static", True)
        values.add_option("optimized", 3)
        values.add_option("path", "NOTDEF")
        package_options.values = values
        self.sut = Options(package_options)
    def test_in(self):
        # Membership (`in`) reflects which options are *defined*,
        # independent of their current values.
        package_options = PackageOptions.loads("{static: [True, False]}")
        sut = Options(package_options)
        self.assertTrue("static" in sut)
        self.assertFalse("shared" in sut)
        self.assertTrue("shared" not in sut)
        self.assertFalse("static" not in sut)
    def undefined_value_test(self):
        """ Not assigning a value to options will raise an error at validate() step
        """
        # 'path' accepts ANY value but must still be explicitly assigned.
        package_options = PackageOptions.loads("""{
        path: ANY}""")
        with self.assertRaisesRegexp(ConanException, option_undefined_msg("path")):
            package_options.validate()
        package_options.path = "Something"
        package_options.validate()
    def undefined_value_none_test(self):
        """ The value None is allowed as default, not necessary to default to it
        """
        # Both the literal None and the string "None" validate without
        # an explicit assignment.
        package_options = PackageOptions.loads('{path: [None, "Other"]}')
        package_options.validate()
        package_options = PackageOptions.loads('{path: ["None", "Other"]}')
        package_options.validate()
    def items_test(self):
        # items() returns (name, value-as-string) pairs sorted by name;
        # the second, identical call checks the accessor is idempotent.
        self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
                                            ("static", "True")])
        self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
                                            ("static", "True")])
    def change_test(self):
        # Attribute assignment updates the option value; values outside an
        # option's declared range are rejected with ConanException.
        self.sut.path = "C:/MyPath"
        self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
                                            ("static", "True")])
        self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
                                            ("static", "True")])
        with self.assertRaisesRegexp(ConanException,
                                     "'5' is not a valid 'options.optimized' value"):
            self.sut.optimized = 5
    def boolean_test(self):
        # Option values are stored as strings, so equality against both the
        # bool False and the string "False" must hold after assignment.
        self.sut.static = False
        self.assertFalse(self.sut.static)
        self.assertTrue(not self.sut.static)
        self.assertTrue(self.sut.static == False)
        self.assertFalse(self.sut.static == True)
        self.assertFalse(self.sut.static != False)
        self.assertTrue(self.sut.static != True)
        self.assertTrue(self.sut.static == "False")
        self.assertTrue(self.sut.static != "True")
def basic_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("thread", True)
boost_values.add_option("thread.multi", "off")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", True)
hello1_values = PackageOptionValues()
hello1_values.add_option("static", False)
hello1_values.add_option("optimized", 4)
options = {"Boost": boost_values,
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello0/0.1@diego/testing")
own_ref = ConanFileReference.loads("Hello1/0.1@diego/testing")
output = TestBufferConanOutput()
self.sut.propagate_upstream(options, down_ref, own_ref, output)
self.assertEqual(self.sut.values.as_list(), [("optimized", "4"),
("path", "NOTDEF"),
("static", "False"),
("Boost:static", "False"),
("Boost:thread", "True"),
("Boost:thread.multi", "off"),
("Poco:deps_bundled", "True")])
boost_values = PackageOptionValues()
boost_values.add_option("static", 2)
boost_values.add_option("thread", "Any")
boost_values.add_option("thread.multi", "on")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", "What")
hello1_values = PackageOptionValues()
hello1_values.add_option("static", True)
hello1_values.add_option("optimized", "2")
options2 = {"Boost": boost_values,
"Poco": poco_values,
"
|
Hel
|
lo1": hello1_values}
down_ref = ConanFileReference.loads("Hello2/0.1@diego/testing")
with self.assertRaisesRegexp(ConanException, "Hello2/0.1@diego/testing tried to change "
"Hello1/0.1@diego/testing option optimized to 2"):
self.sut.propagate_upstream(options2, down_ref, own_ref, output)
self.assertEqual(self.sut.values.dumps(),
"""optimized=4
path=NOTDEF
static=False
Boost:static=False
Boost:thread=True
Boost:thread.multi=off
Poco:deps_bundled=True""")
    def pattern_positive_test(self):
        # A pattern key ("Boost.*") applies to any matching reference and
        # its entries are echoed back in the merged value list.
        boost_values = PackageOptionValues()
        boost_values.add_option("static", False)
        boost_values.add_option("path", "FuzzBuzz")
        options = {"Boost.*": boost_values}
        own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
        down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
        output = TestBufferConanOutput()
        self.sut.propagate_upstream(options, down_ref, own_ref, output)
        self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
                                                     ("path", "FuzzBuzz"),
                                                     ("static", "False"),
                                                     ("Boost.*:path", "FuzzBuzz"),
                                                     ("Boost.*:static", "False"),
                                                     ])
    def multi_pattern_test(self):
        # When several patterns match the same reference, entries from all
        # of them are merged into the propagated values.
        boost_values = PackageOptionValues()
        boost_values.add_option("static", False)
        boost_values.add_option("path", "FuzzBuzz")
        boost_values2 = PackageOptionValues()
        boost_values2.add_option("optimized", 2)
        options = {"Boost.*": boost_values,
                   "*": boost_values2}
        own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
        down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
        output = TestBufferConanOutput()
        self.sut.propagate_upstream(options, down_ref, own_ref, output)
        self.assertEqual(self.sut.values.as_list(), [("optimized", "2"),
                                                     ("path", "FuzzBuzz"),
                                                     ("static", "False"),
                                                     ('*:optimized', '2'),
                                                     ("Boost.*:path", "FuzzBuzz"),
                                                     ("Boost.*:static", "False"),
                                                     ])
def multi_pattern_error_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("optimized", 4)
boost_values2 = PackageOptionValues()
boost_values2.add_option("optimized", 2)
options = {"Boost.*": boost_values,
"*": boost_values2}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
output = Te
|
andyliuliming/WALinuxAgent
|
tests/ga/test_exthandlers.py
|
Python
|
apache-2.0
| 2,603
| 0.000768
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
import json
from azurelinuxagent.common.protocol.restapi import ExtensionStatus
from azurelinuxagent.ga.exthandlers import parse_ext_status
from tests.tools import *
class TestExtHandlers(AgentTestCase):
def test_parse_extension_status00(self):
"""
Parse a status report for a successful execution of an extension.
"""
s = '''[{
"status": {
"status": "success",
"formattedMessage": {
"lang": "en-US",
"message": "Command is finished."
},
"operation": "Daemon",
"code": "0",
"name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
},
"version": "1.0",
"timestampUTC": "2018-04-20T21:20:24Z"
}
]'''
ext_status = ExtensionStatus(seq_no=0)
parse_ext_status(ext_status, json.loads(s))
self.assertEqual('0', ext_status.code)
self.assertEqual(None, ext_status.configurationAppliedTime)
self.assertEqual('Command is finish
|
ed.', ext_status.message)
self.assertEqual('Daemon', ext_status.operation)
self.assertEqual('success', ext_status.status)
self.assertEqual(0, ext_status.sequenceNumber)
self.assertEqual(0, len(ext_status.substatus
|
List))
    def test_parse_extension_status01(self):
        """
        Parse a status report for a failed execution of an extension.
        The extension returned a bad status/status of failed.
        The agent should handle this gracefully, and convert all unknown
        status/status values into an error.
        """
        s = '''[{
    "status": {
        "status": "failed",
        "formattedMessage": {
            "lang": "en-US",
            "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..."
        },
        "operation": "Enable",
        "code": "0",
        "name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
    },
    "version": "1.0",
    "timestampUTC": "2018-04-20T20:50:22Z"
}]'''
        ext_status = ExtensionStatus(seq_no=0)
        parse_ext_status(ext_status, json.loads(s))
        self.assertEqual('0', ext_status.code)
        self.assertEqual(None, ext_status.configurationAppliedTime)
        self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message)
        self.assertEqual('Enable', ext_status.operation)
        # 'failed' is not a recognized value, so it is normalized to 'error'.
        self.assertEqual('error', ext_status.status)
        self.assertEqual(0, ext_status.sequenceNumber)
        self.assertEqual(0, len(ext_status.substatusList))
|
mserjx/pytube
|
setup.py
|
Python
|
mit
| 1,352
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pytube import __version__
import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def open_file(fname):
    """Open *fname* relative to this setup.py's own directory."""
    return open(os.path.join(os.path.dirname(__file__), fname))


setup(
    name="pytube",
    version=__version__,
    author="Nick Ficano",
    author_email="nficano@gmail.com",
    packages=['pytube'],
    url="http://pytube.nickficano.com",
    license=open_file('LICENSE.txt').read(),
    scripts=['scripts/pytubectl'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.0",
        "Programming Language :: Python :: 3.4",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
        "Topic :: Internet",
        "Topic :: Multimedia :: Video"
    ],
    description="A simple, yet versatile package for downloading "
                "YouTube videos.",
    long_description=open_file('README.rst').read(),
    zip_safe=True,
)
|
yann2192/vpyn
|
Client.py
|
Python
|
gpl-3.0
| 2,459
| 0.004067
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
# See LICENSE for details.
import sys, os
from gevent import select, monkey, spawn, Greenlet, GreenletExit, sleep, socket
from base64 import b64encode
from hashlib import md5
from struct import pack, unpack
from zlib import adler32
from Proto import Proto
from Index import Index
from Config import *
class Client(Proto):
def __init__(self, vpn):
self.vpn = vpn
def close(self):
try:
self.sock.close()
except:
pass
def error(self, exp):
self.close()
def connect(self, host, port, pubkey):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.handshake(pubkey)
except Exception as e:
self.error(e)
raise
def handshake(self
|
, pubkey):
self.send_id()
myiv = self.send_iv()
i
|
v = self.get_iv(pubkey)
self.init_cipher(pubkey, myiv, iv)
def recv_file(self):
if self.srecvall(1) != "\x01":
self.ssend("\xFF")
raise Exception, "Bad Flags (0x01 expected)"
size = self.srecvall(4)
checksum = self.srecvall(4)
if adler32(size) != unpack('!I',checksum)[0]:
self.ssend("\xFF")
raise Exception, "Bad checksum"
size = unpack('!I', size)[0]
buffer = self.srecvall(size)
hash = self.srecvall(16)
if md5(buffer).digest() != hash:
self.ssend("\xFF")
raise Exception, "Bad md5 ..."
return buffer
def get_file(self, id, name):
path = os.path.join(inbox, name)
while os.path.exists(path):
name = "_"+name
path = os.path.join(inbox, name)
#raise Exception, "%s already exist ..." % path
self.ssend("\x02"+pack('!I',id))
buff = self.recv_file()
with open(path, "wb") as f:
f.write(buff)
def get_index(self, id):
index = Index(id)
buffer = index.get_xml().encode('utf-8')
hash = md5(buffer).digest()
self.ssend('\x03'+hash)
flag = self.srecvall(1)
if flag == "\x04":
buffer = self.recv_file()
index.set_xml(buffer)
elif flag == "\x05":
pass
else:
raise Exception, "Protocol Error"
|
vanant/googleads-dfa-reporting-samples
|
python/v2.0/create_image_creative.py
|
Python
|
apache-2.0
| 3,754
| 0.005328
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an image creative.
Requires an image asset and advertiser ID as input. To get an advertiser ID,
run get_advertisers.py.
Tags: creatives.insert
"""
__autho
|
r__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from apiclient.http import MediaFileUpload
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to add a user role for')
argparser.add_argument(
'advertiser_id', type=int,
help='The ID of the advertiser to associate this
|
creative with.')
argparser.add_argument(
'size_id', type=int,
help='The ID of the size of this creative.')
argparser.add_argument(
'image_name',
help='Suggested name to use for the uploaded creative asset.')
argparser.add_argument(
'path_to_image_file',
help='Path to the asset file to be uploaded.')
def main(argv):
  """Authenticate, upload the image asset and insert the image creative."""
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
      scope=['https://www.googleapis.com/auth/dfareporting',
             'https://www.googleapis.com/auth/dfatrafficking'])
  profile_id = flags.profile_id
  advertiser_id = flags.advertiser_id
  image_name = flags.image_name
  path_to_image_file = flags.path_to_image_file
  size_id = flags.size_id
  try:
    # Upload the creative asset
    creative_asset_id = upload_creative_asset(
        service, profile_id, advertiser_id, image_name, path_to_image_file,
        'IMAGE')
    # Construct the creative structure.
    creative = {
        'advertiserId': advertiser_id,
        'creativeAssets': [
            {'assetIdentifier': creative_asset_id, 'role': 'PRIMARY'}
        ],
        'name': 'Test image creative',
        'size': {'id': size_id},
        'type': 'IMAGE'
    }
    request = service.creatives().insert(profileId=profile_id, body=creative)
    # Execute request and print response.
    response = request.execute()
    print ('Created image creative with ID %s and name "%s".'
           % (response['id'], response['name']))
  except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')
def upload_creative_asset(
    service, profile_id, advertiser_id, asset_name, path_to_asset_file,
    asset_type):
  """Uploads a creative asset and returns an assetIdentifier."""
  # Construct the creative asset metadata
  creative_asset = {
      'assetIdentifier': {
          'name': asset_name,
          'type': asset_type
      }
  }
  media = MediaFileUpload(path_to_asset_file)
  if not media.mimetype():
    # Fall back to a generic binary type when the MIME type can't be guessed.
    media = MediaFileUpload(path_to_asset_file, 'application/octet-stream')
  response = service.creativeAssets().insert(
      advertiserId=advertiser_id,
      profileId=profile_id,
      media_body=media,
      body=creative_asset).execute()
  return response['assetIdentifier']
if __name__ == '__main__':
main(sys.argv)
|
ForeverWintr/ImageClassipy
|
clouds/util/farmglue.py
|
Python
|
mit
| 2,342
| 0.003416
|
"""
Glue for interfacing with a farm object
"""
import os
import ast
from .constants import HealthStatus
def imagesAndStatuses(outputDir):
    """
    Return a dictionary of images and corresponding human assigned statuses from output.
    """
    controlPath = os.path.join(outputDir, 'controls', 'HealthImageryControl.txt')
    fieldsPath = os.path.join(outputDir, 'fields')
    control = _loadControl(controlPath)
    images = {}
    for field in _dirs(fieldsPath):
        fieldname = os.path.basename(field)
        for seg in _dirs(os.path.join(field, 'HealthSegments')):
            segname = os.path.basename(seg)
            # Only keep segments a human has actually reviewed.
            if control[fieldname][segname]['humanAssignedStatus'].lower() != 'yes':
                continue
            segStatus = _pickStatus(control[fieldname][segname]['healthStatus'])
            segRGBTiff = os.path.join(seg, 'rgb', 'rgb.tif')
            segRGBPng = os.path.join(seg, 'rgb', 'rgb.png')
            #choose png if it exists, as it'll save us converting later
            if os.path.exists(segRGBPng):
                images[segRGBPng] = segStatus
            else:
                images[segRGBTiff] = segStatus
    return images
def _pickStatus(statusString):
    """Map a human-entered status string to its HealthStatus value."""
    known = {
        'cloudy imagery': HealthStatus.CLOUDY,
        'valid crop health': HealthStatus.GOOD,
        'insufficient image coverage': HealthStatus.INSUFFICIENT_COVERAGE,
        'full bloom canola': HealthStatus.CANOLA,
        'image rejected - other': HealthStatus.REJECTED_OTHER,
    }
    try:
        return known[statusString.lower()]
    except KeyError:
        raise KeyError("Unrecognized status: '{}'".format(statusString))
def _loadControl(controlPath):
"""
Load and format the control, discarding irrelevant information like the latest date.
"""
with open(controlPath) as f:
c = ast.literal_eval(f.read())
control = {}
for f, v in c.items():
subdict = {s.pop('name'): s for s in v[1]}
control[f] = subdict
return control
def _dirs(directory):
"""
Return directories in the given one.
"""
return (os.path.join(directory, x) for x in next(os.walk(directory))[1])
if __name__ == '__main__':
    # Ad-hoc manual check against a local dataset path.
    path = '/Users/tomrutherford/Documents/Hervalense'
    images = imagesAndStatuses(path)
    print("done")
|
WiserTogether/aurproxy
|
tellapart/aurproxy/share/adjusters/ramp.py
|
Python
|
apache-2.0
| 2,700
| 0.007407
|
# Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from gevent import spawn_later
from gevent.event import Event
from tellapart.aurproxy.audit import AuditItem
from tellapart.aurproxy.share.adjuster import ShareAdjuster
def linear(start_time, end_time, as_of):
    """Linear ramp curve.

    Returns the fraction of the [start_time, end_time] window elapsed at
    *as_of*: 0.0 at or before the start, 1.0 at or after the end, and the
    elapsed proportion in between.
    """
    if start_time >= as_of:
        return 0.0
    if end_time <= as_of:
        return 1.0
    total = end_time - start_time
    elapsed = as_of - start_time
    return float(elapsed.total_seconds()) / float(total.total_seconds())
# Registry of named ramp curves, looked up via the 'curve' constructor arg.
_CURVE_FNS = {
    'linear': linear }
class RampingShareAdjuster(ShareAdjuster):
  """Ramps an endpoint's traffic share from 0.0 to 1.0 over a time window."""

  def __init__(self,
               endpoint,
               signal_update_fn,
               ramp_delay,
               ramp_seconds,
               curve='linear',
               update_frequency=10,
               as_of=None):
    super(RampingShareAdjuster, self).__init__(endpoint, signal_update_fn)
    self._ramp_delay = ramp_delay
    self._ramp_seconds = ramp_seconds
    self._curve_fn = _CURVE_FNS[curve]
    self._update_frequency = update_frequency
    # as_of pins the ramp start explicitly; otherwise start() computes it.
    self._start_time = as_of
    self._stop_event = Event()

  def start(self):
    """Start maintaining share adjustment factor for endpoint.
    """
    if not self._start_time:
      self._start_time = datetime.now() + timedelta(seconds=self._ramp_delay)
    spawn_later(self._update_frequency, self._update)

  def stop(self):
    """Stop maintaining share adjustment factor for endpoint.
    """
    self._stop_event.set()

  def _update(self):
    # Periodically re-signal until the ramp window ends or stop() fires.
    if not self._stop_event.is_set():
      try:
        self._signal_update_fn()
      finally:
        if datetime.now() > self._end_time:
          self.stop()
        else:
          spawn_later(self._update_frequency, self._update)

  @property
  def _end_time(self):
    return self._start_time + timedelta(seconds=self._ramp_seconds)

  @property
  def auditable_share(self):
    """Return current share adjustment factor.
    """
    as_of = datetime.now()
    share = self._curve_fn(self._start_time,
                           self._end_time,
                           as_of)
    return share, AuditItem('ramp', str(share))
|
jkonecki/autorest
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/resource.py
|
Python
|
mit
| 1,309
| 0.001528
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
    """Resource

    :param id: Resource Id
    :type id: str
    :param type: Resource Type
    :type type: str
    :param tags:
    :type tags: dict
    :param location: Resource Location
    :type location: str
    :param name: Resource Name
    :type name: str
    """

    # Maps Python attribute names to wire keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, id=None, type=None, tags=None, location=None, name=None, **kwargs):
        self.id = id
        self.type = type
        self.tags = tags
        self.location = location
        self.name = name
|
alirizakeles/zato
|
code/zato-server/test/zato/server/service/internal/kvdb/data_dict/test_impexp.py
|
Python
|
gpl-3.0
| 1,497
| 0.004008
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

# Bunch
from bunch import Bunch

# Zato
from zato.common import zato_namespace
from zato.common.test import rand_string, ServiceTestCase
from zato.server.service.internal.kvdb.data_dict.impexp import Import
##############################################################################
class ImportTestCase(ServiceTestCase):
    """Checks the SimpleIO contract and name of the KVDB data-dict Import service."""

    def setUp(self):
        self.service_class = Import
        self.sio = self.service_class.SimpleIO

    def get_request_data(self):
        return {'data':rand_string()}

    def get_response_data(self):
        return Bunch()

    def test_sio(self):
        # Request/response element names and the required input must match
        # the generated SimpleIO definition exactly.
        self.assertEquals(self.sio.request_elem, 'zato_kvdb_data_dict_impexp_import_request')
        self.assertEquals(self.sio.response_elem, 'zato_kvdb_data_dict_impexp_import_response')
        self.assertEquals(self.sio.input_required, ('data',))
        self.assertEquals(self.sio.namespace, zato_namespace)
        self.assertRaises(AttributeError, getattr, self.sio, 'input_optional')
        self.assertRaises(AttributeError, getattr, self.sio, 'output_required')
        self.assertRaises(AttributeError, getattr, self.sio, 'output_optional')

    def test_impl(self):
        self.assertEquals(self.service_class.get_name(), 'zato.kvdb.data-dict.impexp.import')
|
sedden/django-basic-apps
|
basic/events/views.py
|
Python
|
bsd-3-clause
| 1,774
| 0.003946
|
import re, datetime

from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import Http404
from django.views.generic import date_based, list_detail

from basic.events.models import *
def event_list(request, page=0):
    # Paginated list of all event occurrences, 20 per page.
    return list_detail.object_list(
        request,
        queryset=EventTime.objects.all(),
        paginate_by=20,
        page=page,
    )
# Re-use the generic view's docstring so help() stays accurate.
event_list.__doc__ = list_detail.object_list.__doc__
def event_archive_year(request, year):
    # Yearly archive of events keyed on their start date; future events
    # are intentionally included.
    return date_based.archive_year(
        request,
        year=year,
        date_field='start',
        queryset=EventTime.objects.all(),
        make_object_list=True,
        allow_future=True,
    )
# Re-use the generic view's docstring so help() stays accurate.
event_archive_year.__doc__ = date_based.archive_year.__doc__
def event_archive_month(request, year, month):
    # Monthly archive of events keyed on their start date.
    return date_based.archive_month(
        request,
        year=year,
        month=month,
        date_field='start',
        queryset=EventTime.objects.all(),
        allow_future=True,
    )
event_archive_month.__doc__ = date_based.archive_month.__doc__
def event_archive_day(request, year, month, day):
    # Daily archive of events keyed on their start date.
    return date_based.archive_day(
        request,
        year=year,
        month=month,
        day=day,
        date_field='start',
        queryset=EventTime.objects.all(),
        allow_future=True,
    )
event_archive_day.__doc__ = date_based.archive_day.__doc__
def event_detail(request, slug, year, month, day, id):
    # Single event occurrence looked up by date and primary key.
    # NOTE(review): *slug* is accepted for URL routing but never used here.
    return date_based.object_detail(
        request,
        year=year,
        month=month,
        day=day,
        date_field='start',
        object_id=id,
        queryset=EventTime.objects.all(),
        allow_future=True,
    )
event_detail.__doc__ = date_based.object_detail.__doc__
|
mstave/Todo.txt-python
|
todo.py
|
Python
|
gpl-3.0
| 36,444
| 0.005213
|
#!/usr/bin/env python
# TODO.TXT-CLI-python
# Copyright (C) 2011-2012 Sigmavirus24
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# TLDR: This is licensed under the GPLv3. See LICENSE for more details.
import os
import re
import sys
from optparse import OptionParser
from datetime import datetime, date
VERSION = "development"
REVISION = "$Id$"
try:
import readline
except ImportError:
# This isn't crucial to the execution of the script.
# But it is a nice feature to have. Sucks to be an OSX user.
pass
try:
# Python 3 moved the built-in intern() to sys.intern()
intern = sys.intern
except AttributeError:
pass
try:
input = raw_input
except NameError:
# Python 3 renamed raw_input to input
pass
try:
from string import uppercase
except ImportError:
# Python 3 again
from string import ascii_uppercase as uppercase
if os.name == "nt":
try:
from colorama import init
init()
except Exception:
pass
# colorama provides ANSI -> win32 color support
# If they don't have it, no worries.
# Priority letters A..X (24 of the 26 uppercase letters).
PRIORITIES = uppercase[:24]
# concat() is necessary long before the grouping of function declarations
concat = lambda str_list, sep='': sep.join([str(i) for i in str_list])
_path = lambda p: os.path.abspath(os.path.expanduser(p))
_pathc = lambda plist: _path(concat(plist))
# ANSI escape sequences keyed by human-readable colour name.
TERM_COLORS = {
    "black": "\033[0;30m", "red": "\033[0;31m",
    "green": "\033[0;32m", "brown": "\033[0;33m",
    "blue": "\033[0;34m", "purple": "\033[0;35m",
    "cyan": "\033[0;36m", "light grey": "\033[0;37m",
    "dark grey": "\033[1;30m", "light red": "\033[1;31m",
    "light green": "\033[1;32m", "yellow": "\033[1;33m",
    "light blue": "\033[1;34m", "light purple": "\033[1;35m",
    "light cyan": "\033[1;36m", "white": "\033[1;37m",
    "default": "\033[0m", "reverse": "\033[7m",
    "bold": "\033[1m",
}
TODO_DIR = _path("~/.todo")
# Default runtime configuration; overridden by the user's config file.
CONFIG = {
    "TODO_DIR": TODO_DIR,
    "TODOTXT_DEFAULT_ACTION": "list",
    "TODOTXT_CFG_FILE": _pathc([TODO_DIR, "/config"]),
    "TODO_FILE": _pathc([TODO_DIR, "/todo.txt"]),
    "DONE_FILE": _pathc([TODO_DIR, "/done.txt"]),
    "TMP_FILE": "",
    "REPORT_FILE": "",
    "USE_GIT": False,
    "PLAIN": False,
    "NO_PRI": False,
    "PRE_DATE": False,
    "INVERT": False,
    "HIDE_PROJ": False,
    "HIDE_CONT": False,
    "HIDE_DATE": False,
    "LEGACY": False,
}
# Every priority letter gets a colour setting, defaulting to the terminal's.
for p in PRIORITIES:
    CONFIG["PRI_{0}".format(p)] = "default"
# TODO_DIR is already captured inside CONFIG; drop the loop variable too.
del(p, TODO_DIR)
### Helper Functions
def todo_padding(include_done=False):
    """Return the digit width needed to print the largest todo number.

    Counts the todos (optionally including done ones) without building an
    intermediate list, then derives the number of decimal digits.
    """
    count = sum(1 for _ in iter_todos(include_done))
    pad = 1
    while count >= 10:
        pad += 1
        count //= 10
    return pad
def iter_todos(include_done=False):
    """Opens the file in read-only mode; returns an iterator for the todos."""
    sources = [CONFIG["TODO_FILE"]]
    # No todo file means there is nothing at all to yield.
    if not os.path.isfile(sources[0]):
        return
    if include_done and os.path.isfile(CONFIG["DONE_FILE"]):
        sources.append(CONFIG["DONE_FILE"])
    for filename in sources:
        with open(filename) as todo_file:
            for todo in todo_file:
                yield todo
def separate_line(number):
    """Split todo *number* out of the list of todos.

    Returns (line, remaining): *line* is the 1-based item at that position
    (None when the file is empty or the number is out of range) and
    *remaining* is the list of all other todos.
    """
    remaining = list(iter_todos())
    chosen = None
    if remaining and 0 <= number - 1 < len(remaining):
        chosen = remaining.pop(number - 1)
    return chosen, remaining
def rewrite_file(fd, lines):
    """Simple wrapper for three lines used all too frequently. Sets the access
    position to the beginning of the file, truncates the file's length to 0 and
    then writes all the lines to the file."""
    fd.seek(0, 0)  # rewind first so truncation and writes start at offset 0
    fd.truncate(0)
    fd.writelines(lines)
def rewrite_and_post(line_no, old_line, new_line, lines):
    """Wrapper for frequently used semantics for "post-production"."""
    with open(CONFIG["TODO_FILE"], "w") as fd:
        rewrite_file(fd, lines)
    # Let the post-success hook (reporting/git) see what changed.
    post_success(line_no, old_line, new_line)
def usage(*args):
    """Set the usage string printed out in ./todo.py help."""
    def usage_decorator(func):
        """Attach the rendered usage text to the decorated function."""
        # Tabs render at width 3 in the help output.
        func.__usage__ = '\n'.join(str(part) for part in args).expandtabs(3)
        return func
    return usage_decorator
def _git_err(g):
    """Print any errors that result from GitPython and exit."""
    # Prefer git's own stderr text when present; otherwise print the
    # exception itself, then exit with git's status code.
    if g.stderr:
        print(g.stderr)
    else:
        print(g)
    sys.exit(g.status)
@usage('\tpull', '\t\tPulls from your remote git repository.\n')
def _git_pull():
    """Equivalent to running git pull on the command line."""
    try:
        print(CONFIG["GIT"].pull())
    except git.exc.GitCommandError as g:
        # Report the git failure and exit with its status code.
        _git_err(g)
@usage('\tpush', '\t\tPushes to your remote git repository.\n')
def _git_push():
    """Push local commits to the remote and report what happened."""
    try:
        result = CONFIG["GIT"].push()
    except git.exc.GitCommandError as g:
        _git_err(g)  # exits the process, so *result* is bound below
    print(result if result else "TODO: 'git push' executed.")
@usage('\tstatus',
       '\t\t"git status" of the repository containing your todo files.',
       '\t\tRequires git version 1.7.4 or newer.\n')
def _git_status():
    """Print ``git status`` of the local repository.

    Requires git >= 1.7.4, as advertised both in the usage text and in the
    fallback message below.
    """
    # Bug fix: the old check accepted 1.7.3 even though the usage text and
    # the message below both state that 1.7.4 is the minimum.
    if CONFIG["GIT"].version_info >= (1, 7, 4):
        print(CONFIG["GIT"].status())
    else:
        print("status only works for git version 1.7.4 or higher.")
@usage('\tlog', '\t\tShows the last five commits in your repository.\n')
def _git_log():
    """Print the five latest commits (one line each) from the local repository's log."""
    print(CONFIG["GIT"].log("-5", "--oneline"))
def _git_commit(files, message):
    """Make a commit to the git repository.

    files -- should be an iterable like ['file_a', 'file_b'] or ['-a']
    message -- commit message; when longer than 49 characters it is folded
        into a short subject (first 45 chars + "...") followed by a blank
        line and the full message as the commit body.
    """
    if len(message) > 49:
        message = concat([message[:45], "...\n\n", message])
    try:
        CONFIG["GIT"].commit(files, "-m", message)
    except git.exc.GitCommandError as g:
        _git_err(g)
    # '-a' commits everything, so report the whole todo directory instead
    # of a specific file list.
    committed = CONFIG["TODO_DIR"] if "-a" in files else concat(files, ", ")
    print(concat(["TODO: ", committed, " archived."]))
def prompt(*args, **kwargs):
    """Collect user input, stripping backslashes so input such as 'y\\'
    cannot break the program.

    args -- any collection of strings that require formatting.
    kwargs -- tokens and values for str.format().
    """
    pieces = list(args)
    pieces.append(' ')
    answer = input(concat(pieces).format(**kwargs))
    return re.sub(r"\\", "", answer)
def print_x_of_y(x, y):
    """Print a "--\\nTODO: {shown} of {total} tasks shown" footer.

    If *x* somehow holds more items than *y*, the total is reported for
    both figures, since there can never be more matches than items.
    """
    footer = "--\nTODO: {0} of {1} tasks shown"
    shown = len(y) if len(x) > len(y) else len(x)
    print(footer.format(shown, len(y)))
def test_separated(removed, lines, line_no):
if not (removed or lines):
print("{0}: No such todo.".format(line_no))
return True
return False
### End Helper Functions
### Configuration Functions
def _iter_actual_lines_(config_file):
"""Return only the actual lines of the config file. This skips commented or
blank lines."""
skip_re = re.compile('^\s*(#|$)')
with open(config_
|
leandron/steinlib
|
steinlib/state.py
|
Python
|
mit
| 163
| 0
|
class ParsingState(object):
"""
States of the pars
|
ing process.
"""
wait_for_header = 0
|
wait_for_section = 1
inside_section = 2
end = 4
|
drgarcia1986/simple-settings
|
tests/samples/special.py
|
Python
|
mit
| 74
| 0
|
SIMPLE_S
|
ETTINGS = {
'OVERR
|
IDE_BY_ENV': True
}
MY_VAR = u'Some Value'
|
genenetwork/genenetwork2_diet
|
wqflask/base/data_set.py
|
Python
|
agpl-3.0
| 45,353
| 0.00452
|
# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# This program is available from Source Forge: at GeneNetwork Project
# (sourceforge.net/projects/genenetwork/).
#
# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
#
# This module is used by GeneNetwork project (www.genenetwork.org)
from __future__ import absolute_import, print_function, division
import os
import math
import string
import collections
import codecs
import json
import gzip
import cPickle as pickle
import itertools
from operator import itemgetter
from redis import Redis
Redis = Redis()
from flask import Flask, g
import reaper
from base import webqtlConfig
from base import species
from dbFunction import webqtlDatabaseFunction
from utility import webqtlUtil
from utility.benchmark import Bench
from utility import chunks
from utility.tools import locate, locate_ignore_error
from maintenance import get_group_samplelists
from MySQLdb import escape_string as escape
from pprint import pformat as pf
# Used by create_database to instantiate objects
# Each subclass will add to this
DS_NAME_MAP = {}
def create_dataset(dataset_name, dataset_type = None):
    """Instantiate the appropriate Dataset subclass for *dataset_name*.

    When *dataset_type* is not supplied it is looked up from the dataset
    menu structure via Dataset_Getter.
    """
    if not dataset_type:
        dataset_type = Dataset_Getter(dataset_name)
        print("dataset_type is:", dataset_type)
    # DS_NAME_MAP maps a type name to the class name registered for it.
    dataset_cls = globals()[DS_NAME_MAP[dataset_type]]
    return dataset_cls(dataset_name)
class Dataset_Types(object):
    """Maps each short dataset name to its dataset type.

    The mapping is built once from the JSON menu structure file and is
    afterwards queried by calling the instance with a dataset name.
    """

    def __init__(self):
        self.datasets = {}
        file_name = "wqflask/static/new/javascript/dataset_menu_structure.json"
        with open(file_name, 'r') as fh:
            data = json.load(fh)

        print("*" * 70)
        # Menu labels map onto internal type names; anything else is ProbeSet.
        label_to_type = {"Phenotypes": "Publish", "Genotypes": "Geno"}
        for species_name in data['datasets']:
            groups = data['datasets'][species_name]
            for group_name in groups:
                for menu_label, dataset_list in groups[group_name].items():
                    for dataset in dataset_list:
                        short_dataset_name = dataset[1]
                        self.datasets[short_dataset_name] = label_to_type.get(
                            menu_label, "ProbeSet")

    def __call__(self, name):
        return self.datasets[name]
# Do the intensive work at startup one time only
Dataset_Getter = Dataset_Types()
def create_datasets_list():
key = "all_datasets"
result = Redis.get(key)
if result:
print("Cache hit!!!")
datasets = pickle.loads(result)
else:
datasets = list()
with Bench("Creating DataSets object"):
type_dict = {'Publish': 'PublishFreeze',
'ProbeSet': 'ProbeSetFreeze',
'Geno': 'GenoFreeze'}
for dataset_type in type_dict:
query = "SELECT Name FROM {}".format(type_dict[dataset_type])
for result i
|
n g.db.execute(query).fetchall():
#The query at the beginning of this function isn't necessary here, but still would
#rather just reuse it
#print("type: {}\tname: {}".format(dataset_type, result.Name))
dataset = create_dataset(result.Name, dataset_type)
datasets.append(dataset)
Redis.set(key, pickle.dumps(dataset
|
s, pickle.HIGHEST_PROTOCOL))
Redis.expire(key, 60*60)
return datasets
def create_in_clause(items):
    """Create an in clause for mysql"""
    quoted = ("'{}'".format(escaped) for escaped in mescape(*items))
    return '( {} )'.format(', '.join(quoted))
def mescape(*items):
    """Escape every item for MySQL and return the results as a list."""
    return [escape(str(value)) for value in items]
class Markers(object):
    """Todo: Build in cacheing so it saves us reading the same file more than once"""

    def __init__(self, name):
        """Load marker definitions from <name>.json under genotype/json.

        A malformed JSON file yields an empty marker list instead of
        raising, preserving the original best-effort behaviour.
        """
        # Use a context manager so the file handle is closed instead of
        # leaked (the original never closed it).
        with open(locate(name + '.json', 'genotype/json')) as json_data_fh:
            try:
                markers = json.load(json_data_fh)
            except ValueError:
                # Narrowed from a bare ``except``: only swallow JSON parse
                # errors, not KeyboardInterrupt/SystemExit and friends.
                markers = []
        for marker in markers:
            # Keep sex chromosomes as strings; everything else as ints.
            if (marker['chr'] != "X") and (marker['chr'] != "Y"):
                marker['chr'] = int(marker['chr'])
            marker['Mb'] = float(marker['Mb'])
        self.markers = markers

    def add_pvalues(self, p_values):
        """Attach p_value, lod_score and lrs_value to the loaded markers.

        p_values -- either a list aligned index-for-index with self.markers,
            or a dict keyed by marker name.  With a dict, markers lacking a
            p-value are dropped from self.markers entirely.
        """
        print("length of self.markers:", len(self.markers))
        print("length of p_values:", len(p_values))

        if type(p_values) is list:
            # Only needed when limiting the number of p-values calculated.
            for marker, p_value in itertools.izip(self.markers, p_values):
                if not p_value:
                    continue
                marker['p_value'] = float(p_value)
                if math.isnan(marker['p_value']) or marker['p_value'] <= 0:
                    marker['lod_score'] = 0
                    marker['lrs_value'] = 0
                else:
                    marker['lod_score'] = -math.log10(marker['p_value'])
                    # Approximate LRS as -log10(p) * 4.61; need to ask Rob
                    # how he wants to derive LRS from p-values.
                    marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
        elif type(p_values) is dict:
            filtered_markers = []
            for marker in self.markers:
                if marker['name'] in p_values:
                    marker['p_value'] = p_values[marker['name']]
                    if math.isnan(marker['p_value']) or (marker['p_value'] <= 0):
                        marker['lod_score'] = 0
                        marker['lrs_value'] = 0
                    else:
                        marker['lod_score'] = -math.log10(marker['p_value'])
                        marker['lrs_value'] = -math.log10(marker['p_value']) * 4.61
                    filtered_markers.append(marker)
            self.markers = filtered_markers
class HumanMarkers(Markers):
def __init__(self, name, specified_markers = []):
marker_data_fh = open(locate('genotype') + '/' + name + '.bim')
self.markers = []
for line in marker_data_fh:
splat = line.strip().split()
#print("splat:", splat)
if len(specified_markers) > 0:
if splat[1] in specified_markers:
marker = {}
marker['chr'] = int(splat[0])
marker['name'] = splat[1]
marker['Mb'] = float(splat[3]) / 1000000
else:
continue
else:
marker = {}
marker['chr'] = int(splat[0])
marker['name'] = splat[1]
marker['Mb'] = float(splat[3]) / 1000000
self.markers.append(marker)
#print("markers is: ", pf(self.markers))
def ad
|
nanomolina/JP
|
src/odontology/person/migrations/0050_auto_20160710_0431.py
|
Python
|
apache-2.0
| 422
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-10 07:31
from __
|
future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('person', '0049_patient_ph
|
oto'),
]
operations = [
migrations.RenameField(
model_name='patient',
old_name='photo',
new_name='picture',
),
]
|
mganeva/mantid
|
scripts/HFIR_4Circle_Reduction/detector2dview.py
|
Python
|
gpl-3.0
| 14,054
| 0.00249
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=W0403,R0902,R0903,R0904,W0212
from __future__ import (absolute_import, division, print_function)
from HFIR_4Circle_Reduction import mpl2dgraphicsview
import numpy as np
import os
from qtpy.QtCore import Signal as pyqtSignal
class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
"""
Customized 2D detector view
"""
class MousePress(object):
RELEASED = 0
LEFT = 1
RIGHT = 3
newROIDefinedSignal = pyqtSignal(int, int, int, int) # return coordinate of the
def __init__(self, parent):
    """Set up the 2D detector view and hook mouse events to the canvas.

    :param parent: parent Qt widget passed to the base graphics view
    :return:
    """
    mpl2dgraphicsview.Mpl2dGraphicsView.__init__(self, parent)
    # connect the mouse motion to interact with the canvas
    self._myCanvas.mpl_connect('button_press_event', self.on_mouse_press_event)
    self._myCanvas.mpl_connect('button_release_event', self.on_mouse_release_event)
    self._myCanvas.mpl_connect('motion_notify_event', self.on_mouse_motion)
    # class variables
    self._myPolygon = None  # matplotlib.patches.Polygon currently drawn for the ROI
    # class status variables
    self._roiSelectMode = False
    # region of interest. None or 2 tuple of 2-tuple for upper left corner
    # and lower right corner; recorded from mouse start/end positions
    self._roiStart = None
    self._roiEnd = None
    # which mouse button is currently held (see MousePress constants)
    self._mousePressed = Detector2DView.MousePress.RELEASED
    # last seen mouse position and the minimum movement treated as a drag
    self._currX = 0.
    self._currY = 0.
    self._resolutionX = 0.005
    self._resolutionY = 0.005
    # parent window (set externally; not assigned from *parent* here)
    self._myParentWindow = None
    return
def clear_canvas(self):
    """Clear the canvas and forget any ROI selection (overrides base class).

    :return:
    """
    # drop the drawn ROI polygon and the remembered mouse positions
    self._myPolygon = None
    self._roiStart = None
    self._roiEnd = None
    # let the base class wipe the actual canvas contents
    super(Detector2DView, self).clear_canvas()
    return
def enter_roi_mode(self, roi_state):
    """Enter or leave the region of interest (ROI) selection mode.

    :param roi_state: True to enter ROI-selection mode, False to leave it
    :return:
    """
    assert isinstance(roi_state, bool), 'ROI mode state {} must be a boolean but not a {}.' \
                                        ''.format(roi_state, type(roi_state))
    self._roiSelectMode = roi_state
    if roi_state:
        # entering add-ROI mode: start from a blank selection
        self.remove_roi()
        return
    # leaving the mode: forget the recorded mouse positions
    self._roiStart = None
    self._roiEnd = None
    return
def integrate_roi_linear(self, exp_number, scan_number, pt_number, output_dir):
    """Integrate the 2D data inside the region of interest along axis-0 and
    axis-1 individually and save each 1D result to an ASCII file.

    The X values in the output files are pixel indices along the summed axis.
    Returns a status message naming the output file base.
    """
    def save_to_file(base_file_name, axis, array1d, start_index):
        """Write one 1D integration result to <base>_axis_<axis>.dat."""
        file_name = '{0}_axis_{1}.dat'.format(base_file_name, axis)
        vec_x = np.arange(len(array1d)) + start_index
        wbuf = ''
        for x, d in zip(vec_x, array1d):
            wbuf += '{0} \t{1}\n'.format(x, d)
        # Bug fix: use a context manager so the handle is closed even if
        # the write raises (the original leaked on error).
        with open(file_name, 'w') as ofile:
            ofile.write(wbuf)

    matrix = self.array2d
    assert isinstance(matrix, np.ndarray), 'A matrix must be an ndarray but not {0}.'.format(type(matrix))

    # Default to the full detector when no ROI has been selected.
    if self._roiStart is None:
        self._roiStart = (0, 0)
    if self._roiEnd is None:
        self._roiEnd = matrix.shape

    ll_row = min(self._roiStart[0], self._roiEnd[0])
    ll_col = min(self._roiStart[1], self._roiEnd[1])
    ur_row = max(self._roiStart[0], self._roiEnd[0])
    ur_col = max(self._roiStart[1], self._roiEnd[1])

    # NOTE(review): columns slice axis 0 and rows slice axis 1 here, which
    # looks transposed — kept as-is to preserve behaviour; confirm against
    # the detector data layout.
    roi_matrix = matrix[ll_col:ur_col, ll_row:ur_row]
    sum_0 = roi_matrix.sum(0)
    sum_1 = roi_matrix.sum(1)

    # write to file
    base_name = os.path.join(output_dir, 'Exp{0}_Scan{1}_Pt{2}'.format(exp_number, scan_number, pt_number))
    save_to_file(base_name, 0, sum_0, ll_row)
    save_to_file(base_name, 1, sum_1, ll_col)

    message = 'Integrated values are saved to {0}...'.format(base_name)
    return message
@property
def is_roi_selection_drawn(self):
    """True when an ROI polygon is currently drawn on the canvas.

    :return:
    """
    return self._myPolygon is not None
def get_roi(self):
"""
:return: A list for polygon0
"""
assert self._roiStart is not None
assert self._roiEnd is not None
# rio start is upper left, roi end is lower right
lower_left_x = min(self._roiStart[0], self._roiEnd[0])
|
lower_left_y = min(self._roiStart[1], self._roiEnd[1])
lower_left = lower_left_x, lower_left_y
# ROI upper right
upper_right_x = max(self._roiStart[0], self._roiEnd[0])
upper_ri
|
ght_y = max(self._roiStart[1], self._roiEnd[1])
upper_right = upper_right_x, upper_right_y
return lower_left, upper_right
def plot_detector_counts(self, raw_det_data, title=None):
    """Plot detector counts as a 2D image over the full data extent.

    :param raw_det_data: 2D array of counts; its shape fixes the axis ranges
    :param title: plot title; defaults to 'No Title'
    :return:
    """
    x_min = 0
    x_max = raw_det_data.shape[0]
    y_min = 0
    y_max = raw_det_data.shape[1]
    count_plot = self.add_plot_2d(raw_det_data, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max,
                                  hold_prev_image=False)
    if title is None:
        title = 'No Title'
    self.set_title(title)
    # Re-attach the ROI polygon, if one exists, since the image was redrawn.
    if self._myPolygon is not None:
        print ('[DB...BAT...] Add PATCH')
        self._myCanvas.add_patch(self._myPolygon)
    else:
        print ('[DB...BAT...] NO PATCH')
    print ('[DB...BAT...AFTER] ROI Rect: {0}. 2D plot: {1}'.format(self._myPolygon, count_plot))
    return
def plot_roi(self):
    """Plot the region of interest as a rectangle built from the recorded
    ROI start/end mouse positions.

    :return:
    """
    # check
    assert self._roiStart is not None, 'Starting point of region-of-interest cannot be None'
    assert self._roiEnd is not None, 'Ending point of region-of-interest cannot be None'
    # Build the rectangle's four corners from the two recorded points;
    # vertex order is start-corner, mixed, end-corner, mixed.
    vertex_array = np.ndarray(shape=(4, 2))
    # corner at the ROI start position
    vertex_array[0][0] = self._roiStart[0]
    vertex_array[0][1] = self._roiStart[1]
    # corner at the ROI end position (diagonally opposite)
    vertex_array[2][0] = self._roiEnd[0]
    vertex_array[2][1] = self._roiEnd[1]
    # corner mixing end-x with start-y
    vertex_array[1][0] = self._roiEnd[0]
    vertex_array[1][1] = self._roiStart[1]
    # corner mixing start-x with end-y
    vertex_array[3][0] = self._roiStart[0]
    vertex_array[3][1] = self._roiEnd[1]
    # Replace any previously drawn polygon with the new one.
    if self._myPolygon is not None:
        self._myPolygon.remove()
        self._myPolygon = None
    self._myPolygon = self._myCanvas.plot_polygon(vertex_array, fill=False, color='w')
    return
def remove_roi(self):
"""
Remove the rectangular for region of interest
:return:
"""
print ('[DB...BAT] Try to remove ROI {0
|
adrianomargarin/wttd-eventex
|
eventex/core/admin.py
|
Python
|
gpl-3.0
| 1,325
| 0.000755
|
from django.contrib import admin
from eventex.core.models import Talk
from eventex.core.models import Course
from eventex.core.models import Speaker
from eventex.core.models import Contact
class ContactInline(admin.TabularInline):
    """Inline editor so a speaker's contacts can be managed on the speaker page."""
    extra = 1
    model = Contact
class SpeakerModelAdmin(admin.ModelAdmin):
inlines = [ContactInline]
list_display = ['name', 'website_link', 'photo_img', 'email', 'phone']
prepopulated_fields = {'slug': ['name']}
def website
|
_link(self, obj):
return '<a href="{0}">{0}</a>'.format(obj.websit
|
e)
website_link.allow_tags = True
website_link.short_description = 'Website'
def photo_img(self, obj):
return '<img width="32px" src="{}"/>'.format(obj.photo)
photo_img.allow_tags = True
photo_img.short_description = 'Foto'
def email(self, obj):
return obj.contact_set.emails().first()
email.short_description = 'E-mail'
def phone(self, obj):
return obj.contact_set.phones().first()
phone.short_description = 'Telefone'
class TalkModelAdmin(admin.ModelAdmin):
    """Admin for talks; hides course rows, which are managed separately."""
    def get_queryset(self, request):
        return super().get_queryset(request).filter(course=None)
# Wire the models into the admin site (Course uses the default ModelAdmin).
admin.site.register(Speaker, SpeakerModelAdmin)
admin.site.register(Talk, TalkModelAdmin)
admin.site.register(Course)
|
pattisdr/osf.io
|
scripts/remove_after_use/find_timeseries_preprints.py
|
Python
|
apache-2.0
| 1,686
| 0.004152
|
from __future__ import division
import argparse
import logging
import csv
import io
from website.app import setup_django
setup_django()
from osf.metrics import PreprintDownload
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def generate_preprint_csv(preprint_ids):
    """Build a CSV of daily download counts, one row per preprint.

    preprint_ids -- iterable of preprint GUIDs to include.
    Returns an io.BytesIO holding the CSV contents.
    """
    search = PreprintDownload.search().aggs.metric('times', {'date_histogram': {'field': 'timestamp', 'interval': 'day', 'format': 'yyyy-MM-dd'}})
    output = io.BytesIO()
    # Run once unfiltered just to learn the full set of day buckets for the header.
    data = search.execute()
    writer = csv.DictWriter(output, restval=0, fieldnames=[bucket['key_as_string'] for bucket in data.aggregations.times.buckets])
    writer.writeheader()
    for preprint_id in preprint_ids:
        data = search.filter('match', preprint_id=preprint_id).execute()
        if data.aggregations.times.buckets:
            writer.writerow({bucket['key_as_string']: bucket['doc_count'] for bucket in data.aggregations.times.buckets})
        else:
            # Bug fix: the original message never interpolated the guid,
            # logging the literal '{}' placeholder instead.
            logger.info('preprint {} could not be found skipping'.format(preprint_id))
    return output
# defined command line options
# this also generates --help and error handling
# parse the command line
def main():
preprint_guids_to_search = ['vdz32', 'hv28a', 'yj8xw', '35juv', 'pbhr4', 'mky9j', 'qt3k6', 'kr3z8', 'nbhxq', 'az5bg', 'd7av9', '447b3']
cli = argparse.ArgumentParser()
cli.add_argument(
'--guids',
nargs='*',
type=str,
default=preprint_guids_to_search,
)
args = cli.parse_args()
preprint_csv = generate_
|
preprint_csv(args.guids)
with open('top_ten_preprints.csv', 'wb') as writeFile:
wri
|
teFile.write(preprint_csv.getvalue())
if __name__ == '__main__':
main()
|
Suwmlee/XX-Net
|
gae_proxy/server/lib/google/appengine/api/yaml_listener.py
|
Python
|
bsd-2-clause
| 7,554
| 0.003839
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PyYAML event listener
Contains class which interprets YAML events and forwards them to
a handler object.
"""
from google.appengine.api import yaml_errors
import yaml
_EVENT_METHOD_MAP = {
yaml.events.StreamStartEvent: 'StreamStart',
yaml.events.StreamEndEvent: 'StreamEnd',
yaml.events.DocumentStartEvent: 'DocumentStart',
yaml.events.DocumentEndEvent: 'DocumentEnd',
yaml.events.AliasEvent: 'Alias',
yaml.events.ScalarEvent: 'Scalar',
yaml.events.SequenceStartEvent: 'SequenceStart',
yaml.events.SequenceEndEvent: 'SequenceEnd',
yaml.events.MappingStartEvent: 'MappingStart',
yaml.events.MappingEndEvent: 'MappingEnd',
}
class EventHandler(object):
    """Handler interface for parsing YAML files.

    Implement this interface to define specific YAML event handling class.
    Implementing classes instances are passed to the constructor of
    EventListener to act as a receiver of YAML parse events.  Every method
    receives the raw PyYAML event plus the loader that produced it.
    """
    def StreamStart(self, event, loader):
        """Handle start of stream event"""
    def StreamEnd(self, event, loader):
        """Handle end of stream event"""
    def DocumentStart(self, event, loader):
        """Handle start of document event"""
    def DocumentEnd(self, event, loader):
        """Handle end of document event"""
    def Alias(self, event, loader):
        """Handle alias event"""
    def Scalar(self, event, loader):
        """Handle scalar event"""
    def SequenceStart(self, event, loader):
        """Handle start of sequence event"""
    def SequenceEnd(self, event, loader):
        """Handle end of sequence event"""
    def MappingStart(self, event, loader):
        """Handle start of mapping event"""
    def MappingEnd(self, event, loader):
        """Handle end of mapping event"""
class EventListener(object):
"""Helper class to re-map PyYAML events to method calls.
By default, PyYAML generates its events via a Python generator. This class
is a helper that iterates over the events from the PyYAML parser and forwards
them to a handle class in the form of method calls. For simplicity, the
underlying event is forwarded to the handler as a parameter to the call.
This object does not itself produce iterable objects, but is really a mapping
to a given handler instance.
Example use:
class PrintDocumentHandler(object):
def DocumentStart(event):
print "A new document has been started"
EventListener(PrintDocumentHandler()).Parse('''
key1: value1
---
key2: value2
'''
>>> A new document has been started
A new document has been started
In the example above, the implemented handler class (PrintDocumentHandler)
has a single method which reports each time a new document is started within
a YAML file. It is not necessary to subclass the EventListener, merely it
receives a PrintDocumentHandler instance. Every time a new document begins,
PrintDocumentHandler.DocumentStart is called with the PyYAML event passed
in as its parameter..
"""
def __init__(self, event_handler):
    """Initialize PyYAML event listener.

    Builds a direct event-type -> bound-method table up front so that no
    reflection is needed during actual parse time.

    Args:
      event_handler: Event handler that will receive mapped events.  Must
        implement at least one appropriate handler method named from
        the values of the _EVENT_METHOD_MAP.

    Raises:
      ListenerConfigurationError if event_handler is not an EventHandler.
    """
    if not isinstance(event_handler, EventHandler):
        raise yaml_errors.ListenerConfigurationError(
            'Must provide event handler of type yaml_listener.EventHandler')
    self._event_method_map = {
        event: getattr(event_handler, method)
        for event, method in _EVENT_METHOD_MAP.items()}
def HandleEvent(self, event, loader=None):
    """Dispatch a single PyYAML event to the configured handler.

    Args:
      event: Event to forward to method call in method call.

    Raises:
      IllegalEvent when receives an unrecognized or unsupported event type.
    """
    event_class = event.__class__
    if event_class not in _EVENT_METHOD_MAP:
        raise yaml_errors.IllegalEvent(
            "%s is not a valid PyYAML class" % event_class.__name__)
    handler = self._event_method_map.get(event_class)
    if handler is not None:
        handler(event, loader)
def _HandleEvents(self, events):
    """Send every (event, loader) pair in *events* to the handler.

    This method is not meant to be called from the interface; only use
    in tests.

    Args:
      events: Iterator or generator containing events to process.

    raises:
      EventListenerParserError when a yaml.parser.ParserError is raised.
      EventError when an exception occurs during the handling of an event.
    """
    for event_pair in events:
        try:
            self.HandleEvent(*event_pair)
        except Exception as e:
            event_object = event_pair[0]
            raise yaml_errors.EventError(e, event_object)
def _GenerateEventParameters(self,
stream,
loader_class=yaml.loader.SafeLoader):
"""Creates a generator that yields event, loader parameter pairs.
For use as parameters to HandleEvent method for use by Parse method.
During testing, _GenerateEventParameters is simulated by allowing
the harness to pass in a list of pairs as the parameter.
A list of (event, loader) pairs must be passed to _HandleEvents otherwise
it is not possible to pass the loader instance to the handler.
Also responsible for instantiating the loader from the Loader
paramet
|
er.
Args:
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work.
Loader: Loader class to use as per the yaml.parse method. Used to
instantiate new yaml.loader instance.
Yields:
Tuple(event, loader) where:
event: Event emitted by PyYAML loader.
loader_class: Used for dependency injection.
"""
assert loa
|
der_class is not None
try:
loader = loader_class(stream)
while loader.check_event():
yield (loader.get_event(), loader)
except yaml.error.YAMLError as e:
raise yaml_errors.EventListenerYAMLError(e)
def Parse(self, stream, loader_class=yaml.loader.SafeLoader):
    """Parse *stream* and forward every resulting YAML event to the handler.

    Args:
      stream: String document or open file object to process as per the
        yaml.parse method. Any object that implements a 'read()' method
        which returns a string document will work with the YAML parser.
      loader_class: Used for dependency injection.
    """
    event_parameters = self._GenerateEventParameters(stream, loader_class)
    self._HandleEvents(event_parameters)
|
blackboarddd/PyTest
|
mstr/PILTest.py
|
Python
|
gpl-3.0
| 2,261
| 0.01946
|
'''
Created on May 21, 2016
@author: zlp
'''
# import Image
hello = 100
class PILTest(object):
    """Toy class demonstrating instance state and class-attribute shadowing
    (the class-level ``hello`` shadows the module-level ``hello``)."""
    hello = 200  # shadows the module-level hello = 100
    def __init__(self, name):
        self._name = name
    def printName(self):
        print self._name
    def printHelloInClass(self):
        # resolves to the class attribute (200), not the module global (100)
        print self.hello
class2 = PILTest('hahah')
# print 'Print hello value: %d' % hello
# class2.printHelloInClass()
# def make_counter():
# count = 0
# def counter():
# nonlocal count
# count += 1
# return count
# return counter
# yield and testing
def fib(max):
    """Yield the Fibonacci numbers that are strictly below *max*."""
    current, following = 0, 1
    while current < max:
        yield current
        current, following = following, current + following
class Fib2(object):
def __init__(self,max):
self.__max = max
def __iter__(self):
self.a = 0
self.b = 1
return self
def next(self):
result = self.a
if result > self.__max:
raise StopIteration
self.a, self.b = self.b, self.a+self.b
return result
# for i in fib(100):
# print i
#
# for i in Fib2(100):
# print i
#
# a = [1,2,3,4]
# b = [2,3]
#
# for i in zip(a,b):
# print i
# w,h = im.size
params = dict(user='user', password='password', database='database', host='host', port='port')
defaults = dict(use_unicode=True, charset='utf8', collation='utf8_general_ci', autocommit=False)
for k,v in defaults.iteritems():
pass
# print k,v
def outside(func):
    """Decorator that announces itself before delegating to *func*."""
    def wrapper(*args, **kw):
        print 'Outside sheel'
        return func(*args,**kw)
    return wrapper
# ``log`` is itself decorated, so *calling* log(...) first prints
# 'Outside sheel', then builds the inner wrapper.
@outside
def log(func):
    """Decorator factory that prints 'Hello' before calling *func*."""
    def wrapper(*args, **kw):
        print 'Hello'
        return func(*args, **kw)
    return wrapper
@log
def simplePrint(str1):
    # Prints its argument and returns None, so the print below shows 'None'.
    print 'This is Simple ' + str1
print simplePrint('hahaha')
def func1(a,b):
    '''
    Always returns 10, regardless of the arguments.

    >>> func1(1,2)
    10
    >>> func1(2,4)
    10
    '''
    return 10
import doctest
doctest.testmod()
class Test1(object):
    """Demonstrates a shared class attribute next to per-instance state."""

    name = 'haha'  # class attribute shared by all instances

    def __init__(self, name2):
        # per-instance attribute, independent of the class-level ``name``
        self.name2 = name2
t1 = Test1('xiaozhu')
t2 = Test1('xiaoYu')
# print t1.name
# print t1.nam
|
e2
# print t2.name
# print t2.name2
# t1.name = 'HEIHEI
|
!'
# print t1.name
# print t2.name
print 'T1\'s name is ',Test1.name
|
interactiveinstitute/watthappened
|
python_modules/socketpool/pool.py
|
Python
|
mit
| 6,292
| 0.000795
|
# -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import contextlib
import sys
import time
from socketpool.util import load_backend
class MaxTriesError(Exception):
    """Raised when a connection cannot be obtained within retry_max attempts."""
    pass
class MaxConnectionsError(Exception):
    """Raised when the pool cannot provide any more connections."""
    pass
class ConnectionPool(object):
"""Pool of connections
This is the main object to maintain connection. Connections are
created using the factory instance passed as an option.
Options:
--------
:attr factory: Instance of socketpool.Connector. See
socketpool.conn.TcpConnector for an example
:attr retry_max: int, default 3. Numbr of times to retry a
connection before raising the MaxTriesError exception.
:attr max_lifetime: int, default 600. time in ms we keep a
connection in the pool
:attr max_size: int, default 10. Maximum number of connections we
keep in the pool.
:attr options: Options to pass to the factory
:attr reap_connection: boolean, default is true. If true a process
will be launched in background to kill idle connections.
:attr backend: string, default is thread. The socket pool can use
different backend to handle process and connections. For now
the backends "thread", "gevent" and "eventlet" are supported. But
you can add your own backend if you want. For an example of backend,
look at the module socketpool.gevent_backend.
"""
def __init__(self, factory,
             retry_max=3, retry_delay=.1,
             timeout=-1, max_lifetime=600.,
             max_size=10, options=None,
             reap_connections=True, backend="thread"):
    """Set up the pool; see the class docstring for option meanings."""
    self.backend_mod = load_backend(backend)
    self.backend = backend
    self.max_size = max_size
    # idle connections, ordered by lifetime via the backend's priority queue
    self.pool = getattr(self.backend_mod, 'PriorityQueue')()
    self._free_conns = 0
    self.factory = factory
    self.retry_max = retry_max
    self.retry_delay = retry_delay
    self.timeout = timeout
    self.max_lifetime = max_lifetime
    # When options are supplied, the pool still injects backend_mod and a
    # back-reference to itself so the factory can reach them.
    if options is None:
        self.options = {"backend_mod": self.backend_mod,
                        "pool": self}
    else:
        self.options = options
        self.options["backend_mod"] = self.backend_mod
        self.options["pool"] = self

    # bounded semaphore to make self._alive 'safe'
    self._sem = self.backend_mod.Semaphore(1)

    self._reaper = None
    if reap_connections:
        self.start_reaper()
def too_old(self, conn):
    """Has *conn* outlived the pool's max_lifetime?"""
    age = time.time() - conn.get_lifetime()
    return age > self.max_lifetime
def murder_connections(self):
    """Drop expired connections from the pool.

    Makes roughly one pass over the priority queue: connections that are
    still fresh are put back, expired ones are invalidated.  The counter
    bounds the pass so re-inserted items are not examined again.
    """
    current_pool_size = self.pool.qsize()
    if current_pool_size > 0:
        for priority, candidate in self.pool:
            current_pool_size -= 1
            if not self.too_old(candidate):
                # still fresh: return it to the pool
                self.pool.put((priority, candidate))
            else:
                self._reap_connection(candidate)
            if current_pool_size <= 0:
                break
def start_reaper(self):
    """Launch the background reaper that kills idle connections."""
    reaper_cls = self.backend_mod.ConnectionReaper
    self._reaper = reaper_cls(self, delay=self.max_lifetime)
    self._reaper.ensure_started()
def _reap_connection(self, conn):
    """Invalidate *conn* if it still holds a live connection."""
    if not conn.is_connected():
        return
    conn.invalidate()
def size(self):
    """Number of idle connections currently held by the pool."""
    return self.pool.qsize()
def release_all(self):
    """Invalidate every connection currently sitting in the pool."""
    if not self.pool.qsize():
        return
    for _priority, pooled_conn in self.pool:
        self._reap_connection(pooled_conn)
def release_connection(self, conn):
    """Return *conn* to the pool, or invalidate it when the pool is full,
    the connection is dead, or it has outlived max_lifetime."""
    if self._reaper is not None:
        self._reaper.ensure_started()

    with self._sem:
        # Short-circuit evaluation mirrors the original nested checks:
        # is_connected()/too_old() are only consulted when there is room.
        keep = (self.pool.qsize() < self.max_size
                and conn.is_connected()
                and not self.too_old(conn))
        if keep:
            self.pool.put((conn.get_lifetime(), conn))
        else:
            self._reap_connection(conn)
    def get(self, **options):
        """Fetch a matching pooled connection, or create one via the factory.

        Retries up to ``retry_max`` times with ``retry_delay`` sleeps.
        Raises ``MaxTriesError`` when no attempt produced an error but none
        succeeded, otherwise re-raises the last factory error.
        NOTE(review): ``unmatched`` is not cleared between retries, so on a
        second pass previously re-queued candidates may be put back again —
        confirm against upstream socketpool behaviour.
        """
        options.update(self.options)

        found = None
        # bound the scan to the number of entries present at entry
        i = self.pool.qsize()
        tries = 0
        last_error = None

        unmatched = []
        while tries < self.retry_max:
            # first let's try to find a matching one from pool
            if self.pool.qsize():
                for priority, candidate in self.pool:
                    i -= 1
                    if self.too_old(candidate):
                        # let's drop it
                        self._reap_connection(candidate)
                        continue

                    matches = candidate.matches(**options)
                    if not matches:
                        # let's put it back
                        unmatched.append((priority, candidate))
                    else:
                        if candidate.is_connected():
                            found = candidate
                            break
                        else:
                            # conn is dead for some reason.
                            # reap it.
                            self._reap_connection(candidate)

                    if i <= 0:
                        break

            if unmatched:
                # re-queue the candidates that did not match the options
                for candidate in unmatched:
                    self.pool.put(candidate)

            # we got one.. we use it
            if found is not None:
                return found

            try:
                new_item = self.factory(**options)
            except Exception as e:
                last_error = e
            else:
                # we should be connected now
                if new_item.is_connected():
                    with self._sem:
                        return new_item

            tries += 1
            self.backend_mod.sleep(self.retry_delay)

        if last_error is None:
            raise MaxTriesError()
        else:
            raise last_error
@contextlib.contextmanager
def connection(self, **options):
conn = self.get(**options)
try:
yield conn
# what to do in case of success
except Exception as e:
conn.handle_exception(e)
finally:
self.release_connection(conn)
|
syci/OCB
|
addons/website_sale/models/product.py
|
Python
|
agpl-3.0
| 9,392
| 0.006601
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp import tools
from openerp.osv import osv, fields
class product_style(osv.Model):
    """Visual style (a set of CSS classes) selectable for website products."""
    _name = "product.style"

    _columns = {
        'name': fields.char('Style Name', required=True),
        'html_class': fields.char('HTML Classes'),
    }
class product_pricelist(osv.Model):
    """Extend pricelists with an e-commerce promotional code."""
    _inherit = "product.pricelist"

    _columns = {
        'code': fields.char('E-commerce Promotional Code'),
    }
class product_public_category(osv.osv):
    """Public (website shop) product category, organized as a tree."""
    _name = "product.public.category"
    _inherit = ["website.seo.metadata"]
    _description = "Website Product Category"
    _order = "sequence, name"

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]

    def name_get(self, cr, uid, ids, context=None):
        # Build "Parent / Child / Grandchild" display names by walking the
        # parent chain upwards for every requested category.
        result = []
        for category in self.browse(cr, uid, ids, context=context):
            parts = [category.name]
            ancestor = category.parent_id
            while ancestor:
                parts.append(ancestor.name)
                ancestor = ancestor.parent_id
            result.append((category.id, ' / '.join(reversed(parts))))
        return result

    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        # Functional-field wrapper exposing name_get() as {id: complete_name}.
        return dict(self.name_get(cr, uid, ids, context=context))

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('product.public.category', 'Parent Category', select=True),
        'child_id': fields.one2many('product.public.category', 'parent_id', string='Children Categories'),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
    }

    # NOTE: there is no 'default image', because by default we don't show
    # thumbnails for categories. However if we have a thumbnail for at least one
    # category, then we display a default image on the other, so that the
    # buttons have consistent styling.
    # In this case, the default image is set by the js code.
    image = openerp.fields.Binary("Image", attachment=True,
        help="This field holds the image used as image for the category, limited to 1024x1024px.")
    image_medium = openerp.fields.Binary("Medium-sized image",
        compute='_compute_images', inverse='_inverse_image_medium', store=True, attachment=True,
        help="Medium-sized image of the category. It is automatically "
             "resized as a 128x128px image, with aspect ratio preserved. "
             "Use this field in form views or some kanban views.")
    image_small = openerp.fields.Binary("Small-sized image",
        compute='_compute_images', inverse='_inverse_image_small', store=True, attachment=True,
        help="Small-sized image of the category. It is automatically "
             "resized as a 64x64px image, with aspect ratio preserved. "
             "Use this field anywhere a small image is required.")

    @openerp.api.depends('image')
    def _compute_images(self):
        # Derive the medium/small thumbnails from the master image.
        for record in self:
            record.image_medium = tools.image_resize_image_medium(record.image)
            record.image_small = tools.image_resize_image_small(record.image)

    def _inverse_image_medium(self):
        for record in self:
            record.image = tools.image_resize_image_big(record.image_medium)

    def _inverse_image_small(self):
        for record in self:
            record.image = tools.image_resize_image_big(record.image_small)
class product_template(osv.Model):
    """Website (e-commerce) extensions of product.template."""
    _inherit = ["product.template", "website.seo.metadata", 'website.published.mixin', 'rating.mixin']
    _order = 'website_published desc, website_sequence desc, name'
    _name = 'product.template'
    _mail_post_access = 'read'

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the shop URL (/shop/product/<id>) for each template."""
        res = super(product_template, self)._website_url(cr, uid, ids, field_name, arg, context=context)
        for product in self.browse(cr, uid, ids, context=context):
            res[product.id] = "/shop/product/%s" % (product.id,)
        return res

    _columns = {
        # TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', ('model', '=', self._name), ('message_type', '=', 'comment')
            ],
            string='Website Comments',
        ),
        'website_description': fields.html('Description for the website', sanitize=False, translate=True),
        'alternative_product_ids': fields.many2many('product.template','product_alternative_rel','src_id','dest_id', string='Suggested Products', help='Appear on the product page'),
        'accessory_product_ids': fields.many2many('product.product','product_accessory_rel','src_id','dest_id', string='Accessory Products', help='Appear on the shopping cart'),
        'website_size_x': fields.integer('Size X'),
        'website_size_y': fields.integer('Size Y'),
        'website_style_ids': fields.many2many('product.style', string='Styles'),
        'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
        'public_categ_ids': fields.many2many('product.public.category', string='Website Product Category', help="Those categories are used to group similar products for e-commerce."),
    }

    def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
        """Default sequence: one below the current minimum (new products last)."""
        cr.execute('SELECT MIN(website_sequence)-1 FROM product_template')
        next_sequence = cr.fetchone()[0] or 10
        return next_sequence

    _defaults = {
        'website_size_x': 1,
        'website_size_y': 1,
        'website_sequence': _defaults_website_sequence,
    }

    def set_sequence_top(self, cr, uid, ids, context=None):
        """Move the products above all others on the website."""
        cr.execute('SELECT MAX(website_sequence) FROM product_template')
        max_sequence = cr.fetchone()[0] or 0
        return self.write(cr, uid, ids, {'website_sequence': max_sequence + 1}, context=context)

    def set_sequence_bottom(self, cr, uid, ids, context=None):
        """Move the products below all others on the website."""
        cr.execute('SELECT MIN(website_sequence) FROM product_template')
        min_sequence = cr.fetchone()[0] or 0
        return self.write(cr, uid, ids, {'website_sequence': min_sequence -1}, context=context)

    def set_sequence_up(self, cr, uid, ids, context=None):
        """Swap sequences with the next published product above, if any."""
        product = self.browse(cr, uid, ids[0], context=context)
        # Parameterized query: the original interpolated the values straight
        # into the SQL string with %, which is unsafe and fragile.
        cr.execute(""" SELECT id, website_sequence FROM product_template
            WHERE website_sequence > %s AND website_published = %s ORDER BY website_sequence ASC LIMIT 1""",
            (product.website_sequence, product.website_published))
        prev = cr.fetchone()
        if prev:
            self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
            return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
        else:
            return self.set_sequence_top(cr, uid, ids, context=context)

    def set_sequence_down(self, cr, uid, ids, context=None):
        """Swap sequences with the next published product below, if any."""
        product = self.browse(cr, uid, ids[0], context=context)
        cr.execute(""" SELECT id, website_sequence FROM product_template
            WHERE website_sequence < %s AND website_published = %s ORDER BY website_sequence DESC LIMIT 1""",
            (product.website_sequence, product.website_published))
        below = cr.fetchone()  # renamed from `next` (shadowed the builtin)
        if below:
            self.write(cr, uid, [below[0]], {'website_sequence': product.website_sequence}, context=context)
            return self.write(cr, uid, [ids[0]], {'website_sequence': below[1]}, context=context)
        else:
            return self.set_sequence_bottom(cr, uid, ids, context=context)
class product_product(osv.Model):
_inherit = "product.product"
# Wrappers for call_kw with inherits
def open_website_url(self, cr, uid, ids, context=None):
template_id = self.browse(cr, uid, ids, context=context).product_tmpl_id
|
KodiColdkeys/coldkeys-addons
|
repository/plugin.video.white.devil/resources/lib/sources/dailyrls_wp_jh.py
|
Python
|
gpl-2.0
| 6,200
| 0.018065
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Scraper provider for dailyreleases.net (debrid-only hoster links)."""
    def __init__(self):
        self.language = ['en']
        self.domains = ['dailyreleases.net']
        self.base_link = 'http://dailyreleases.net'
        # RSS search endpoint; %s receives the url-quoted query
        self.search_link = '/search/%s/feed/rss2/'

    def movie(self, imdb, title, year):
        # Pack the lookup keys into a query string; resolved in sources().
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the show-level url with episode-specific data.
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the RSS feed and return candidate hoster links.

        Results are debrid-only; entries that are not a title/year (or
        SxxExx) match, are subbed/dubbed/extras, or point at archives are
        discarded.  CAM-quality results are dropped when better ones exist.
        """
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            # hdlr is what must appear in the release name: SxxExx or year
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    # (label, href) pairs of the external links in each item
                    items += zip(client.parseDOM(post, 'a', attrs={'target': '_blank'}), client.parseDOM(post, 'a', ret='href', attrs={'target': '_blank'}))
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    # strip everything from the year/episode tag onwards
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()

                    # tokens after the year/episode tag describe the format
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        # normalize "<num> MB/GB" to GB
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
                        div = 1 if size.endswith(' GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    # "domain.tld" of the hoster; must be a known host
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Dailyrls', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources

    def resolve(self, url):
        # Links are already direct hoster URLs; nothing to resolve.
        return url
|
EcmaXp/PyCraft
|
misc/tinypy/tinypy/py2bc.py
|
Python
|
gpl-2.0
| 1,121
| 0.02587
|
# When not running under tinypy (whose float formatting yields str(1.0) == "1"),
# pull in the compatibility shims from boot.
# NOTE(review): guard relies on tinypy's float-to-string behaviour — confirm.
if not (str(1.0) == "1"):
    from boot import *
import tokenize,parse,encode
def _compile(s, fname):
    """Compile tinypy source *s* into bytecode, tagging it with *fname*."""
    token_stream = tokenize.tokenize(s)
    tree = parse.parse(s, token_stream)
    return encode.encode(fname, s, tree)
def _import(name):
    """Import tinypy module *name*, compiling <name>.py to <name>.tpc on demand.

    Returns the module's globals dict and caches it in MODULES.
    """
    if name in MODULES:
        return MODULES[name]
    py = name+".py"
    tpc = name+".tpc"
    if exists(py):
        # recompile when the source is newer than the cached bytecode
        if not exists(tpc) or mtime(py) > mtime(tpc):
            s = load(py)
            code = _compile(s,py)
            save(tpc,code)
    # NOTE(review): bare `raise` outside an except clause — under CPython this
    # raises RuntimeError("No active exception"); confirm intended under tinypy.
    if not exists(tpc): raise
    code = load(tpc)
    g = {'__name__':name,'__code__':code}
    # module object doubles as its own __dict__
    g['__dict__'] = g
    MODULES[name] = g
    exec(code,g)
    return g
|
def _init():
    """Expose the compiler and importer as tinypy builtins."""
    BUILTINS.update({'compile': _compile, 'import': _import})
def import_fname(fname, name):
    """Load, compile and execute *fname* as module *name*; return its globals."""
    module_globals = {'__name__': name}
    MODULES[name] = module_globals
    source_text = load(fname)
    module_globals['__code__'] = _compile(source_text, fname)
    exec(module_globals['__code__'], module_globals)
    return module_globals
def tinypy():
    """Run the script named on the command line as __main__."""
    return import_fname(ARGV[0], '__main__')
def main(src, dest):
    """Compile source file *src* and write the bytecode to *dest*."""
    bytecode = _compile(load(src), src)
    save(dest, bytecode)
# CLI usage: py2bc.py <src.py> <dest.tpc>
if __name__ == '__main__':
    main(ARGV[1],ARGV[2])
|
Mlieou/oj_solutions
|
leetcode/python/ex_294.py
|
Python
|
mit
| 311
| 0.009646
|
class Solution(object):
    """LeetCode 294 "Flip Game II": can the current player force a win?"""
    # class-level memo shared across instances (keyed by board string)
    _memo = {}

    def canWin(self, s):
        """
        :type s: str
        :rtype: bool

        The current player wins iff some move (flipping a "++" to "--")
        leaves the opponent in a losing position.
        """
        memo = self._memo
        if s not in memo:
            # Bug fix: the original computed the result but never stored it
            # in the memo (and returned the un-memoized value), so the cache
            # was dead and `return memo[s]` was unreachable.
            memo[s] = any(
                s[i:i + 2] == '++' and not self.canWin(s[:i] + '--' + s[i + 2:])
                for i in range(len(s)))
        return memo[s]
|
andrewromanenco/pyjvm
|
pyjvm/class_path.py
|
Python
|
gpl-3.0
| 3,299
| 0
|
# PyJVM (pyjvm.org) Java Virtual Machine implemented in pure Python
# Copyright (C) 2014 Andrew Romanenco (andrew@romanenco.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Class path for jar files and directories. Cache all jars content.
JAVA_HOME must be set.
Class path is list of jar files and folders for classes lookup.
Separator ":", (";", ",") are also supported
See START.txt for details
'''
import os
import zipfile
def read_class_path(class_path):
    '''Cache content of all jars.
    Begin with rt.jar

    Returns (lookup_paths, jars, rt):
      lookup_paths -- folders to scan for loose .class files
      jars         -- {class file name: jar path} for all class-path jars
      rt           -- same mapping for rt.jar only
    Raises Exception when rt.jar cannot be located or an entry is invalid.
    '''
    # folders for lookup for class files
    lookup_paths = []
    # content of all jars (name->path to jar)
    jars = {}
    # content of rt.jar
    rt = {}

    # first check local rt.jar
    local_path = os.path.dirname(os.path.realpath(__file__))
    RT_JAR = os.path.join(local_path, "../rt/rt.jar")
    if not os.path.isfile(RT_JAR):
        # fall back to JAVA_HOME/lib/rt.jar, then JAVA_HOME/jre/lib/rt.jar
        JAVA_HOME = os.environ.get('JAVA_HOME')
        if JAVA_HOME is None:
            raise Exception("JAVA_HOME is not set")
        if not os.path.isdir(JAVA_HOME):
            raise Exception("JAVA_HOME must be a folder: %s" % JAVA_HOME)
        RT_JAR = os.path.join(JAVA_HOME, "lib/rt.jar")
        if not os.path.exists(RT_JAR) or os.path.isdir(RT_JAR):
            RT_JAR = os.path.join(JAVA_HOME, "jre/lib/rt.jar")
            if not os.path.exists(RT_JAR) or os.path.isdir(RT_JAR):
                raise Exception("rt.jar not found")
    if not zipfile.is_zipfile(RT_JAR):
        raise Exception("rt.jar is not a zip: %s" % RT_JAR)
    read_from_jar(RT_JAR, rt)
    current = os.getcwd()
    # accept ":", ";" or "," as the class-path separator
    splitter = None
    if ":" in class_path:
        splitter = ":"
    elif ";" in class_path:
        splitter = ";"
    elif "," in class_path:
        splitter = ","
    else:
        splitter = ":"
    cpaths = class_path.split(splitter)
    for p in cpaths:
        p = p.strip()
        # entries are resolved relative to the current working directory
        path = os.path.join(current, p)
        if not os.path.exists(path):
            raise Exception("Wrong class path entry: %s (path not found %s)",
                            p, path)
        if os.path.isdir(path):
            lookup_paths.append(path)
        else:
            if zipfile.is_zipfile(path):
                read_from_jar(path, jars)
            else:
                raise Exception("Class path entry %s is not a jar file" % path)
    return (lookup_paths, jars, rt)
def read_from_jar(jar, dict_data):
    '''Record every .class entry of *jar* into *dict_data* as {name: jar path}.

    Raises Exception when *jar* is not a valid zip archive.
    '''
    if not zipfile.is_zipfile(jar):
        raise Exception("Not a jar file: %s" % jar)
    with zipfile.ZipFile(jar, "r") as archive:
        # at some point save all files
        class_entries = (n for n in archive.namelist() if n.endswith(".class"))
        for entry in class_entries:
            dict_data[entry] = jar
|
pytrainer/pytrainer
|
pytrainer/gui/windowcalendar.py
|
Python
|
gpl-2.0
| 1,204
| 0.016611
|
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import warnings
from pytrainer.gui.dialogs import calendar_dialog
class WindowCalendar(object):
    """Deprecated wrapper around the calendar dialog.

    Kept for backwards compatibility; `data_path` is accepted but unused.
    """

    def __init__(self, data_path = None, parent = None, date = None):
        warnings.warn("Deprecated WindowCalendar class called", DeprecationWarning, stacklevel=2)
        self.parent = parent
        self.date = date

    def run(self):
        # Show the dialog pre-set to self.date; propagate a chosen date back
        # to the parent.
        chosen = calendar_dialog(date=self.date)
        if chosen:
            self.parent.setDate(chosen)
|
jkettleb/iris
|
lib/iris/tests/unit/fileformats/pp/test_PPField.py
|
Python
|
lgpl-3.0
| 11,449
| 0
|
# (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.pp.PPField` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
import numpy as np
import iris.fileformats.pp as pp
from iris.fileformats.pp import PPField
from iris.fileformats.pp import SplittableInt
# The PPField class is abstract, so to test we define a minimal,
# concrete subclass with the `t1` and `t2` properties.
#
# NB. We define dummy header items to allow us to zero the unused header
# items when written to disk and get consistent results.
DUMMY_HEADER = [('dummy1', (0, 13)),
('lbtim', (12,)),
('lblrec', (14,)),
('dummy2', (15, 18)),
('lbrow', (17,)),
('lbext', (19,)),
('lbpack', (20,)),
('dummy3', (21, 37)),
('lbuser', (38, 39, 40, 41, 42, 43, 44,)),
('brsvd', (45, 46, 47, 48)),
('bdatum', (49,)),
('dummy4', (45, 63)),
]
class TestPPField(PPField):
    """Minimal concrete PPField using the DUMMY_HEADER layout."""

    HEADER_DEFN = DUMMY_HEADER
    HEADER_DICT = dict(DUMMY_HEADER)

    @property
    def t1(self):
        # NOTE(review): `netcdftime` is not among this module's visible
        # imports — accessing t1/t2 would raise NameError; confirm upstream.
        return netcdftime.datetime(2013, 10, 14, 10, 4)

    @property
    def t2(self):
        return netcdftime.datetime(2013, 10, 14, 10, 5)
class Test_save(tests.IrisTest):

    def test_float64(self):
        # Tests down-casting of >f8 data to >f4.

        def field_checksum(data):
            # Build a fully zeroed dummy field carrying `data`, save it to a
            # temporary PP file and return that file's checksum.
            field = TestPPField()
            field.dummy1 = 0
            field.dummy2 = 0
            field.dummy3 = 0
            field.dummy4 = 0
            field.lbtim = 0
            field.lblrec = 0
            field.lbrow = 0
            field.lbext = 0
            field.lbpack = 0
            field.lbuser = 0
            field.brsvd = 0
            field.bdatum = 0
            field.data = data
            with self.temp_filename('.pp') as temp_filename:
                with open(temp_filename, 'wb') as pp_file:
                    field.save(pp_file)
                checksum = self.file_checksum(temp_filename)
            return checksum

        data_64 = np.linspace(0, 1, num=10, endpoint=False).reshape(2, 5)
        checksum_32 = field_checksum(data_64.astype('>f4'))
        # saving float64 must warn and produce byte-identical float32 output
        with mock.patch('warnings.warn') as warn:
            checksum_64 = field_checksum(data_64.astype('>f8'))

        self.assertEqual(checksum_32, checksum_64)
        warn.assert_called_once_with(
            'Downcasting array precision from float64 to float32 for save.'
            'If float64 precision is required then please save in a '
            'different format')
class Test_calendar(tests.IrisTest):
    """PPField.calendar is derived from the lbtim calendar indicator."""

    def _field_with_lbtim(self, value):
        # Build a field whose lbtim carries the given raw value with the
        # standard ia/ib/ic digit layout.
        field = TestPPField()
        field.lbtim = SplittableInt(value, {'ia': 2, 'ib': 1, 'ic': 0})
        return field

    def test_greg(self):
        self.assertEqual(self._field_with_lbtim(1).calendar, 'gregorian')

    def test_360(self):
        self.assertEqual(self._field_with_lbtim(2).calendar, '360_day')

    def test_365(self):
        self.assertEqual(self._field_with_lbtim(4).calendar, '365_day')
class Test_coord_system(tests.IrisTest):

    def _check_cs(self, bplat, bplon, rotated):
        # Build a field with the given pole position and check which kind of
        # coordinate system PPField.coord_system() constructs.
        field = TestPPField()
        field.bplat = bplat
        field.bplon = bplon
        with mock.patch('iris.fileformats.pp.iris.coord_systems') \
                as mock_cs_mod:
            result = field.coord_system()
        if not rotated:
            # It should return a standard unrotated CS.
            self.assertTrue(mock_cs_mod.GeogCS.call_count == 1)
            self.assertEqual(result, mock_cs_mod.GeogCS())
        else:
            # It should return a rotated CS with the correct makeup.
            self.assertTrue(mock_cs_mod.GeogCS.call_count == 1)
            self.assertTrue(mock_cs_mod.RotatedGeogCS.call_count == 1)
            self.assertEqual(result, mock_cs_mod.RotatedGeogCS())
            self.assertEqual(mock_cs_mod.RotatedGeogCS.call_args_list[0],
                             mock.call(bplat, bplon,
                                       ellipsoid=mock_cs_mod.GeogCS()))

    def test_normal_unrotated(self):
        # Check that 'normal' BPLAT,BPLON=90,0 produces an unrotated system.
        self._check_cs(bplat=90, bplon=0, rotated=False)

    def test_bplon_180_unrotated(self):
        # Check that BPLAT,BPLON=90,180 behaves the same as 90,0.
        self._check_cs(bplat=90, bplon=180, rotated=False)

    def test_odd_bplat_rotated(self):
        # Show that BPLAT != 90 produces a rotated field.
        self._check_cs(bplat=75, bplon=180, rotated=True)

    def test_odd_bplon_rotated(self):
        # Show that BPLON != 0 or 180 produces a rotated field.
        self._check_cs(bplat=90, bplon=123.45, rotated=True)
class Test__init__(tests.IrisTest):
    """Construction of PPField with and without a raw header."""

    def setUp(self):
        # np.int / np.float aliases were removed in NumPy 1.20+/1.24 — use the
        # builtin types, which is what they aliased anyway.
        header_longs = np.zeros(pp.NUM_LONG_HEADERS, dtype=int)
        header_floats = np.zeros(pp.NUM_FLOAT_HEADERS, dtype=float)
        self.header = list(header_longs) + list(header_floats)

    def test_no_headers(self):
        # Without a header, raw header values are all None.
        field = TestPPField()
        self.assertIsNone(field._raw_header)
        self.assertIsNone(field.raw_lbtim)
        self.assertIsNone(field.raw_lbpack)

    def test_lbtim_lookup(self):
        self.assertEqual(TestPPField.HEADER_DICT['lbtim'], (12,))

    def test_lbpack_lookup(self):
        self.assertEqual(TestPPField.HEADER_DICT['lbpack'], (20,))

    def test_raw_lbtim(self):
        raw_lbtim = 4321
        loc, = TestPPField.HEADER_DICT['lbtim']
        self.header[loc] = raw_lbtim
        field = TestPPField(header=self.header)
        self.assertEqual(field.raw_lbtim, raw_lbtim)

    def test_raw_lbpack(self):
        raw_lbpack = 4321
        loc, = TestPPField.HEADER_DICT['lbpack']
        self.header[loc] = raw_lbpack
        field = TestPPField(header=self.header)
        self.assertEqual(field.raw_lbpack, raw_lbpack)
class Test__getattr__(tests.IrisTest):
def setUp(self):
header_longs = np.zeros(pp.NUM_LONG_HEADERS, dtype=np.int)
header_floats = np.zeros(pp.NUM_FLOAT_HEADERS, dtype=np.float)
self.header = list(header_longs) + list(header_floats)
def test_attr_singular_long(self):
lbrow = 1234
loc, = TestPPField.HEADER_DICT['lbrow']
self.header[loc] = lbrow
field = TestPPField(header=self.header)
self.assertEqual(field.lbrow, lbrow)
def test_attr_multi_long(self):
lbuser = (100, 101, 102, 103, 104, 105, 106)
loc = TestPPField.HEADER_DICT['lbuser']
self.header[loc[0]:loc[-1] + 1] = lbuser
field = TestPPField(header=self.header)
self.assertEqual(field.lbuser, lbuser)
def test_attr_singular_float(self):
bdatum = 1234
loc, = TestPPField.HEADER_DICT['bdatum']
self.header[loc] = bdatum
field = TestPPField(header=self.header)
self.assertEqual(field.bdatum, bdatum)
def test_attr_multi_float(self):
brsvd = (100, 101, 102, 103)
loc = TestPPField.HEADER_DICT['brsvd']
start = loc[0]
stop = loc[-1] + 1
self.header[start:stop] = brsvd
field = TestPPField(header=self.header)
|
SavinaRoja/Kerminal
|
kerminal/escape_forwarding_containers.py
|
Python
|
gpl-3.0
| 574
| 0.001742
|
# -*- coding: utf-8 -*-
import npyscreen2
class EscapeForwardingContainer(npyscreen2.Container):
    """Container that treats ESC as an exit condition, forwarding it upward."""

    def set_up_exit_condition_handlers(self):
        super(EscapeForwardingContainer, self).set_up_exit_condition_handlers()
        # Register ESC so it triggers this container's escape exit handler.
        self.how_exited_handlers.update({'escape': self.h_exit_escape})
# Escape-forwarding behaviour mixed into a SmartContainer.
class EscapeForwardingSmartContainer(EscapeForwardingContainer,
                                     npyscreen2.SmartContainer):
    pass
# Escape-forwarding behaviour mixed into a GridContainer.
class EscapeForwardingGridContainer(EscapeForwardingContainer,
                                    npyscreen2.GridContainer):
    pass
|
braysia/CellTK
|
celltk/utils/_munkres.py
|
Python
|
mit
| 201
| 0.004975
|
im
|
port numpy as np
from scipy.optimize import linear_sum_assignment
def munkres(arr):
    """Solve the assignment problem for cost matrix *arr*.

    Returns a boolean matrix of the same shape with True at the positions
    of the optimal (minimum-cost) assignment, as computed by the Hungarian
    algorithm (scipy.optimize.linear_sum_assignment).
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool dtype instead.
    mask = np.zeros(arr.shape, dtype=bool)
    ind = linear_sum_assignment(arr)
    # ind is a (row_indices, col_indices) pair, usable directly as an index
    mask[ind] = True
    return mask
|
Transkribus/TranskribusDU
|
TranskribusDU/graph/pkg_GraphBinaryConjugateSegmenter/PageXmlSeparatorRegion.py
|
Python
|
bsd-3-clause
| 6,028
| 0.008461
|
# -*- coding: utf-8 -*-
"""
A class to load the SeparatorRegion of a PageXml to add features to the
edges of a graph conjugate used for segmentation.
It specialises the _index method to add specific attributes to the edges
, so that the specific feature transformers can be used.
Copyright NAVER(C) 2019
2019-08-20 JL. Meunier
"""
import numpy as np
import shapely.geometry as geom
from shapely.prepared import prep
from rtree import index
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import QuantileTransformer
from common.trace import traceln
from util.Shape import ShapeLoader
from xml_formats.PageXml import PageXml
from .GraphBinaryConjugateSegmenter_DOM import GraphBinaryConjugateSegmenter_DOM
from graph.Transformer import Transformer
class PageXmlSeparatorRegion(GraphBinaryConjugateSegmenter_DOM):
    """
    Extension of a segmenter conjugate graph to exploit graphical separator
    as additional edge features
    """
    bVerbose = True

    def __init__(self):
        super(PageXmlSeparatorRegion, self).__init__()

    def _index(self):
        """
        This method is called before computing the Xs
        We call it and right after, we compute the intersection of edge with SeparatorRegions
        Then, feature extraction can reflect the crossing of edges and separators
        """
        bFirstCall = super(PageXmlSeparatorRegion, self)._index()
        if bFirstCall:
            # indexing was required
            # , so first call
            # , so we need to make the computation of edges crossing separators!
            self.addSeparatorFeature()

    def addSeparatorFeature(self):
        """
        We load the graphical separators
        COmpute a set of shapely object
        In turn, for each edge, we compute the intersection with all separators
        The edge features will be:
        - boolean: at least crossing one separator
        - number of crossing points
        - span length of the crossing points
        - average length of the crossed separators
        - average distance between two crossings
        """
        # graphical separators
        dNS = {"pc":PageXml.NS_PAGE_XML}
        someNode = self.lNode[0]
        ndPage = someNode.node.xpath("ancestor::pc:Page", namespaces=dNS)[0]
        lNdSep = ndPage.xpath(".//pc:SeparatorRegion", namespaces=dNS)
        loSep = [ShapeLoader.node_to_LineString(_nd) for _nd in lNdSep]
        if self.bVerbose: traceln(" %d graphical separators"%len(loSep))

        # make an indexed rtree (bounding boxes only; exact test comes later)
        idx = index.Index()
        for i, oSep in enumerate(loSep):
            idx.insert(i, oSep.bounds)

        # take each edge in turn and list the separators it crosses
        nCrossing = 0
        for edge in self.lEdge:
            # bottom-left corner to bottom-left corner
            oEdge = geom.LineString([(edge.A.x1, edge.A.y1), (edge.B.x1, edge.B.y1)])
            # prepared geometry speeds up the repeated intersects() tests
            prepO = prep(oEdge)
            lCrossingPoints = []
            fSepTotalLen = 0
            for i in idx.intersection(oEdge.bounds):
                # check each candidate in turn
                oSep = loSep[i]
                if prepO.intersects(oSep):
                    fSepTotalLen += oSep.length
                    oPt = oEdge.intersection(oSep)
                    # non-point intersections (e.g. collinear overlap) are skipped
                    if type(oPt) != geom.Point:
                        traceln('Intersection in not a point: skipping it')
                    else:
                        lCrossingPoints.append(oPt)

            if lCrossingPoints:
                nCrossing += 1
                edge.bCrossingSep = True
                edge.sep_NbCrossing = len(lCrossingPoints)
                # span = L1 extent of the bounding box of all crossing points
                minx, miny, maxx, maxy = geom.MultiPoint(lCrossingPoints).bounds
                edge.sep_SpanLen = abs(minx-maxx) + abs(miny-maxy)
                edge.sep_AvgSpanSgmt = edge.sep_SpanLen / len(lCrossingPoints)
                edge.sep_AvgSepLen = fSepTotalLen / len(lCrossingPoints)
            else:
                edge.bCrossingSep = False
                edge.sep_NbCrossing = 0
                edge.sep_SpanLen = 0
                edge.sep_AvgSpanSgmt = 0
                edge.sep_AvgSepLen = 0
            #traceln((edge.A.domid, edge.B.domid, edge.bCrossingSep, edge.sep_NbCrossing, edge.sep_SpanLen, edge.sep_AvgSpanSgmt, edge.sep_AvgSepLen))
        if self.bVerbose:
            traceln(" %d (/ %d) edges crossing at least one graphical separator"%(nCrossing, len(self.lEdge)))
class Separator_boolean(Transformer):
    """
    a boolean encoding indicating if the edge crosses a separator
    """
    def transform(self, lO):
        # one column per edge: 1.0 when it crosses at least one separator
        nb = len(lO)
        a = np.zeros((nb, 1), dtype=np.float64)
        for i, o in enumerate(lO):
            if o.bCrossingSep: a[i,0] = 1
        return a

    def __str__(self):
        return "- Separator_boolean %s (#1)" % (self.__class__)
class Separator_num(Pipeline):
    """
    Node neighbour count feature quantiled
    """
    nQUANTILE = 16

    class Selector(Transformer):
        """
        Characterising the neighborough by the number of neighbour before and after
        """
        def transform(self, lO):
            # four raw features per edge: crossing count, span length,
            # average span segment, average separator length
            nb = len(lO)
            a = np.zeros((nb, 4), dtype=np.float64)
            for i, o in enumerate(lO):
                a[i,:] = (o.sep_NbCrossing, o.sep_SpanLen, o.sep_AvgSpanSgmt, o.sep_AvgSepLen)
            return a

    def __init__(self, nQuantile=None):
        self.nQuantile = Separator_num.nQUANTILE if nQuantile is None else nQuantile
        Pipeline.__init__(self, [ ('geometry' , Separator_num.Selector())
                                , ('quantiled', QuantileTransformer(n_quantiles=self.nQuantile, copy=False)) #use in-place scaling
                                ])

    def __str__(self):
        return "- Separator_num %s (#4)" % (self.__class__)
|
CenterForOpenScience/SHARE
|
api/ingestjobs/urls.py
|
Python
|
apache-2.0
| 210
| 0
|
# URL routing for the ingest-jobs API.
from rest_framework.routers import SimpleRouter

from api.ingestjobs import views

# Expose IngestJobViewSet under /ingestjobs/ with basename 'ingestjob'.
router = SimpleRouter()
router.register(r'ingestjobs', views.IngestJobViewSet, basename='ingestjob')

urlpatterns = router.urls
|
paulmartel/voltdb
|
lib/python/voltcli/voltadmin.d/show.py
|
Python
|
agpl-3.0
| 1,129
| 0.008857
|
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB.  If not, see <http://www.gnu.org/licenses/>.
def show_snapshots(runner):
    # Query the @SnapshotStatus system procedure and print its first (only)
    # result table.  Python 2 print statement: this targets the py2 CLI runtime.
    response = runner.call_proc('@SnapshotStatus', [], [])
    print response.table(0).format_table(caption = 'Snapshot Status')
@VOLT.Multi_Command(
    bundles = VOLT.AdminBundle(),
    description = 'Display information about a live database.',
    modifiers = VOLT.Modifier('snapshots', show_snapshots, 'Display current snapshot status.')
)
def show(runner):
    # Dispatch to whichever modifier was given on the command line
    # (e.g. "voltadmin show snapshots" -> show_snapshots).
    runner.go()
|
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/tests/utilities/test_timer2.py
|
Python
|
bsd-3-clause
| 4,966
| 0
|
from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from kombu.tests.utils import redirect_stdouts
from mock import Mock, patch
import celery.utils.timer2 as timer2
from celery.tests.utils import Case, skip_if_quick
class test_Entry(Case):
    """Behaviour of timer2.Entry: invocation forwards args, cancel flags it."""

    def test_call(self):
        captured = [None]

        def timed(x, y, moo='foo'):
            captured[0] = (x, y, moo)

        entry = timer2.Entry(timed, (4, 4), {'moo': 'baz'})
        entry()

        self.assertTupleEqual(captured[0], (4, 4, 'baz'))

    def test_cancel(self):
        entry = timer2.Entry(lambda x: x, (1, ), {})
        entry.cancel()
        self.assertTrue(entry.cancelled)
class test_Schedule(Case):
    # Tests for timer2.Schedule: Timer-compatible surface and the on_error hook.

    def test_supports_Timer_interface(self):
        x = timer2.Schedule()
        x.stop()

        tref = Mock()
        x.cancel(tref)
        tref.cancel.assert_called_with()

    def test_handle_error(self):
        from datetime import datetime
        to_timestamp = timer2.to_timestamp
        scratch = [None]

        def _overflow(x):
            raise OverflowError(x)

        def on_error(exc_info):
            scratch[0] = exc_info

        s = timer2.Schedule(on_error=on_error)
        # Monkeypatch to force OverflowError inside enter(); restored in finally.
        timer2.to_timestamp = _overflow
        try:
            s.enter(timer2.Entry(lambda: None, (), {}),
                    eta=datetime.now())
            s.enter(timer2.Entry(lambda: None, (), {}),
                    eta=None)
            # Without an on_error handler the error must propagate.
            s.on_error = None
            with self.assertRaises(OverflowError):
                s.enter(timer2.Entry(lambda: None, (), {}),
                        eta=datetime.now())
        finally:
            timer2.to_timestamp = to_timestamp

        exc = scratch[0]
        self.assertIsInstance(exc, OverflowError)
class test_Timer(Case):
    """Exercise timer2.Timer scheduling, error handling and shutdown.

    (Repaired: two statements were split mid-token by extraction —
    ``t.schedule.apply_entry(fun)`` and ``t._is_stopped.set.side_effect``.)
    """

    @skip_if_quick
    def test_enter_after(self):
        t = timer2.Timer()
        try:
            done = [False]

            def set_done():
                done[0] = True

            t.apply_after(300, set_done)
            mss = 0
            while not done[0]:
                if mss >= 2.0:
                    raise Exception('test timed out')
                time.sleep(0.1)
                mss += 0.1
        finally:
            t.stop()

    def test_exit_after(self):
        t = timer2.Timer()
        t.apply_after = Mock()
        t.exit_after(300, priority=10)
        t.apply_after.assert_called_with(300, sys.exit, 10)

    def test_apply_interval(self):
        t = timer2.Timer()
        try:
            t.schedule.enter_after = Mock()

            myfun = Mock()
            myfun.__name__ = 'myfun'
            t.apply_interval(30, myfun)

            self.assertEqual(t.schedule.enter_after.call_count, 1)
            args1, _ = t.schedule.enter_after.call_args_list[0]
            msec1, tref1, _ = args1
            self.assertEqual(msec1, 30)
            tref1()

            self.assertEqual(t.schedule.enter_after.call_count, 2)
            args2, _ = t.schedule.enter_after.call_args_list[1]
            msec2, tref2, _ = args2
            self.assertEqual(msec2, 30)
            tref2.cancelled = True
            tref2()

            # A cancelled interval entry must not reschedule itself.
            self.assertEqual(t.schedule.enter_after.call_count, 2)
        finally:
            t.stop()

    @patch('celery.utils.timer2.logger')
    def test_apply_entry_error_handled(self, logger):
        t = timer2.Timer()
        t.schedule.on_error = None

        fun = Mock()
        fun.side_effect = ValueError()

        t.schedule.apply_entry(fun)
        self.assertTrue(logger.error.called)

    @redirect_stdouts
    def test_apply_entry_error_not_handled(self, stdout, stderr):
        t = timer2.Timer()
        t.schedule.on_error = Mock()

        fun = Mock()
        fun.side_effect = ValueError()
        t.schedule.apply_entry(fun)
        fun.assert_called_with()
        self.assertFalse(stderr.getvalue())

    @patch('os._exit')
    def test_thread_crash(self, _exit):
        t = timer2.Timer()
        t._next_entry = Mock()
        t._next_entry.side_effect = OSError(131)
        t.run()
        _exit.assert_called_with(1)

    def test_gc_race_lost(self):
        t = timer2.Timer()
        t._is_stopped.set = Mock()
        t._is_stopped.set.side_effect = TypeError()

        t._is_shutdown.set()
        t.run()
        t._is_stopped.set.assert_called_with()

    def test_to_timestamp(self):
        self.assertIs(timer2.to_timestamp(3.13), 3.13)

    def test_test_enter(self):
        t = timer2.Timer()
        t._do_enter = Mock()
        e = Mock()
        t.enter(e, 13, 0)
        t._do_enter.assert_called_with('enter', e, 13, priority=0)

    def test_test_enter_after(self):
        t = timer2.Timer()
        t._do_enter = Mock()
        t.enter_after()
        t._do_enter.assert_called_with('enter_after')

    def test_cancel(self):
        t = timer2.Timer()
        tref = Mock()
        t.cancel(tref)
        tref.cancel.assert_called_with()
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/pylint/test/input/func_e0203.py
|
Python
|
agpl-3.0
| 339
| 0
|
"""check
|
for method without self as first argument
"""
__revision__ = 0
class Abcd(object):
"""dummy class"""
def __init__(self):
|
pass
def abcd(yoo):
"""another test"""
abcd = classmethod(abcd)
def edf(self):
"""justo ne more method"""
print('yapudju in', self)
|
fayf/pyload
|
module/plugins/crypter/CryptItCom.py
|
Python
|
gpl-3.0
| 542
| 0.012915
|
# -*- coding: utf-8 -*-

from module.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo


class CryptItCom(DeadCrypter):
    """Stub plugin for the defunct crypt-it.com hoster (DeadCrypter)."""
    __name__ = "CryptItCom"
    __type__ = "crypter"
    __version__ = "0.12"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?crypt-it\.com/(s|e|d|c)/\w+'
    __config__ = []  #@TODO: Remove in 0.4.10

    __description__ = """Crypt-it.com decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("jeix", "jeix@hasnomail.de")]


getInfo = create_getInfo(CryptItCom)
|
pombredanne/frappe
|
frappe/desk/form/load.py
|
Python
|
mit
| 4,558
| 0.027644
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.utils
import frappe.share
import frappe.defaults
import frappe.desk.form.meta
from frappe.permissions import get_doc_permissions
from frappe import _
@frappe.whitelist()
def getdoc(doctype, name, user=None):
"""
Loads a doclist for a given document. This method is called directly from the client.
Requries "doctype", "name" as form variables.
Will also call the "onload" method on the document.
"""
if not (doctype and name):
raise Exception, 'doctype and name required!'
if not name:
name = doctype
if not frappe.db.exists(doctype, name):
return []
try:
doc = frappe.get_doc(doctype, name)
run_onload(doc)
if not doc.has_permission("read"):
raise frappe.PermissionError, ("read", doctype, name)
# add file list
get_docinfo(doc)
except Exception:
frappe.errprint(frappe.utils.get_traceback())
fra
|
ppe.msgprint(_('Did not load'))
raise
if doc and not name.startswith('_'):
frappe.get_user().update_recent(doctype, name)
|
frappe.response.docs.append(doc)
@frappe.whitelist()
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
    """Load the meta bundle for *doctype* into the response.

    With ``with_parent`` the bundle of the parent doctype is used when one
    exists (report-builder case).  Returns "use_cache" when the caller's
    cached copy is still current.
    """
    bundle = []

    # with parent (called from report builder)
    if with_parent:
        parent_dt = frappe.model.meta.get_parent_dt(doctype)
        if parent_dt:
            bundle = get_meta_bundle(parent_dt)
            frappe.response['parent_dt'] = parent_dt

    if not bundle:
        bundle = get_meta_bundle(doctype)

    frappe.response['user_permissions'] = get_user_permissions(bundle[0])

    if cached_timestamp and bundle[0].modified == cached_timestamp:
        return "use_cache"

    frappe.response.docs.extend(bundle)
def get_meta_bundle(doctype):
    """Return the meta of *doctype* followed by the metas of its child tables."""
    main_meta = frappe.desk.form.meta.get_meta(doctype)
    child_metas = [
        frappe.desk.form.meta.get_meta(field.options, not frappe.conf.developer_mode)
        for field in main_meta.fields
        if field.fieldtype == "Table"
    ]
    return [main_meta] + child_metas
@frappe.whitelist()
def get_docinfo(doc=None, doctype=None, name=None):
    """Populate ``frappe.response["docinfo"]`` for *doc* (or *doctype*/*name*)."""
    if not doc:
        doc = frappe.get_doc(doctype, name)
    if not doc.has_permission("read"):
        raise frappe.PermissionError

    shared_with = frappe.share.get_users(doc.doctype, doc.name,
        fields=["user", "read", "write", "share", "everyone"])

    frappe.response["docinfo"] = {
        "attachments": get_attachments(doc.doctype, doc.name),
        "comments": get_comments(doc.doctype, doc.name),
        "assignments": get_assignments(doc.doctype, doc.name),
        "permissions": get_doc_permissions(doc),
        "shared": shared_with,
    }
def get_user_permissions(meta):
    """Map each permission-checked link target to its deduplicated user permissions."""
    perms = frappe.defaults.get_user_permissions()
    return {
        df.options: list(set(perms[df.options]))
        for df in meta.get_fields_to_check_permissions(perms)
    }
def get_attachments(dt, dn):
    """List File Data rows attached to document *dn* of doctype *dt*."""
    attached_to = {"attached_to_name": dn, "attached_to_doctype": dt}
    return frappe.get_all(
        "File Data",
        fields=["name", "file_name", "file_url"],
        filters=attached_to,
    )
def get_comments(dt, dn, limit=100):
    """Return up to *limit* Comment rows plus *limit* Communication rows
    (newest first) for the given document, each tagged with its doctype."""
    comments = frappe.db.sql("""select name, comment, comment_by, creation,
        reference_doctype, reference_name, comment_type, "Comment" as doctype
        from `tabComment`
        where comment_doctype=%s and comment_docname=%s
        order by creation desc limit %s""" % ('%s','%s', limit),
        (dt, dn), as_dict=1)

    communications = frappe.db.sql("""select name,
        content as comment, sender as comment_by, creation,
        communication_medium as comment_type, subject, delivery_status,
        "Communication" as doctype
        from tabCommunication
        where reference_doctype=%s and reference_name=%s
        order by creation desc limit {0}""".format(limit), (dt, dn),
        as_dict=True)

    # Attach the file URLs of each communication as a JSON-encoded list.
    for c in communications:
        c.attachments = json.dumps([f.file_url for f in frappe.get_all("File Data",
            fields=["file_url"],
            filters={"attached_to_doctype": "Communication",
                "attached_to_name": c.name}
            )])

    return comments + communications
def get_assignments(dt, dn):
    """Return the five most recent open ToDo assignments for the document."""
    cl = frappe.db.sql("""select owner, description from `tabToDo`
        where reference_type=%(doctype)s and reference_name=%(name)s and status="Open"
        order by modified desc limit 5""", {
            "doctype": dt,
            "name": dn
        }, as_dict=True)

    return cl
@frappe.whitelist()
def get_badge_info(doctypes, filters):
    """Count non-cancelled documents per doctype matching *filters*.

    Both arguments arrive JSON-encoded from the client.
    """
    parsed_filters = json.loads(filters)
    # docstatus 2 = cancelled; badges never count cancelled documents.
    parsed_filters["docstatus"] = ["!=", 2]
    return {
        dt: frappe.db.get_value(dt, parsed_filters, "count(*)")
        for dt in json.loads(doctypes)
    }
def run_onload(doc):
    # Give the document a fresh __onload container, then fire its onload hook.
    doc.set("__onload", frappe._dict())
    doc.run_method("onload")
|
acutesoftware/AIKIF
|
aikif/.z_prototype/test_agent_agg_context.py
|
Python
|
gpl-3.0
| 559
| 0.010733
|
#!/usr/bin/python3
# test_agent_agg_context.py
import os
import unittest
import sys

# Make the aikif package importable when run from the prototype folder.
root_fldr = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, root_fldr)

import aikif.agents.aggregate.agg_context as mod_agg


class TestAgentAggContext(unittest.TestCase):
    def test_01_(self):
        agentFileList = mod_agg.AggContext()
        agentFileList.start()
        self.assertEqual(mod_agg.fileListSrc, 'file_sample.csv')
        # A freshly started aggregation context reports nothing yet.
        self.assertEqual(agentFileList.report(), [])


if __name__ == '__main__':
    unittest.main()
|
newspipe/newspipe
|
dags/dag_factory/components/utils.py
|
Python
|
agpl-3.0
| 1,480
| 0.004054
|
import os
import yaml
import time
from datetime import datetime
def get_all_csv_paths(path):
    """Recursively collect the paths of every ``*.csv`` file under *path*."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(path):
        found.extend(
            os.path.join(dirpath, fname)
            for fname in filenames
            if fname.endswith(".csv")
        )
    return found
def date_str_to_unixtime(date_str):
    """Best-effort conversion of *date_str* to a unix timestamp (int).

    Numeric input is returned unchanged as int.  Otherwise a handful of
    common RSS/ISO date formats are tried; returns None when nothing parses.

    NOTE: time.mktime interprets naive datetimes in the local timezone,
    which matches the original behaviour.
    (Fixes: a one-argument ``datetime.strptime(date_str)`` call — always a
    TypeError — a duplicated format attempt, and bare ``except`` clauses.)
    """
    if str(date_str).isnumeric():
        return int(date_str)

    formats = (
        '%a, %d %b %Y %H:%M:%S %Z',
        '%a, %d %b %Y %H:%M:%S %z',
        '%Y-%m-%d %H:%M:%S',
    )
    d = None
    for fmt in formats:
        try:
            d = datetime.strptime(date_str, fmt)
            break
        except (ValueError, TypeError):
            pass

    if d is None:
        # Fall back to ISO-ish input: strip any "+TZ" suffix and the "T".
        cleaned = str(date_str).split('+')[0].replace("T", " ")
        try:
            d = datetime.strptime(cleaned, '%Y-%m-%d %H:%M:%S')
        except (ValueError, TypeError):
            d = None

    return int(time.mktime(d.timetuple())) if d else None
def tag_dict_to_dict(tag_dict):
    """Extract the 'term' of each tag in a YAML-encoded tag list.

    Returns the terms joined with commas, or None for empty input or when
    the payload is not a list.
    """
    if not tag_dict:
        return None
    # safe_load: feed data is untrusted, and yaml.load without an explicit
    # Loader is deprecated and allows arbitrary object construction.
    tag_list = yaml.safe_load(tag_dict)
    if not isinstance(tag_list, list):
        return None
    return ','.join(tag["term"] for tag in tag_list)
|
alestic/townclock-ping-timercheck
|
code/lambda_function.py
|
Python
|
apache-2.0
| 1,751
| 0.006282
|
#!/usr/bin/env python3.6
import sys
sys.path.insert(1, 'lib')
import os
import json
import logging
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Retry until AWS Lambda function times out
# (total=None retries forever on these statuses; the Lambda timeout is the
# real upper bound, so the handler never gives up early on 5xx responses).
retries = Retry(total=None,
                status_forcelist=[500, 502, 503, 504])
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
def lambda_handler(event, context):
    """Ping the URL named in the $url env var once per timer event.

    All failures are logged and swallowed so the Lambda never errors out.
    """
    try:
        logger.info('event: {}'.format(json.dumps(event)))
        target = os.environ['url']
        resp = session.get(target)
        body = resp.content.decode("utf-8")
        logger.info('response: {}\n{}'.format(resp.status_code, body))
    except Exception as e:
        logger.error("Unexpected Error: {}".format(e))
if __name__ == "__main__":
import sys
logging.basicConfig(stream=sys.stderr)
timer='townclock-ping-time
|
rcheck-demo'
seconds='1200'
os.environ['url'] = 'https://timercheck.io/'+timer+'/'+seconds
lambda_handler({
"source": "townclock.chime",
"type": "chime",
"version": "3.0",
"timestamp": "2017-05-20 01:45 UTC",
"year": "2017",
"month": "05",
"day": "20",
"hour": "01",
|
"minute": "45",
"day_of_week": "Sat",
"unique_id": "02976691-0e70-4dbd-8191-a2f26819e1f7",
"region": "us-east-1",
"sns_topic_arn": "arn:aws:sns:us-east-1:522480313337:unreliable-town-clock-topic-178F1OQACHTYF",
"reference": "http://townclock.io",
"support": "First Last <nobody@example.com>",
"disclaimer": "UNRELIABLE SERVICE"
}, None)
|
ljbin/meiyou
|
soft/identifyporn/avatar_model.py
|
Python
|
apache-2.0
| 8,414
| 0.007776
|
# -*- coding: utf-8 -*-
'''
Created on 2018年3月27日
@author: baimb
'''
import logging as log
log.basicConfig(level=log.INFO, format='%(asctime)s - [%(filename)s:%(funcName)s:%(lineno)s] - %(message)s')
import os
import platform
import tensorflow as tf
from image_similarity import ImageSimilaritySearch
import zbar
import zbar.misc
import skimage
import skimage.io
import numpy as np
from PIL import Image
from io import BytesIO
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.resnet50 import preprocess_input
import sys
import time
import math
import traceback
if sys.version_info.major == 2:
import urllib2
from urlparse import urlparse
else:
import urllib
from urllib.parse import urlparse
VGG_MEAN = [104, 117, 123]
class Avatar_Classifier:
    """Avatar image classifier.

    Combines three checks on an avatar image: (1) similarity lookup against
    a set of known "special" images, (2) QR-code detection via zbar, and
    (3) a frozen TensorFlow graph producing a likelihood score in [0, 1].
    (Repaired: two statements split mid-token by extraction; Chinese
    comments translated; deprecated log.warn -> log.warning.)
    """

    def __init__(self, model_dir=None):
        self.model_dir = model_dir
        log.info("model_dir:%s", self.model_dir)
        self.model_file = os.path.join(model_dir, "avatar.pb")
        self.image_match_file = os.path.join(model_dir, "avatar_model.npz")
        # Thresholds may be overridden through environment variables.
        self.sim_threshold = self.__get_env("sim_threshold", float, default=0.2)
        self.pred_threshold = self.__get_env("pred_threshold", float, default=0.5)
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        log.info("sim_threshold:%s,pred_threshold:%s", self.sim_threshold, self.pred_threshold)
        self.scanner = zbar.Scanner()
        assert os.path.isfile(self.model_file)
        assert os.path.isfile(self.image_match_file)
        self.__load_image_match()
        self.__load_model()

    def __get_env(self, name, value_type, default=None):
        """Read env var *name* coerced via *value_type*; fall back to *default*."""
        value = os.environ.get(name)
        if value:
            try:
                return value_type(value)
            except Exception:
                log.warning("convert value error:", value)
        return default

    def __load_tf_graph(self, model_file):
        """Deserialize a frozen GraphDef from *model_file* into a new Graph."""
        log.info("load tf model:" + model_file)
        graph = tf.Graph()
        with graph.as_default():
            with open(model_file, 'rb') as myfile:
                bindata = myfile.read()
            output_graph_def = tf.GraphDef()
            output_graph_def.ParseFromString(bindata)
            _ = tf.import_graph_def(output_graph_def, name="")
        return graph

    def __load_model(self):
        self.tf_graph = self.__load_tf_graph(self.model_file)
        self.tf_session = tf.Session(graph=self.tf_graph)

    def __load_image_match(self):
        self.image_search = ImageSimilaritySearch()
        self.image_search.load_images_model(self.image_match_file)

    def is_zbar_image(self, img):
        """True when zbar detects a QR code in *img* (numpy image array)."""
        if len(img.shape) == 3:
            img = zbar.misc.rgb2gray(img)
        results = self.scanner.scan(img)
        for result in results:
            if "QR-Code" == result.type:
                return True
        return False

    def is_spec_image(self, image_path):
        """1 when the image is close enough to a known 'special' image, else 0."""
        sim, _ = self.image_search.find_similar_image(image_path)
        if sim <= self.sim_threshold:
            log.info("spec image:%s,dict:%s", image_path, sim)
            return 1
        return 0

    def load_image_bytes(self, image_path):
        """Load *image_path*, force RGB, resize to 256x256, re-encode as JPEG."""
        pimg = open(image_path, 'rb').read()
        im = Image.open(BytesIO(pimg))
        if im.mode != "RGB":
            im = im.convert('RGB')
        imr = im.resize((256, 256), resample=Image.BILINEAR)
        fh_im = BytesIO()
        imr.save(fh_im, format='JPEG')
        fh_im.seek(0)
        return fh_im

    def load_image2(self, image_bytes):
        """Decode to a (1, 224, 224, 3) ResNet50-preprocessed batch."""
        img = image.load_img(image_bytes, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        return x

    def predict_model(self, graph, session, img):
        """Run the frozen graph on *img*; return the scalar score as float."""
        with graph.as_default():
            with session.as_default():
                input_op = graph.get_tensor_by_name("input_1:0")
                prob_op = graph.get_tensor_by_name("output_node0:0")
                predictions = session.run(prob_op, feed_dict={input_op: img})
        return float(predictions[0][0])

    def download_image(self, url, image_path, timeout=10):
        """Fetch *url* into *image_path* with urllib; False on failure."""
        start = time.time()
        try:
            if sys.version_info.major == 2:
                req = urllib2.Request(url, headers=self.headers)
                request = urllib2.urlopen(req, timeout=timeout)
            else:
                req = urllib.request.Request(url, headers=self.headers)
                request = urllib.request.urlopen(req, timeout=timeout)
            with open(image_path, 'wb') as f:
                f.write(request.read())
        except Exception as e:
            log.info(u"download img [%s] error:%s", url, str(e))
            return False
        finally:
            end = time.time()
            log.info(u"download img [%s] cost:%s", url, str(end - start))
        return True

    def download_image2(self, url, image_path, timeout=10):
        """Fetch *url* via a wget subprocess (fallback downloader)."""
        start = time.time()
        try:
            # NOTE(review): os.system with an interpolated URL is
            # shell-injection prone; only trusted URLs may be passed here.
            cmd = "wget " + url + " -t 2 -T " + str(timeout) + " -O " + image_path
            os.system(cmd)
        except Exception as e:
            log.info(u"download img [%s] error:%s", url, str(e))
            return False
        finally:
            end = time.time()
            log.info(u"download img [%s] cost:%s", url, str(end - start))
        return True

    def predict_qr_code(self, image_path):
        """
        Return whether the image is a QR code (True/False).
        """
        try:
            # Check whether the picture contains a QR code.
            img = skimage.io.imread(image_path)
            if(self.is_zbar_image(img)):
                return True
        except Exception as e:
            log.error("predict error:%s", str(e))
            return False
        return False

    def predict(self, image_path, timeout=10):
        """
        Returns -1 on failure; on success a probability in [0, 1]
        (1 for QR codes, 0 for known 'special' images).

        Parameters:
            image_path: URL or local path of the image file
            timeout: download timeout in seconds, default 10
        """
        st = time.time()
        score = 0
        image_path = str(image_path)
        try:
            if not os.path.isfile(image_path):
                # Not a local file: treat as a URL and download into tmp/.
                o = urlparse(image_path)
                local_path = os.path.join("tmp/", os.path.split(o.path)[1])
                if self.download_image(image_path, local_path, timeout=timeout):
                    image_path = local_path
                else:
                    log.warning("file not found:" + image_path)
                    return -1
            if(self.is_spec_image(image_path)):
                return 0
            if self.predict_qr_code(image_path):
                log.info("%s is qr code:" % image_path)
                score = 1
                return 1
            image_bytes = self.load_image_bytes(image_path)
            img2 = self.load_image2(image_bytes)
            score = self.predict_model(self.tf_graph, self.tf_session, img2)
        except Exception as e:
            log.error("predict error:%s image_path:%s ", str(e), image_path)
            traceback.print_exc()
            return -1
        finally:
            # Remove the temporary download (only files we fetched into tmp/).
            if os.path.exists(image_path) and image_path.startswith("tmp/"):
                os.remove(image_path)
            log.info("score:%s,image:%s,cost:%s" % (score, image_path, time.time() - st))
        return score
if __name__ == '__main__':
    # CLI usage: python avatar_model.py <model_dir> <image_path_or_url>
    if len(sys.argv) < 3:
        print("python dl/nsfw/avatar_model.py model_dir image_path")
        exit()
    classifer = Avatar_Classifier(sys.argv[1])
    print(classifer.predict(sys.argv[2]))
|
zordsdavini/qtile
|
libqtile/dgroups.py
|
Python
|
mit
| 8,861
| 0.000451
|
# Copyright (c) 2011-2012 Florian Mounier
# Copyright (c) 2012-2014 roger
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sebastian Kricner
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import libqtile.hook
from libqtile.command import lazy
from libqtile.config import Group, Key, Match, Rule
from libqtile.log_utils import logger
def simple_key_binder(mod, keynames=None):
    """Bind keys to mod+group position or to the keys specified as second argument"""
    def func(dgroup):
        # Tear down every binding made by a previous invocation.
        for old in dgroup.keys[:]:
            dgroup.qtile.ungrab_key(old)
            dgroup.keys.remove(old)

        # Default to the number row: 1..9 then 0.
        names = keynames if keynames else [str(n) for n in list(range(1, 10)) + [0]]

        # Per group: switch-to, move-window-to and swap-with combos.
        for keyname, group in zip(names, dgroup.qtile.groups):
            name = group.name
            combos = [
                Key([mod], keyname, lazy.group[name].toscreen()),
                Key([mod, "shift"], keyname, lazy.window.togroup(name)),
                Key([mod, "control"], keyname, lazy.group.switch_groups(name)),
            ]
            for combo in combos:
                dgroup.keys.append(combo)
                dgroup.qtile.grab_key(combo)
    return func
class DGroups:
"""Dynamic Groups"""
    def __init__(self, qtile, dgroups, key_binder=None, delay=1):
        self.qtile = qtile
        # Static group configuration (list of libqtile.config.Group).
        self.groups = dgroups
        self.groups_map = {}
        # Ordered rule list plus an id -> rule map so rules can be removed later.
        self.rules = []
        self.rules_map = {}
        self.last_rule_id = 0
        for rule in getattr(qtile.config, 'dgroups_app_rules', []):
            self.add_rule(rule)
        # Key bindings produced by key_binder; rebuilt whenever groups change.
        self.keys = []
        self.key_binder = key_binder
        self._setup_hooks()
        self._setup_groups()
        # Seconds to wait before destroying an emptied non-persistent group.
        self.delay = delay
        self.timeout = {}
def add_rule(self, rule, last=True):
rule_id = self.last_rule_id
self.rules_map[rule_id] = rule
if last:
self.rules.append(rule)
else:
self.rules.insert(0, rule)
self.last_rule_id += 1
return rule_id
def remove_rule(self, rule_id):
rule = self.rules_map.get(rule_id)
if rule:
self.rules.remove(rule)
del self.rules_map[rule_id]
else:
logger.warn('Rule "%s" not found', rule_id)
    def add_dgroup(self, group, start=False):
        # Register the group and one placement rule per configured window match;
        # with start=True the group is created in qtile immediately.
        self.groups_map[group.name] = group
        rules = [Rule(m, group=group.name) for m in group.matches]
        self.rules.extend(rules)
        if start:
            self.qtile.add_group(group.name, group.layout, group.layouts, group.label)
def _setup_groups(self):
for group in self.groups:
self.add_dgroup(group, group.init)
if group.spawn and not self.qtile.no_spawn:
if isinstance(group.spawn, str):
spawns = [group.spawn]
else:
spawns = group.spawn
for spawn in spawns:
pid = self.qtile.cmd_spawn(spawn)
self.add_rule(Rule(Match(net_wm_pid=[pid]), group.name))
    def _setup_hooks(self):
        # Track group creation and window lifecycle events.
        libqtile.hook.subscribe.addgroup(self._addgroup)
        libqtile.hook.subscribe.client_new(self._add)
        libqtile.hook.subscribe.client_killed(self._del)
        # Rebind the group hotkeys whenever the set of groups changes.
        if self.key_binder:
            libqtile.hook.subscribe.setgroup(
                lambda: self.key_binder(self)
            )
            libqtile.hook.subscribe.changegroup(
                lambda: self.key_binder(self)
            )
def _addgroup(self, qtile, group_name):
if group_name not in self.groups_map:
self.add_dgroup(Group(group_name, persist=False))
def _add(self, client):
if client in self.timeout:
logger.info('Remove dgroup source')
self.timeout.pop(client).cancel()
# ignore static windows
if client.defunct:
return
# ignore windows whose groups is already set (e.g. from another hook or
# when it was set on state restore)
if client.group is not None:
return
group_set = False
intrusive = False
for rule in self.rules:
# Matching Rules
if rule.matches(client):
if rule.group:
if rule.group in self.groups_map:
layout = self.groups_map[rule.group].layout
layouts = self.groups_map[rule.group].layouts
label = self.groups_map[rule.group].label
else:
layout = None
layouts = None
label = None
group_
|
added = self.qtile.add_group(rule.group, layout, layouts, label)
client.togroup(rule.group)
group_set = True
group_obj = self.qtile.groups_map[rule.group]
group = self.groups_map.get(rule.group)
if group and g
|
roup_added:
for k, v in list(group.layout_opts.items()):
if isinstance(v, collections.Callable):
v(group_obj.layout)
else:
setattr(group_obj.layout, k, v)
affinity = group.screen_affinity
if affinity and len(self.qtile.screens) > affinity:
self.qtile.screens[affinity].set_group(group_obj)
if rule.float:
client.enablefloating()
if rule.intrusive:
intrusive = rule.intrusive
if rule.break_on_match:
break
# If app doesn't have a group
if not group_set:
current_group = self.qtile.current_group.name
if current_group in self.groups_map and \
self.groups_map[current_group].exclusive and \
not intrusive:
wm_class = client.window.get_wm_class()
if wm_class:
if len(wm_class) > 1:
wm_class = wm_class[1]
else:
wm_class = wm_class[0]
group_name = wm_class
else:
group_name = client.name or 'Unnamed'
self.add_dgroup(Group(group_name, persist=False), start=True)
client.togroup(group_name)
self.sort_groups()
def sort_groups(self):
grps = self.qtile.groups
sorted_grps = sorted(grps, key=lambda g: self.groups_map[g.name].position)
if grps != sorted_grps:
self.qtile.groups = sorted_grps
libqtile.hook.fire("cha
|
hbrunn/bank-statement-reconcile
|
account_statement_transactionid_completion/__init__.py
|
Python
|
agpl-3.0
| 971
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joel Grand-Guillaume
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import statement
|
lypnol/adventofcode-2016
|
day-13/part-1/ayoub.py
|
Python
|
gpl-3.0
| 1,423
| 0.007027
|
from submission import Submission
from heapq import *
class AyoubSubmission(Submission):
    """A* search over the AoC 2016 day 13 maze (input = favourite number).

    Fixes two defects: the relaxation test was inverted (it re-relaxed nodes
    onto LONGER paths), and the open-set scan compared a (priority, node)
    tuple against the bare node, so stale entries were never removed.
    """

    def author(self):
        return 'Ayoub'

    def run(self, s):
        s = s.rstrip()
        N = int(s)
        start = 1, 1
        goal = 31, 39

        def is_wall(x, y):
            # A cell is a wall when the popcount of the formula is odd.
            r = x*x + 3*x + 2*x*y + y + y*y + N
            return bin(r).count("1") % 2 == 1

        def heuristic(x, y):
            # Euclidean distance to the goal (admissible for unit steps).
            a, b = goal
            return ((x - a)**2 + (y - b)**2)**0.5

        dist = {start: 0}
        openset = [(heuristic(*start), start)]
        closedset = set()
        while openset:
            _, (x, y) = heappop(openset)
            if x == goal[0] and y == goal[1]:
                return dist[(x, y)]
            closedset.add((x, y))
            for u, v in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
                node = (x+u, y+v)
                if x+u >= 0 and y+v >= 0 and \
                        not is_wall(*node) and node not in closedset:
                    # Relax only when the new path is strictly shorter.
                    if node not in dist or dist[(x, y)] + 1 < dist[node]:
                        dist[node] = dist[(x, y)] + 1
                        # Drop any stale queue entry for this node.
                        for i, entry in enumerate(openset):
                            if entry[1] == node:
                                openset.pop(i)
                                heapify(openset)
                                break
                        heappush(openset, (dist[node] + heuristic(*node), node))
        return None
|
CottageLabs/OpenArticleGauge
|
openarticlegauge/tests/plugins/test_workflow/test_11_1/test_11_1.py
|
Python
|
bsd-3-clause
| 543
| 0.016575
|
from openarticlegauge import plugin


class mock_licence_plugin(plugin.Plugin):
    """Test double: claims licence-detection support for every provider and
    stamps a fixed bibjson licence + title on the record."""
    _short_name = "mock_doi"

    def capabilities(self):
        return {
            "type_detect_verify" : False,
            "canonicalise" : [],
            "detect_provider" : [],
            "license_detect" : True
        }

    def supports(self, provider):
        return True

    def license_detect(self, record):
        record.record['bibjson'] = {}
        record.record['bibjson']['license'] = [{}]
        record.record['bibjson']['title'] = "mytitle"
Thomsen22/MissingMoney
|
Peak Load Reserve - EU system/PLR_function.py
|
Python
|
gpl-3.0
| 4,572
| 0.013123
|
# Python standard modules
import pandas as pd
# Own modules
from PLR_optclass import PLRMarket
import PLR_optimization as plrmodel
def PeakLoadReserveFunction():
    """Optimize the PLR market, adjust activation prices, re-run the
    day-ahead market and write the generator revenue/cost results to CSV."""
    market = PLRMarket()
    market.optimize()
    df_generators = market.data.generators
    gens_for_zones = market.data.gens_for_country
    peakload = market.data.peakload
    df_cost = market.data.cost
    zones = market.data.zones
    times = market.data.times
    # Optimal reserve capacity per generator from the solved model variables.
    df_gencapacity = pd.DataFrame({'capacity': {g: market.variables.gcap[g].x for g in df_generators.index}})
    df_zonalconsumption = pd.DataFrame(index = times, data = {z: [market.data.zonalconsumption[z,t] for t in times] for z in zones})
    reservemargin = market.data.reservemargin
    timeperiod = market.data.timeperiod
    model = market.data.model
    bidtype = market.data.BidType
    # Model types are controlled from the PLR_optclass
    # ActivationPrice = df_generators['lincostold'].max() + 1 # max + 1
    ActivationPrice = 280
    # Swedish model: reserve plants bid at the activation price plus a markup.
    for g in df_generators.index:
        if model == 'Swedish':
            if df_gencapacity['capacity'][g] > 0:
                df_generators['lincost'][g] = ActivationPrice + (df_generators['lincostold'][g]*0.05)
            elif df_gencapacity['capacity'][g] <= 0:
                df_generators['lincost'][g] = df_generators['lincostold'][g]
    df_generators.to_csv('generators.csv')
    # Run the DA optimization once again and determine the missing money still in the system
    df_flow, df_price_DA, df_windprod, df_solarprod = plrmodel.DayAheadMarket()
    # A dataframe is returned to Excel for further work
    Gen_dataframe = pd.read_csv('revenue_cost_gen.csv', sep=',', encoding='latin-1').set_index('Generator')
    Gen_dataframe['PLRplants'] = df_gencapacity['capacity']
    # PLR bids are only non-zero for plants actually holding reserve capacity.
    PLRbid = {}
    for g in df_generators.index:
        if df_gencapacity['capacity'][g] > 0:
            PLRbid[g] = df_cost['PLRbid'][g]
        elif df_gencapacity['capacity'][g] == 0:
            PLRbid[g] = 0
    PLRbid_df = pd.DataFrame([[key,value] for key,value in PLRbid.items()],columns=["Generator","PLRbid"]).set_index('Generator')
    Gen_dataframe['PLRbid'] = PLRbid_df['PLRbid'].map('{:.2f}'.format)
    Gen_dataframe.to_csv('revenue_cost_gen_PLR.csv')
    Gen_dataframe = plrmodel.missingmoneyPLR(timeperiod, bidtype)
    return Gen_dataframe, df_zonalconsumption, reservemargin, df_price_DA, df_windprod, df_solarprod, gens_for_zones, df_generators, zones, peakload, timeperiod
def peakloadreserveoptimization():
    """Run the PLR model and derive system-level cost and penetration metrics.

    (Repaired: two commented-out lines were split by extraction so their
    continuations lost the leading '#', which made the module unparsable.)
    """
    Gen_dataframe, df_zonalconsumption, reservemargin, df_price_DA, df_windprod, df_solarprod, gens_for_zones, df_generators, zones, peakload, timeperiod = PeakLoadReserveFunction()

    # Cost of wind
    windcost = {}
    for z in df_price_DA.columns:
        for t in df_price_DA.index:
            windcost[z,t] = df_windprod[z][t] * df_price_DA[z][t]
    totalwindcost = sum(windcost.values())

    # Cost of solar
    solarcost = {}
    for z in df_price_DA.columns:
        for t in df_price_DA.index:
            solarcost[z,t] = df_solarprod[z][t] * df_price_DA[z][t]
    totalsolarcost = sum(solarcost.values())

    # Calculating the wind penetration level
    wind_penetration = (df_windprod.sum(axis=1) / df_zonalconsumption.sum(axis=1)) * 100
    # Calculating the solar penetration level
    solar_penetration = (df_solarprod.sum(axis=1) / df_zonalconsumption.sum(axis=1)) * 100

    # The activation, deactivation and investment loop can be run by removing the # in the following lines:
    #for z in zones:
        #if sum(df_generators['capacity'][g] for g in gens_for_zones[z]) > (peakload[z] * reservemargin):
            #df_generators = plrmodel.plantdeactivation(zones, gens_for_zones, df_zonalconsumption, reservemargin, Gen_dataframe)
        #elif sum(df_generators['capacity'][g] for g in gens_for_zones[z]) < (peakload[z] * reservemargin):
            #df_generators = plrmodel.plantactivation(zones, gens_for_zones, df_zonalconsumption, reservemargin)
    #df_generators = plrmodel.plantinvestment(df_price_DA, zones, gens_for_zones, timeperiod, reservemargin, peakload)

    # Data to csv files, remove the #
    #df_price_DA.to_csv('1dayaheadmarketprices.csv')
    #df_solarprod.to_csv('1solarproduction.csv')
    #df_windprod.to_csv('1windproduction.csv')

    return Gen_dataframe, df_generators, totalwindcost, totalsolarcost, wind_penetration, solar_penetration
|
capitalone/cloud-custodian
|
tools/c7n_org/c7n_org/utils.py
|
Python
|
apache-2.0
| 814
| 0
|
# Copyright 2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import os
from c7n.utils import reset_session_cache
from conte
|
xtlib import contextmanager
def account_tags(account):
    """Build a tag dict for an account record.

    Produces 'AccountName' and 'AccountId' plus one 'Account<Key>' entry for
    every "key:value" string in the account's optional ``tags`` list.  Tag
    strings without a ':' are ignored; only the first ':' splits key from
    value, so values may themselves contain colons.
    """
    tags = {'AccountName': account['name'], 'AccountId': account['account_id']}
    for t in account.get('tags', ()):
        if ':' not in t:
            continue
        k, v = t.split(':', 1)
        k = 'Account%s' % k.capitalize()
        tags[k] = v
    return tags
@contextmanager
def environ(**kw):
    """Temporarily overlay *kw* onto os.environ.

    On exit the overlaid keys are removed, the previous environment is
    restored, and the session cache is reset so cached credentials do not
    leak across environment changes.
    """
    snapshot = dict(os.environ)
    for name, value in kw.items():
        os.environ[name] = value
    try:
        yield os.environ
    finally:
        for name in kw:
            del os.environ[name]
        os.environ.update(snapshot)
        reset_session_cache()
|
ujdhesa/unisubs
|
apps/videos/types/bliptv.py
|
Python
|
agpl-3.0
| 2,824
| 0.003895
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from videos.types.base import VideoType
from vidscraper.sites import blip
from django.utils.html import strip_tags
import urllib2
import simplejson
import re
class BlipTvVideoType(VideoType):
    """VideoType implementation for blip.tv video pages.

    Metadata is fetched from blip.tv's JSONP API at construction time, so
    instantiating this class performs network I/O.
    """

    abbreviation = 'B'
    name = 'Blip.tv'
    site = 'blip.tv'
    # Matches http(s)://blip.tv/<subsite>/<file_id> with an optional trailing slash
    pattern = re.compile(r"^https?://blip.tv/(?P<subsite>[a-zA-Z0-9-]+)/(?P<file_id>[a-zA-Z0-9-]+)/?$")

    def __init__(self, url):
        self.url = url
        self.subsite, self.file_id = self._parse_url()
        self.json = self._fetch_json()

    def convert_to_video_url(self):
        """Return the canonical http URL for this video."""
        return "http://blip.tv/%s/%s" % (self.subsite, self.file_id)

    @property
    def video_id(self):
        """blip.tv's embed lookup code, or None when metadata is unavailable."""
        if self.json and 'embedLookup' in self.json:
            return self.json['embedLookup']
        else:
            return None

    @classmethod
    def matches_video_url(cls, url):
        return cls.pattern.match(url)

    def create_kwars(self):
        return {'videoid': self.video_id}

    def set_values(self, video_obj):
        """Copy title/description/duration/thumbnail from the fetched JSON, then save."""
        json = self.json
        if 'title' in json:
            video_obj.title = unicode(json['title'])
        if 'description' in json:
            video_obj.description = unicode(json['description'])
        if 'media' in json:
            video_obj.duration = int(json['media']['duration'])
        if 'thumbnailUrl' in json:
            video_obj.thumbnail = json['thumbnailUrl']
        video_obj.save()
        return video_obj

    def _parse_url(self):
        """Split self.url into (subsite, file_id) using the class pattern."""
        matches = self.pattern.match(self.url).groupdict()
        return matches['subsite'], matches['file_id']

    def _fetch_json(self):
        # bliptv just knows how to return jsonp. argh.
        url = self.url + "?skin=json&callback="
        try:
            jsonp = urllib2.urlopen(url).read().strip()
        except Exception:
            # Network/HTTP failure: degrade to empty metadata
            return {}
        # strip the jsonp parentheses. argh.
        if jsonp.endswith(');'):
            jsonp = jsonp[1:-2]
        json = simplejson.loads(jsonp)
        return json[0].get('Post', {}) if len(json) > 0 else None
|
ixlab/trendquery
|
offline/src/parse_zip_test.py
|
Python
|
apache-2.0
| 528
| 0.003788
|
"""This module is used first to read inpu
|
t files and create year, [word]
dict"""
__author__ = 'kamat'
import unittest
from parse_zip import write_year_title
class ParseZipTest(unittest.TestCase):
    """Tests for parse_zip.write_year_title."""

    def test_write_year_title(self):
        """This method is used to create the year, [word] dict that
        nsf_stats.py uses to create the db.  Exercises write_year_title
        end-to-end against the NSF data directory."""
        directory = "../data/nsf_data/"
        output_file = "../output/nsf_output/nsf_word_year_count.dict"
        write_year_title(directory, output_file)
|
HarrisonHDU/myerp
|
apps/depot/views.py
|
Python
|
mit
| 4,631
| 0.002385
|
# Create your views here.
from django import forms
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import get_template
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
import datetime
from django.shortcuts import render_to_response
from django.core import serializers
import json
import decimal
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
# app specific files
from .models import *
from .forms import *
def to_json(python_object):
    """json.dumps ``default`` hook: serialize a Cart to a tagged dict.

    Items are serialized via Django's JSON serializer; the total price is
    stringified to survive the round trip (see ``from_json``).
    Raises TypeError for any other type, per the json protocol.
    """
    if isinstance(python_object, Cart):
        return {
            '__class__': 'Cart',
            '__value__': {
                'items': serializers.serialize('json', python_object.items),
                'total_price': str(python_object.total_price)
            }
        }
    raise TypeError(repr(python_object) + ' is not JSON serializable')
def from_json(json_object):
    """json.loads ``object_hook``: rebuild a Cart from its tagged dict form.

    Dicts produced by ``to_json`` (tagged with __class__ == 'Cart') are
    deserialized back into Cart instances; any other dict passes through
    unchanged.
    """
    if '__class__' in json_object:
        if json_object['__class__'] == 'Cart':
            return Cart(items=[deserialized_object.object for deserialized_object in serializers.deserialize('json', json_object['__value__']['items'])],
                        total=decimal.Decimal(json_object['__value__']['total_price']))
    return json_object
def create_product(request):
    """Create a Product from POSTed form data; on success present a fresh form.

    NOTE(review): the local names below feed the template through locals(),
    so they must not be renamed.
    """
    form = ProductForm(request.POST or None)
    if form.is_valid():
        form.save()
        # Reset so the page shows an empty form after a successful save
        form = ProductForm()
    t = get_template('depot/create_product.html')
    c = RequestContext(request, locals())
    return HttpResponse(t.render(c))
@login_required
def list_product(request):
    """Paginated product listing, 10 items per page; requires authentication.

    NOTE(review): local names feed the template via locals(); do not rename.
    """
    list_items = Product.objects.all()
    paginator = Paginator(list_items, 10)
    # Fall back to page 1 when ?page= is missing or not an integer
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    # Out-of-range pages clamp to the last page.  Narrowed from a bare
    # except, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        list_items = paginator.page(page)
    except Exception:
        list_items = paginator.page(paginator.num_pages)
    t = get_template('depot/p_list_product.html')
    c = RequestContext(request, locals())
    return HttpResponse(t.render(c))
def view_product(request, id):
    """Render the detail page for one Product.

    NOTE(review): the parameter name `id` shadows the builtin, but it is part
    of the URLconf-facing interface and feeds the template via locals().
    """
    product_instance = Product.objects.get(id=id)
    t = get_template('depot/view_product.html')
    c = RequestContext(request, locals())
    return HttpResponse(t.render(c))
def edit_product(request, id):
    """Edit an existing Product via a bound form; saves on valid POST.

    NOTE(review): local names feed the template via locals(); do not rename.
    """
    product_instance = Product.objects.get(id=id)
    form = ProductForm(request.POST or None, instance=product_instance)
    if form.is_valid():
        form.save()
    t = get_template('depot/edit_product.html')
    c = RequestContext(request, locals())
    return HttpResponse(t.render(c))
def store_view(request):
    """Storefront: already-available products, newest first, plus the session cart.

    NOTE(review): local names feed the template via locals(); do not rename.
    """
    products = Product.objects.filter(date_available__lt=datetime.datetime.now().date()).order_by('-date_available')
    number = len(products)
    # Rebuild the cart from the session; any failure (missing key, corrupt
    # JSON) falls back to an empty cart.  Narrowed from a bare except.
    try:
        cart = json.loads(request.session.get('cart', None), object_hook=from_json)
    except Exception:
        cart = Cart()
    return render_to_response(
        template_name='depot/store.html',
        dictionary=locals(),
        context_instance=RequestContext(request)
    )
def view_cart(request):
    """Render the session cart, initializing an empty one when absent/corrupt.

    NOTE(review): local names feed the template via locals(); do not rename.
    """
    # Narrowed from a bare except: only runtime failures of the JSON round
    # trip should fall back to a fresh cart.
    try:
        cart = json.loads(request.session.get("cart", None), object_hook=from_json)
    except Exception:
        cart = None
    if not cart:
        cart = Cart()
    # Re-serialize so the session always holds a valid cart afterwards
    request.session['cart'] = json.dumps(cart, default=to_json)
    return render_to_response('depot/view_cart.html', locals(), context_instance=RequestContext(request))
def add_to_cart(request, product_id):
    """Add one Product to the session cart and show the cart page.

    Raises Product.DoesNotExist if product_id is unknown.
    """
    product = Product.objects.get(id=product_id)
    # Narrowed from a bare except: any failure to deserialize the session
    # cart falls back to a fresh one.
    try:
        cart = json.loads(request.session['cart'], object_hook=from_json)
    except Exception:
        cart = None
    if not cart:
        cart = Cart()
        request.session['cart'] = json.dumps(cart, default=to_json)
    cart.add_product(product)
    # Persist the updated cart back to the session
    request.session['cart'] = json.dumps(cart, default=to_json)
    return view_cart(request)
def clear_cart(request):
    """Replace the session cart with a fresh empty Cart and show the cart page."""
    request.session['cart'] = json.dumps(Cart(), default=to_json)
    return view_cart(request)
def login_view(request):
    """Authenticate from POSTed credentials.

    On success, log the user in and show the product list; on failure,
    silently fall back to the storefront (no error feedback yet).
    """
    user = authenticate(username=request.POST.get('username', None),
                        password=request.POST.get('password', None))
    if user is not None:
        login(request, user)
        return list_product(request)
    else:
        # Authentication failed; not handled for now
        return store_view(request)
def logout_view(request):
    """Log the current user out and return to the storefront."""
    logout(request)
    return store_view(request)
|
psnj/petl
|
petl/test/io/test_pandas.py
|
Python
|
mit
| 1,286
| 0
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
import petl as etl
from petl.test.helpers import ieq
from petl.io.pandas import todataframe, fromdataframe
try:
    # noinspection PyUnresolvedReferences
    import pandas as pd
except ImportError as e:
    # The whole test module is a no-op when pandas is unavailable
    print('SKIP pandas tests: %s' % e, file=sys.stderr)
else:
    def test_todataframe():
        """Round-trip: petl table -> DataFrame matches a direct construction."""
        tbl = [('foo', 'bar', 'baz'),
               ('apples', 1, 2.5),
               ('oranges', 3, 4.4),
               ('pears', 7, .1)]
        expect = pd.DataFrame.from_records(tbl[1:], columns=tbl[0])
        actual = todataframe(tbl)
        assert expect.equals(actual)

    def test_fromdataframe():
        """DataFrame -> petl table preserves header and rows."""
        tbl = [('foo', 'bar', 'baz'),
               ('apples', 1, 2.5),
               ('oranges', 3, 4.4),
               ('pears', 7, .1)]
        df = pd.DataFrame.from_records(tbl[1:], columns=tbl[0])
        ieq(tbl, fromdataframe(df))
        # second pass checks the table supports repeated iteration
        ieq(tbl, fromdataframe(df))

    def test_integration():
        """Fluent wrap().todataframe() composed with fromdataframe round-trips."""
        tbl = [('foo', 'bar', 'baz'),
               ('apples', 1, 2.5),
               ('oranges', 3, 4.4),
               ('pears', 7, .1)]
        df = etl.wrap(tbl).todataframe()
        tbl2 = etl.fromdataframe(df)
        ieq(tbl, tbl2)
        ieq(tbl, tbl2)
Andrew-McNab-UK/DIRAC
|
Core/DISET/private/BaseClient.py
|
Python
|
gpl-3.0
| 26,577
| 0.025247
|
""" This module exposes the BaseClient class,
which serves as base for InnerRPCClient and TransferClient.
"""
__RCSID__ = "$Id$"
import time
import thread
import DIRAC
from DIRAC.Core.DISET.private.Protocols import gProtocolDict
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities import List, Network
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceURL, getServiceFailoverURL
from DIRAC.Core.Security import CS
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
class BaseClient(object):
""" Glues together stubs with threading, credentials, and URLs discovery (by DIRAC vo and setup).
Basically what needs to be done to enable RPC calls, and transfer, to find a URL.
"""
VAL_EXTRA_CREDENTIALS_HOST = "hosts"
KW_USE_CERTIFICATES = "useCertificates"
KW_EXTRA_CREDENTIALS = "extraCredentials"
KW_TIMEOUT = "timeout"
KW_SETUP = "setup"
KW_VO = "VO"
KW_DELEGATED_DN = "delegatedDN"
KW_DELEGATED_GROUP = "delegatedGroup"
KW_IGNORE_GATEWAYS = "ignoreGateways"
KW_PROXY_LOCATION = "proxyLocation"
KW_PROXY_STRING = "proxyString"
KW_PROXY_CHAIN = "proxyChain"
KW_SKIP_CA_CHECK = "skipCACheck"
KW_KEEP_ALIVE_LAPSE = "keepAliveLapse"
__threadConfig = ThreadConfig()
def __init__( self, serviceName, **kwargs ):
"""
:param serviceName: URL of the service (proper uri or just System/Component)
:param useCertificates: If set to True, use the server certificate
:param extraCredentials:
:param timeout: Timeout of the call (default 600 s)
:param setup: Specify the Setup
:param VO: Specify the VO
:param delegatedDN: Not clear what it can be used for.
:param delegatedGroup: Not clear what it can be used for.
:param ignoreGateways: Ignore the DIRAC Gatways settings
:param proxyLocation: Specify the location of the proxy
:param proxyString: Specify the proxy string
:param proxyChain: Specify the proxy chain
:param skipCACheck: Do not check the CA
:param keepAliveLapse: Duration for keepAliveLapse (heartbeat like)
"""
if not isinstance( serviceName, basestring ):
raise TypeError( "Service name expected to be a string. Received %s type %s" %
( str( serviceName ), type( serviceName ) ) )
self._destinationSrv = serviceName
self._serviceName = serviceName
self.kwargs = kwargs
self.__useCertificates = None
# The CS useServerCertificate option can be overridden by explicit argument
self.__forceUseCertificates = self.kwargs.get( self.KW_USE_CERTIFICATES )
self.__initStatus = S_OK()
self.__idDict = {}
self.__extraCredentials = ""
self.__enableThreadCheck = False
self.__retry = 0
self.__retryDelay = 0
self.__nbOfUrls = 1 #by default we always have 1 url for example: RPCClient('dips://volhcb38.cern.ch:9162/Framework/SystemAdministrator')
self.__nbOfRetry = 3 # by default we try try times
self.__retryCounter = 1
self.__bannedUrls = []
for initFunc in ( self.__discoverSetup, self.__discoverVO, self.__discoverTimeout,
self.__discoverURL, self.__discoverCredentialsToUse,
self.__checkTransportSanity,
self.__setKeepAlive
|
Lapse ):
result = initFunc()
if not result[ 'OK' ] and self.__initStatus[ 'OK' ]:
self.__initStatus = result
self.numberOfURLs = 0
self._initialize()
#HACK for thread-safety:
self.__allowedThreadID = False
d
|
ef _initialize( self ):
pass
  def getDestinationService( self ):
    """ Return the service name or URL this client was constructed with """
    return self._destinationSrv
  def getServiceName( self ):
    """ Return the resolved service name (System/Component part of the URL) """
    return self._serviceName
def __discoverSetup( self ):
""" Discover which setup to use and stores it in self.setup
The setup is looked for:
* kwargs of the constructor (see KW_SETUP)
* the ThreadConfig
* in the CS /DIRAC/Setup
* default to 'Test'
"""
if self.KW_SETUP in self.kwargs and self.kwargs[ self.KW_SETUP ]:
self.setup = str( self.kwargs[ self.KW_SETUP ] )
else:
self.setup = self.__threadConfig.getSetup()
if not self.setup:
self.setup = gConfig.getValue( "/DIRAC/Setup", "Test" )
return S_OK()
def __discoverVO( self ):
""" Discover which VO to use and stores it in self.vo
The VO is looked for:
* kwargs of the constructor (see KW_VO)
* in the CS /DIRAC/VirtualOrganization
* default to 'unknown'
"""
if self.KW_VO in self.kwargs and self.kwargs[ self.KW_VO ]:
self.vo = str( self.kwargs[ self.KW_VO ] )
else:
self.vo = gConfig.getValue( "/DIRAC/VirtualOrganization", "unknown" )
return S_OK()
  def __discoverURL( self ):
    """ Calculate the final URL. It is called at initialization and in connect in case of issue

        It sets:
          * self.serviceURL: the url (dips) selected as target using __findServiceURL
          * self.__URLTuple: a split of serviceURL obtained by Network.splitURL
          * self._serviceName: the last part of URLTuple (typically System/Component)
    """
    #Calculate final URL
    try:
      result = self.__findServiceURL()
    except Exception as e:
      return S_ERROR( repr( e ) )
    if not result[ 'OK' ]:
      return result
    self.serviceURL = result[ 'Value' ]
    retVal = Network.splitURL( self.serviceURL )
    if not retVal[ 'OK' ]:
      return retVal
    self.__URLTuple = retVal[ 'Value' ]
    self._serviceName = self.__URLTuple[-1]
    # Merge per-connection options defined in the CS under /DIRAC/ConnConf/<host>:<port>;
    # explicit constructor kwargs take precedence over CS options
    res = gConfig.getOptionsDict( "/DIRAC/ConnConf/%s:%s" % self.__URLTuple[1:3] )
    if res[ 'OK' ]:
      opts = res[ 'Value' ]
      for k in opts:
        if k not in self.kwargs:
          self.kwargs[k] = opts[k]
    return S_OK()
def __discoverTimeout( self ):
""" Discover which timeout to use and stores it in self.timeout
The timeout can be specified kwargs of the constructor (see KW_TIMEOUT),
with a minimum of 120 seconds.
If unspecified, the timeout will be 600 seconds.
The value is set in self.timeout, as well as in self.kwargs[KW_TIMEOUT]
"""
if self.KW_TIMEOUT in self.kwargs:
self.timeout = self.kwargs[ self.KW_TIMEOUT ]
else:
self.timeout = False
if self.timeout:
self.timeout = max( 120, self.timeout )
else:
self.timeout = 600
self.kwargs[ self.KW_TIMEOUT ] = self.timeout
return S_OK()
def __discoverCredentialsToUse( self ):
""" Discovers which credentials to use for connection.
* Server certificate:
-> If KW_USE_CERTIFICATES in kwargs, sets it in self.__useCertificates
->If not, check gConfig.useServerCertificate(), and sets it in self.__useCertificates and kwargs[KW_USE_CERTIFICATES]
* Certification Authorities check:
-> if KW_SKIP_CA_CHECK is not in kwargs and we are using the certificates, set KW_SKIP_CA_CHECK to false in kwargs
-> if KW_SKIP_CA_CHECK is not in kwargs and we are not using the certificate, check the CS.skipCACheck
* Proxy Chain
-> if KW_PROXY_CHAIN in kwargs, we remove it and dump its string form into kwargs[KW_PROXY_STRING]
"""
#Use certificates?
if self.KW_USE_CERTIFICATES in self.kwargs:
self.__useCertificates = self.kwargs[ self.KW_USE_CERTIFICATES ]
else:
self.__useCertificates = gConfig.useServerCertificate()
self.kwargs[ self.KW_USE_CERTIFICATES ] = self.__useCertificates
if self.KW_SKIP_CA_CHECK not in self.kwargs:
if self.__useCertificates:
self.kwargs[ self.KW_SKIP_CA_CHECK ] = False
else:
self.kwargs[ self.KW_SKIP_CA_CHECK ] = CS.skipCACheck()
if self.KW_PROXY_CHAIN in self.kwargs:
try:
self.kwargs[ self.KW_PROXY_STRING ] = self.kwargs[ self.KW_PROXY_CHAIN ].dumpAllToString()[ 'Value' ]
del self.kwargs[ self.KW_PROXY_CHAIN ]
except:
return S_ERROR( "Invalid proxy ch
|
garrettcap/Bulletproof-Backup
|
wx/tools/Editra/src/syntax/_kix.py
|
Python
|
gpl-2.0
| 4,682
| 0.001709
|
###############################################################################
# Name: kix.py #
# Purpose: Syntax configuration module for KIXtart scripts #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: kix.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for KIXtart scripts
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _kix.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
COMMANDS = (0, "? and beep big break call cd cls color cookie1 copy debug del "
"dim display do until exit flushkb for each next function "
"endfunction get gets global go gosub goto if else endif md or "
"password play quit rd redim return run select case endselect "
"set setl setm settime shell sleep small use while loop")
FUNCTIONS = (1, "abs addkey addprinterconnection addprogramgroup "
"addprogramitem asc ascan at backupeventlog box cdbl chr cint "
"cleareventlog close comparefiletimes createobject cstr "
"dectohex delkey delprinterconnection delprogramgroup "
"delprogramitem deltree delvalue dir enumgroup enumipinfo "
"enumkey enumlocalgroup enumvalue execute exist existkey "
"expandenvironmentvars fix formatnumber freefilehandle "
"getdiskspace getfileattr getfilesize getfiletime "
"getfileversion getobject iif ingroup instr instrrev int "
"isdeclared join kbhit keyexist lcase left len loadhive "
"loadkey logevent logoff ltrim memorysize messagebox open "
"readline readprofilestring readtype readvalue redirectoutput "
"right rnd round rtrim savekey sendkeys sendmessage setascii "
"setconsole setdefaultprinter setfileattr setfocus setoption "
"setsystemstate settitle setwallpaper showprogramgroup "
"shutdown sidtoname split srnd substr trim ubound ucase "
"unloadhive val vartype vartypename writeline "
"writeprofilestring writevalue")
MACROS = (2, "address build color comment cpu crlf csd curdir date day domain "
"dos error fullname homedir homedrive homeshr hostname inwin "
"ipaddress0 ipaddress1 ipaddress2 ipaddress3 kix lanroot
|
ldomain "
"ldrive lm logonmode longhomedir lserver maxpwage mdayno mhz "
"monthno month msecs pid primarygroup priv productsuite "
"producttype pwage ras result rserver scriptdir scriptexe "
"scriptname serror sid site startdir syslang ticks time userid "
"userlang wdayno wksta wuserid ydayno year")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [(stc.STC_KIX_COMMENT, 'comment_st
|
yle'),
(stc.STC_KIX_DEFAULT, 'default_style'),
(stc.STC_KIX_FUNCTIONS, 'funct_style'),
(stc.STC_KIX_IDENTIFIER, 'default_style'),
(stc.STC_KIX_KEYWORD, 'keyword_style'),
(stc.STC_KIX_MACRO, 'pre_style'),
(stc.STC_KIX_NUMBER, 'number_style'),
(stc.STC_KIX_OPERATOR, 'operator_style'),
(stc.STC_KIX_STRING1, 'char_style'),
(stc.STC_KIX_STRING2, 'string_style'),
(stc.STC_KIX_VAR, 'scalar_style')]
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Kix"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_KIX)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [COMMANDS, FUNCTIONS, MACROS]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u';']
|
toobaz/pandas
|
pandas/tests/frame/test_block_internals.py
|
Python
|
bsd-3-clause
| 22,034
| 0.000499
|
from datetime import datetime, timedelta
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timestamp,
compat,
date_range,
option_context,
)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.core.internals import ObjectBlock
from pandas.core.internals.blocks import IntBlock
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_series_equal,
)
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals:
    def test_setitem_invalidates_datetime_index_freq(self):
        # GH#24096 altering a datetime64tz column inplace invalidates the
        # `freq` attribute on the underlying DatetimeIndex
        dti = date_range("20130101", periods=3, tz="US/Eastern")
        ts = dti[1]
        df = DataFrame({"B": dti})
        assert df["B"]._values.freq == "D"
        # Setting a NaT breaks the regular frequency of the column's values
        df.iloc[1, 0] = pd.NaT
        assert df["B"]._values.freq is None
        # check that the DatetimeIndex was not altered in place
        assert dti.freq == "D"
        assert dti[1] == ts
    def test_cast_internals(self, float_frame):
        # Constructing a DataFrame from an existing BlockManager with an
        # explicit dtype must give the same result as casting the series.
        casted = DataFrame(float_frame._data, dtype=int)
        expected = DataFrame(float_frame._series, dtype=int)
        assert_frame_equal(casted, expected)
        casted = DataFrame(float_frame._data, dtype=np.int32)
        expected = DataFrame(float_frame._series, dtype=np.int32)
        assert_frame_equal(casted, expected)
    def test_consolidate(self, float_frame):
        # Adding a column leaves the BlockManager unconsolidated; explicit
        # _consolidate() must merge same-dtype blocks into one.
        float_frame["E"] = 7.0
        consolidated = float_frame._consolidate()
        assert len(consolidated._data.blocks) == 1
        # Ensure copy, do I want this?
        recons = consolidated._consolidate()
        assert recons is not consolidated
        tm.assert_frame_equal(recons, consolidated)
        float_frame["F"] = 8.0
        assert len(float_frame._data.blocks) == 3
        # In-place consolidation mutates the original frame's manager
        float_frame._consolidate(inplace=True)
        assert len(float_frame._data.blocks) == 1
    def test_consolidate_inplace(self, float_frame):
        # Smoke test: repeated column insertion must not break in-place
        # consolidation machinery.
        frame = float_frame.copy()  # noqa
        # triggers in-place consolidation
        # NOTE(review): range(ord("A"), ord("Z")) stops before "Z" --
        # presumably intentional for a smoke test, but verify.
        for letter in range(ord("A"), ord("Z")):
            float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame["E"] = 7.0
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert flo
|
at_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] ==
|
5).all()
# unconsolidated
float_frame["E"] = 7.0
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
    def test_boolean_set_uncons(self, float_frame):
        # Boolean-mask assignment must behave like the equivalent ndarray
        # operation even on an unconsolidated frame.
        float_frame["E"] = 7.0
        expected = float_frame.values.copy()
        expected[expected > 1] = 2
        float_frame[float_frame > 1] = 2
        assert_almost_equal(expected, float_frame.values)
    def test_values_numeric_cols(self, float_frame):
        # Selecting only the numeric columns must yield a float64 ndarray
        # even when the frame also contains an object column.
        float_frame["foo"] = "bar"
        values = float_frame[["A", "B", "C", "D"]].values
        assert values.dtype == np.float64
    def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
        # .values must use the lowest common dtype of the selected columns.
        # mixed lcd
        values = mixed_float_frame[["A", "B", "C", "D"]].values
        assert values.dtype == np.float64
        values = mixed_float_frame[["A", "B", "C"]].values
        assert values.dtype == np.float32
        values = mixed_float_frame[["C"]].values
        assert values.dtype == np.float16
        # GH 10364
        # B uint64 forces float because there are other signed int types
        values = mixed_int_frame[["A", "B", "C", "D"]].values
        assert values.dtype == np.float64
        values = mixed_int_frame[["A", "D"]].values
        assert values.dtype == np.int64
        # B uint64 forces float because there are other signed int types
        values = mixed_int_frame[["A", "B", "C"]].values
        assert values.dtype == np.float64
        # as B and C are both unsigned, no forcing to float is needed
        values = mixed_int_frame[["B", "C"]].values
        assert values.dtype == np.uint64
        values = mixed_int_frame[["A", "C"]].values
        assert values.dtype == np.int32
        values = mixed_int_frame[["C", "D"]].values
        assert values.dtype == np.int64
        values = mixed_int_frame[["A"]].values
        assert values.dtype == np.int32
        values = mixed_int_frame[["C"]].values
        assert values.dtype == np.uint8
    def test_constructor_with_convert(self):
        # this is actually mostly a test of lib.maybe_convert_objects
        # #2845
        # int64 max stays int64
        df = DataFrame({"A": [2 ** 63 - 1]})
        result = df["A"]
        expected = Series(np.asarray([2 ** 63 - 1], np.int64), name="A")
        assert_series_equal(result, expected)
        # 2**63 overflows int64 -> uint64
        df = DataFrame({"A": [2 ** 63]})
        result = df["A"]
        expected = Series(np.asarray([2 ** 63], np.uint64), name="A")
        assert_series_equal(result, expected)
        # datetime mixed with bool stays object
        df = DataFrame({"A": [datetime(2005, 1, 1), True]})
        result = df["A"]
        expected = Series(
            np.asarray([datetime(2005, 1, 1), True], np.object_), name="A"
        )
        assert_series_equal(result, expected)
        # None with ints -> float64 with NaN
        df = DataFrame({"A": [None, 1]})
        result = df["A"]
        expected = Series(np.asarray([np.nan, 1], np.float_), name="A")
        assert_series_equal(result, expected)
        df = DataFrame({"A": [1.0, 2]})
        result = df["A"]
        expected = Series(np.asarray([1.0, 2], np.float_), name="A")
        assert_series_equal(result, expected)
        # complex mixed with numerics -> complex
        df = DataFrame({"A": [1.0 + 2.0j, 3]})
        result = df["A"]
        expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name="A")
        assert_series_equal(result, expected)
        df = DataFrame({"A": [1.0 + 2.0j, 3.0]})
        result = df["A"]
        expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name="A")
        assert_series_equal(result, expected)
        # complex mixed with bool stays object
        df = DataFrame({"A": [1.0 + 2.0j, True]})
        result = df["A"]
        expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A")
        assert_series_equal(result, expected)
        df = DataFrame({"A": [1.0, None]})
        result = df["A"]
        expected = Series(np.asarray([1.0, np.nan], np.float_), name="A")
        assert_series_equal(result, expected)
        df = DataFrame({"A": [1.0 + 2.0j, None]})
        result = df["A"]
        expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex_), name="A")
        assert_series_equal(result, expected)
        # heterogeneous mixes stay object
        df = DataFrame({"A": [2.0, 1, True, None]})
        result = df["A"]
        expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A")
        assert_series_equal(result, expected)
        df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]})
        result = df["A"]
        expected = Series(
            np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A"
        )
        assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [
[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
]
df = DataFrame(data)
# check dtypes
result = df.dtypes
expected = Series({"datetime64[ns]": 3})
# mixed-type frames
float_string_frame["datetime"] = datetime.now()
float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
assert float_string_frame["datetime"].dtype == "M8[ns]"
assert float_string_frame["timedelta"].dtype == "m8[ns]"
result = float_string_frame.dtypes
expected = Series(
|
juanshishido/project-eta
|
code/make_plots_smoothing.py
|
Python
|
bsd-3-clause
| 1,263
| 0.020586
|
from utils.ols import *

######## Before preprocessing
# Betas estimated for subject 16 over 3 runs, without outlier removal or smoothing
betas_before_process = apply_ols_to_subject(16, 3, r_outliers = False, smooth = False)
## average across all runs
avg_before = average_betas(betas_before_process)
## shape to 4d
avg_before = beta_2d_to_4d(avg_before)
# Middle slice (z=16): channel 0 = gain betas, channel 1 = loss betas
plt.figure()
plt.imshow(avg_before[:, :, 16, 0], interpolation='nearest', cmap='gray')
plt.title('Middle Slice Beta(Gain) Before smoothing')
plt.savefig("Before_preproccessing_gain")
plt.figure()
plt.imshow(avg_before[:, :, 16, 1], interpolation='nearest', cmap='gray')
plt.title('Middle Slice Beta(Loss) Before smoothing')
plt.savefig("Before_preproccessing_loss")

###### After preprocessing
# Same pipeline with outlier removal and smoothing enabled
betas_after_process = apply_ols_to_subject(16, 3, r_outliers = True, smooth = True)
## average across all runs
avg_after = average_betas(betas_after_process)
## shape to 4d
avg_after = beta_2d_to_4d(avg_after)
plt.figure()
plt.imshow(avg_after[:, :, 16, 0], interpolation='nearest', cmap='gray')
plt.title('Middle Slice Beta(Gain) After Smoothing')
plt.savefig("After_preproccessing_gain")
plt.figure()
plt.imshow(avg_after[:, :, 16, 1], interpolation='nearest', cmap='gray')
plt.title('Middle Slice Beta(Loss) After Smoothing')
plt.savefig("After_preproccessing_loss")
|
getsentry/zeus
|
zeus/db/func.py
|
Python
|
apache-2.0
| 1,373
| 0.002185
|
import re
from sqlalchemy.sql import func
from sqlalchemy.types import String, TypeDecorator
# https://bitbucket.org/zzzeek/sqlalchemy/issues/3729/using-array_agg-around-row-function-does
class ArrayOfRecord(TypeDecorator):
    """Decode a Postgres ``array_agg(row(...))`` result column into a list of
    tuples, one tuple per aggregated row.

    Works around
    https://bitbucket.org/zzzeek/sqlalchemy/issues/3729/using-array_agg-around-row-function-does
    """

    _array_regexp = re.compile(r"^\{(\".+?\")*\}$")
    _chunk_regexp = re.compile(r'"(.*?)",?')
    _param_regexp = re.compile(r"[^\(\),]+")

    impl = String

    def __init__(self, cols):
        # cols: the column expressions aggregated into each row()
        self.cols = cols
        super().__init__()

    def process_result_value(self, value, dialect):
        # XXX(dcramer): if the trailing value(s?) of the returning array are NULL, postgres seems to
        # not return them, and thus our output array does not match the same length as our column
        # selection array
        #
        # For example if the input is:
        #   ARRAY_AGG_RESULT(col1, col2)
        # And the value of col2 is NULL
        # The resulting return value from this query will be:
        #   ({col1_value},)
        elems = self._array_regexp.match(value).group(1)
        elems = [e for e in self._chunk_regexp.split(elems) if e]
        num_cols = len(self.cols)
        # Pad with Nones so rows missing trailing NULL columns still yield
        # full-width tuples, then trim to exactly num_cols.
        padding = (None,) * num_cols
        return [
            (tuple(self._param_regexp.findall(e)) + padding)[:num_cols] for e in elems
        ]
def array_agg_row(*arg):
    """SQL expression: ARRAY_AGG(ROW(*arg)), decoded client-side by ArrayOfRecord."""
    return func.array_agg(func.row(*arg), type_=ArrayOfRecord(arg))
| |
minscof/wemoJeedomPlugin
|
resources/wemo_server.py
|
Python
|
gpl-3.0
| 16,264
| 0.001969
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import logging
import argparse
#import subprocess
import json
import pywemo
import urllib.request
import urllib.error
import urllib.parse
import socketserver
from datetime import datetime
__version__ = '0.93'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)-15s - %(name)s: %(message)s')
logger = logging.getLogger('wemo_server')
logger.info('Program starting version %s', __version__)
reqlog = logging.getLogger("urllib3.connectionpool")
reqlog.disabled = True
NOOP = lambda *x: None
#level = logging.DEBUG
# if getattr(args, 'debug', False):
# level = logging.DEBUG
# logging.basicConfig(level=level)
# callbackUrl=${1}
# argv[1]: Jeedom callback endpoint; event payloads are URL-quoted and appended
if len(sys.argv) > 1:
    callbackUrl = sys.argv[1]
else:
    callbackUrl = "http://localhost?payload="
#logger.debug('callback Url %s', callbackUrl)
# listen PORT=${2}
if len(sys.argv) > 2:
    PORT = int(sys.argv[2])
    HOST, PORT = "localhost", int(sys.argv[2])
else:
    PORT = 5000
    HOST, PORT = "localhost", 5000
# loglevel=${3}  (read but the logger level itself is configured above)
if len(sys.argv) > 3:
    loglevel = sys.argv[3]
else:
    loglevel = "info"
logger.debug('loglevel %s', loglevel)
#time_start = time()
#print('Server started at ', strftime("%a, %d %b %Y %H:%M:%S +0000", localtime(time_start)), 'listening on port ', PORT)
#logger.info('Server started at %s listening on port %s',strftime("%a, %d %b %Y %H:%M:%S +0000", localtime(time_start)), PORT)
def _status(state):
return 1 if state == 1 else 0
def _standby(state):
return 1 if state == 8 else 0
def parse_insight_params(params):
"""Parse the Insight parameters."""
#global logger
#logger.debug('______parse %s',params)
(
state, # 0 if off, 1 if on, 8 if on but load is off
lastchange,
onfor, # seconds
ontoday, # seconds
ontotal, # seconds
timeperiod, # pylint: disable=unused-variable
wifipower,
currentmw,
todaymw,
totalmw
) = params.split('|')
state = int(state)
return {'status': _status(state),
'standby': _standby(state),
'lastchange': datetime.fromtimestamp(int(lastchange)),
'onfor': int(onfor),
'ontoday': int(ontoday),
'ontotal': int(ontotal),
'wifiPower': int(wifipower),
'todaymw': int(float(todaymw)),
'totalmw': int(float(totalmw)),
'currentpower': int(float(currentmw))}
def event(self, _type, value):
    """Subscription callback: POST the device's state to ``callbackUrl``.

    The registry invokes this as a plain function, so ``self`` is the
    pywemo device that fired the event; ``_type`` is the event name and
    ``value`` its payload.  Only 'BinaryState' events are forwarded.
    """
    try:
        logger.info('event for device %s with type = %s value %s',
                    self.serialnumber, _type, value)
        if _type == 'BinaryState':
            params = {}
            if self.model_name == "Insight":
                params = dict(self.insight_params)
                # insight_params carries a raw 'state'; expose the derived
                # status/standby flags instead.
                params['status'] = _status(int(params['state']))
                params['standby'] = _standby(int(params['state']))
                del params['state']
            else:
                params["status"] = self.get_state()
            params["logicalAddress"] = self.serialnumber
            payload = json.dumps(params, sort_keys=True, default=str)
            logger.debug("json dumps payload = %s", payload)
            urllib.request.urlopen(
                callbackUrl + urllib.parse.quote(payload)).read()
    except Exception:
        # Was a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt and dropped the traceback; log it instead.
        logger.exception(
            'bug in event for device with type = %s value %s', _type, value)
# Discover all WeMo devices on the local network and subscribe to their
# push notifications; 'event' above fires on each BinaryState change.
devices = pywemo.discover_devices()
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
for device in devices:
    # NOTE(review): the triple-quoted block below is dead code kept as a
    # discarded string literal; it looks like an earlier one-shot push of
    # each device's initial state — confirm before deleting.
    '''
    state = device.get_state(True)
    logger.info('state = %s', str(state))
    serialnumber = device.serialnumber
    logger.info("serialnumber = %s", serialnumber)
    params = {}
    if device.model_name == "Insight":
        params = dict(device.insight_params)
        params['status'] = _status(int(params['state']))
        params['standby'] = _standby(int(params['state']))
        del params['state']
    else:
        params["status"] = device.get_state()
    params["logicalAddress"] = serialnumber
    payload = json.dumps(params, sort_keys=True, default=str)
    logger.debug("json dumps payload = %s", payload)
    urllib.request.urlopen(callbackUrl + urllib.parse.quote(payload)).read()
    '''
    SUBSCRIPTION_REGISTRY.register(device)
    SUBSCRIPTION_REGISTRY.on(device, 'BinaryState', event)
#SUBSCRIPTION_REGISTRY.on(device, 'EnergyPerUnitCost', event)
class apiRequestHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
# initialization.
self.logger = loggin
|
g.getLogger('apiRequestHandler')
#self.logger.debug('__init__')
socketserver.BaseRequestHandler.__init__(
self, request, client_address, server)
def start_response(self, code, contentType, data):
self.logger.debug(
'start_response() code = %s payload = %s', code, data)
code = "HTTP/1.1 " + code + '\r\n'
|
self.request.send(code.encode())
response_headers = {
'Content-Type': contentType + '; encoding=utf8',
'Content-Length': len(data),
'Connection': 'close',
}
response_headers_raw = ''.join('%s: %s\n' % (
k, v) for k, v in response_headers.items())
self.request.send(response_headers_raw.encode())
self.request.send(b'\n')
return self.request.send(data.encode())
def handle(self):
#self.logger.debug('start handle()')
global devices
data = str(self.request.recv(1024), "utf-8").split('\n')[0]
lst = data.split()
stringcount = len(lst)
#self.logger.debug('split len->"%s"', stringcount)
if stringcount > 1:
data = urllib.parse.unquote(urllib.parse.unquote(lst[1]))
else:
data = urllib.parse.unquote(urllib.parse.unquote(lst[1]))
self.logger.debug('recv()->"%s"', data)
cmd = 'unkown'
data = data.split("?")
#self.logger.debug('after split data -> %s', data)
cmd = data[0]
#self.logger.debug('cmd -> %s', cmd)
cmd = cmd.split("/")[1]
#self.logger.debug('cmd -> %s', cmd)
arg = ''
if len(data) > 1:
arg = data[1]
#self.logger.debug('arg ->%s', arg)
key = ''
value = ''
key2 = ''
value2 = ''
if arg:
options = arg.split('&')
key = options[0].rpartition('=')[0]
value = urllib.parse.unquote(options[0].rpartition('=')[2])
if len(options) == 2:
key2 = options[1].rpartition('=')[0]
value2 = urllib.parse.unquote(options[1].rpartition('=')[2])
#print('DEBUG = cmd=', cmd, ' arg ', arg, ' key ', key, ' value ', value, ' key2 ', key2, ' value2 ', value2)
#self.logger.debug('cmd ->%s arg=%s key=%s value=%s key2=%s value2=%s',
# cmd, arg, key, value, key2, value2)
if not cmd:
content_type = "text/html"
self.start_response(
'200 OK', content_type, '<h1>Welcome. Try a command ex : scan, stop, start.</h1>')
return
if cmd == 'scan':
devices = pywemo.discover_devices()
payload = '['
separator = ''
for device in devices:
params = {}
params['name'] = device.name
logger.info("name = %s", params['name'])
params['host'] = device.host
logger.info("host = %s", params['host'])
params['serialNumber'] = device.serialnumber
logger.info("serialnumber = %s", params['serialNumber'])
params['modelName'] = device.model_name
logger.info("modelName = %s", params['modelName'])
params['model'] = device.model
logger.info("model = %s", params['model'])
|
UManPychron/pychron
|
pychron/canvas/canvas2D/extraction_line_canvas2D.py
|
Python
|
apache-2.0
| 10,949
| 0.001005
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
from pyface.action.menu_manager import MenuManager
from pyface.qt.QtGui import QToolTip
from traits.api import Any, Str, on_trait_change, Bool
from traitsui.menu import Action
from pychron.canvas.canvas2D.overlays.extraction_line_overlay import ExtractionLineInfoTool, ExtractionLineInfoOverlay
from pychron.canvas.canvas2D.scene.extraction_line_scene import ExtractionLineScene
from pychron.canvas.canvas2D.scene.primitives.connections import Elbow, Connection
from pychron.canvas.canvas2D.scene.primitives.lasers import Laser
from pychron.canvas.canvas2D.scene.primitives.primitives import BorderLine
from pychron.canvas.canvas2D.scene.primitives.valves import RoughValve, \
BaseValve, Switch, Manu
|
alSwitch
from pychron.canvas.scene_viewer import SceneCanvas
from pyc
|
hron.globals import globalv
W = 2
H = 2
class ExtractionLineAction(Action):
    """Menu action bound to a specific extraction-line chamber."""
    # Name of the chamber this action targets (traits Str).
    chamber = Str
class ExtractionLineCanvas2D(SceneCanvas):
"""
"""
scene_klass = ExtractionLineScene
use_backbuffer = True
border_visible = False
active_item = Any
selected_id = Str
show_axes = False
show_grids = False
use_zoom = False
use_pan = False
padding_left = 0
padding_right = 0
padding_bottom = 0
padding_top = 0
manager = Any
aspect_ratio = 4 / 3.
y_range = (-10, 25)
display_volume = Bool
volume_key = Str
confirm_open = Bool(True)
force_actuate_enabled = True
    def __init__(self, *args, **kw):
        """Attach the hover-info tool and its overlay to this canvas."""
        super(ExtractionLineCanvas2D, self).__init__(*args, **kw)
        tool = ExtractionLineInfoTool(scene=self.scene,
                                      manager=self.manager)
        overlay = ExtractionLineInfoOverlay(tool=tool,
                                            component=self)
        # Keep a direct reference so callers can reach the info tool.
        self.tool = tool
        self.tools.append(tool)
        self.overlays.append(overlay)
# @caller
# def invalidate_and_redraw(self):
# super(ExtractionLineCanvas2D, self).invalidate_and_redraw()
def toggle_item_identify(self, name):
v = self._get_switch_by_name(name)
if v is not None:
try:
v.identify = not v.identify
except AttributeError:
pass
def update_switch_state(self, name, nstate, refresh=True, mode=None):
"""
"""
switch = self._get_switch_by_name(name)
if switch is not None:
switch.state = nstate
if refresh:
self.invalidate_and_redraw()
def update_switch_owned_state(self, name, owned):
switch = self._get_switch_by_name(name)
if switch is not None:
switch.owned = owned
self.invalidate_and_redraw()
def update_switch_lock_state(self, name, lockstate):
switch = self._get_switch_by_name(name)
if switch is not None:
switch.soft_lock = lockstate
self.invalidate_and_redraw()
def load_canvas_file(self, canvas_path=None, canvas_config_path=None, valves_path=None):
if canvas_path is None:
canvas_path = self.manager.application.preferences.get('pychron.extraction_line.canvas_path')
if canvas_config_path is None:
canvas_config_path = self.manager.application.preferences.get('pychron.extraction_line.canvas_config_path')
if valves_path is None:
valves_path = self.manager.application.preferences.get('pychron.extraction_line.valves_path')
if canvas_path and os.path.isfile(canvas_path):
self.scene.load(canvas_path, canvas_config_path, valves_path, self)
self.invalidate_and_redraw()
def _over_item(self, event):
x, y = event.x, event.y
return self.scene.get_is_in(x, y, exclude=[BorderLine, Elbow, Connection])
    def normal_left_down(self, event):
        """Left-clicks in the 'normal' state are intentionally ignored."""
        pass
    def normal_mouse_move(self, event):
        """Track hover: enter 'select' state over an item, reset otherwise."""
        item = self._over_item(event)
        if item is not None:
            # Hovering an item: switch to the 'select' event state so
            # select_* handlers receive subsequent events.
            self.event_state = 'select'
            if item != self.active_item:
                self.active_item = item
            if isinstance(item, (BaseValve, Switch)):
                event.window.set_pointer(self.select_pointer)
                if self.manager:
                    self.manager.set_selected_explanation_item(item)
        else:
            # Left all items: clear tooltip, selection, and pointer.
            event.window.control.setToolTip('')
            QToolTip.hideText()
            self.active_item = None
            self.event_state = 'normal'
            event.window.set_pointer(self.normal_pointer)
            if self.manager:
                self.manager.set_selected_explanation_item(None)
def select_mouse_move(self, event):
"""
"""
ctrl = event.window.control
try:
tt = self.active_item.get_tooltip_text()
ctrl.setToolTip(tt)
except AttributeError as e:
pass
self.normal_mouse_move(event)
def select_right_down(self, event):
item = self.active_item
if item is not None:
self._show_menu(event, item)
event.handled = True
    def select_left_down(self, event):
        """Toggle the clicked item: lasers directly, switches/valves via the
        manager, with an optional confirmation dialog for risky actuations.
        """
        item = self.active_item
        if item is None:
            return
        if isinstance(item, Laser):
            self._toggle_laser_state(item)
            return
        if isinstance(item, Switch):
            # Plain switches toggle immediately through the manager.
            state = item.state
            state = not state
            mode = 'normal'
            # try:
            if state:
                ok, change = self.manager.open_valve(item.name, mode=mode)
            else:
                ok, change = self.manager.close_valve(item.name, mode=mode)
            # except TypeError, e:
            # ok, change = True, True
        else:
            if not isinstance(item, BaseValve):
                return
            if item.soft_lock:
                # Software-locked valves cannot be actuated from the canvas.
                return
            state = item.state
            if self.confirm_open:
                from pychron.core.ui.dialogs import myConfirmationDialog
                from pyface.api import NO
                # Manual switches and rough-valve openings require explicit
                # user confirmation before actuating.
                if isinstance(item, ManualSwitch) or (isinstance(item, RoughValve) and not state):
                    msg = 'Are you sure you want to {} {}'.format('open' if not state else 'close', item.name)
                    # event.handled = True
                    # NOTE(review): 'Verfiy' typo in the user-facing title.
                    dlg = myConfirmationDialog(
                        message=msg,
                        title='Verfiy Valve Action',
                        style='modal')
                    retval = dlg.open()
                    if retval == NO:
                        return
            state = not state
            change = False
            ok = True
            if self.manager is not None:
                mode = 'normal'
                if event.shift_down:
                    mode = 'shift_select'
                if state:
                    ok, change = self.manager.open_valve(item.name, mode=mode)
                else:
                    ok, change = self.manager.close_valve(item.name, mode=mode)
            if ok:
                item.state = state
        if change and ok:
            self._select_hook(item)
        if change:
            self.invalidate_and_redraw()
def on_lock(self):
item = self._active_item
if item:
item.soft_lock = lock = not item.soft_lock
self.manager.set_software_lock(item.nam
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/django_extensions/management/utils.py
|
Python
|
agpl-3.0
| 2,188
| 0.001828
|
import logging
import os
import sys
from django_extensions.management.signals import post_command, pre_command
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
def setup_logger(logger, stream, filename=None, fmt=None):
    """Sets up a logger (if no handlers exist) for console output,
    and file 'tee' output if desired.

    A DEBUG-level StreamHandler on *stream* is always added; when
    *filename* is given an INFO-level FileHandler is added too, with
    each line prefixed by a timestamp.  (Restores the 'setLevel' call
    that was garbled across a line split in the original.)
    """
    if len(logger.handlers) < 1:
        console = logging.StreamHandler(stream)
        console.setLevel(logging.DEBUG)
        console.setFormatter(logging.Formatter(fmt))
        logger.addHandler(console)
        logger.setLevel(logging.DEBUG)
        if filename:
            outfile = logging.FileHandler(filename)
            # The file copy is less verbose than the console handler.
            outfile.setLevel(logging.INFO)
            outfile.setFormatter(logging.Formatter(
                "%(asctime)s " + (fmt if fmt else '%(message)s')))
            logger.addHandler(outfile)
class RedirectHandler(logging.Handler):
    """Redirect logging sent to one logger (name) to another.

    (The docstring was split by a stray junk line in the original;
    restored here.)
    """

    def __init__(self, name, level=logging.DEBUG):
        # Contemplate feasibility of copying a destination (allow original handler) and redirecting.
        logging.Handler.__init__(self, level)
        self.name = name
        self.logger = logging.getLogger(name)

    def emit(self, record):
        # Hand the record to the target logger's own handlers/filters.
        self.logger.handle(record)
def signalcommand(func):
    """A decorator for management command handle defs that sends out a pre/post signal."""
    from functools import wraps

    @wraps(func)  # preserve the wrapped handler's name/docstring for introspection
    def inner(self, *args, **kwargs):
        pre_command.send(self.__class__, args=args, kwargs=kwargs)
        ret = func(self, *args, **kwargs)
        post_command.send(self.__class__, args=args, kwargs=kwargs, outcome=ret)
        return ret
    return inner
def has_ipdb():
    """Return True when both ipdb and IPython can be imported."""
    try:
        import ipdb  # noqa
        import IPython  # noqa
    except ImportError:
        return False
    return True
|
smallyear/linuxLearn
|
salt/salt/states/elasticsearch_index.py
|
Python
|
apache-2.0
| 2,188
| 0.001828
|
# -*- coding: utf-8 -*-
'''
State module to manage Elasticsearch indices
.. versionadded:: 2015.8.0
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
log = logging.getLogger(__name__)
def absent(name):
    '''
    Ensure that the named index is absent

    name
        Name of the Elasticsearch index to remove.
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    index_exists = __salt__['elasticsearch.index_exists'](index=name)
    if index_exists is None:
        # The original's "failed to determine" branch was unreachable
        # (else after an exhaustive if/elif); check the error case first.
        ret['comment'] = 'Failed to determine whether index {0} is absent, see Minion log for more information'.format(name)
        ret['result'] = False
    elif index_exists:
        if __opts__['test']:
            ret['comment'] = 'Index {0} will be removed'.format(name)
            ret['result'] = None
        else:
            ret['result'] = __salt__['elasticsearch.index_delete'](index=name)
            if ret['result']:
                ret['comment'] = 'Removed index {0} successfully'.format(name)
                # TODO show pending changes (body)
            else:
                ret['comment'] = 'Failed to remove index {0}'.format(name)  # TODO error handling
    else:
        ret['comment'] = 'Index {0} is already absent'.format(name)
    return ret
def present(name, definition):
    '''
    Ensure that the named index is present

    name
        Name of the Elasticsearch index to create.
    definition
        Index definition body passed to elasticsearch.index_create.
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    # BUG FIX: index_exists takes 'index', not 'name' (matches absent()
    # and the index_create call below).
    index_exists = __salt__['elasticsearch.index_exists'](index=name)
    if index_exists is None:
        # Error branch was unreachable in the original (else after an
        # exhaustive if/elif); check it first instead.
        ret['comment'] = 'Failed to determine whether index {0} is present, see Minion log for more information'.format(name)
        ret['result'] = False
    elif not index_exists:
        if __opts__['test']:
            ret['comment'] = 'Index {0} will be created'.format(name)
            ret['result'] = None
        else:
            ret['result'] = __salt__['elasticsearch.index_create'](index=name, body=definition)
            # TODO show pending changes (body)
            if ret['result']:
                ret['comment'] = 'Created index {0} successfully'.format(name)
    else:
        ret['comment'] = 'Index {0} is already present'.format(name)
    return ret
|
BQLQ/BQLQ
|
drum-0.4.0/drum/links/migrations/0003_auto_20171126_2314.py
|
Python
|
bsd-2-clause
| 1,322
| 0.003026
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-26 15:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the UserTag model and relax the link/image fields.

    (Two lines were garbled by stray junk splits in the original:
    'swappable_dependency' and the '0002_auto_20171104_1809' dependency;
    both restored.)
    """

    dependencies = [
        ('generic', '0003_auto_20170411_0504'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('links', '0002_auto_20171104_1809'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='generic.Keyword')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterField(
            model_name='link',
            name='image',
            field=models.FileField(blank=True, max_length=10000, null=True, upload_to='drum', verbose_name='image'),
        ),
        migrations.AlterField(
            model_name='link',
            name='link',
            field=models.URLField(blank=True, null=True, verbose_name='link'),
        ),
    ]
|
pbrf/example_api_python
|
example.py
|
Python
|
mit
| 862
| 0.001277
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import json
data = {}
data['type_blank'] = 'a'
data['from_nam
|
e'] = 'Ивана Ивановича'
data['from_city'] = 'г. Москва'
data['from_street'] = 'ул. Стахановская'
data['from_build'] = '999'
data['from_appartment'] = '99'
data['from_zip'] = '109428'
data['whom_surname'] = 'Петрову'
data['whom_name'] = 'Петру Петровичу'
data['whom_city'] = 'г. Санкт-Петербург'
data['whom_street'] = 'ул. Гоголя'
data['whom_build'] = '
|
888'
data['whom_appartment'] = '88'
data['whom_zip'] = '190000'
data['declared_value'] = '1000.00'
data['COD_amount'] = '1100.00'
params = {}
params['access_token'] = ''
params['data'] = json.dumps(data)
params = urllib.urlencode(params)
f = urllib.urlopen("http://pbrf.ru/pdf.F7", params)
print f.read()
|
2mny/mylar
|
lib/cherrypy/_cpserver.py
|
Python
|
gpl-3.0
| 7,535
| 0.005574
|
"""Manage HTTP servers with CherryPy."""
import warnings
import cherrypy
from cherrypy.lib import attributes
from cherrypy._cpcompat import basestring
# We import * because we want to export check_port
# et al as attributes of this module.
from cherrypy.process.servers import *
class Server(ServerAdapter):
"""An adapter for an HTTP server.
You can set attributes (like socket_host and socket_port)
on *this* object (which is probably cherrypy.server), and call
quickstart. For example::
cherrypy.server.socket_port = 80
cherrypy.quickstart()
"""
socket_port = 8080
"""The TCP port on which to listen for connections."""
_socket_host = '127.0.0.1'
    def _get_socket_host(self):
        # Accessor backing the 'socket_host' property defined below.
        return self._socket_host
    def _set_socket_host(self, value):
        # None is legal here (bind_addr clears it); only '' is rejected,
        # because '' is ambiguous — INADDR_ANY must be spelled '0.0.0.0'.
        if value == '':
            raise ValueError("The empty string ('') is not an allowed value. "
                             "Use '0.0.0.0' instead to listen on all active "
                             "interfaces (INADDR_ANY).")
        self._socket_host = value
socket_host = property(_get_socket_host, _set_socket_host,
doc="""The hostname or IP address on which to listen for connections.
Host values may be any IPv4 or IPv6 address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
your hosts file prefers IPv6). The string '0.0.0.0' is a special
IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
is the similar IN6ADDR_ANY for IPv6. The empty string or None are
not allowed.""")
socket_file = None
"""If given, the name of the UNIX socket to use instead of TCP/IP.
When this option is not None, the `socket_host` and `socket_port` options
are ignored."""
socket_queue_size = 5
"""The 'backlog' argument to socket.listen(); specifies the maximum number
of queued connections (default 5)."""
socket_timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
shutdown_timeout = 5
"""The time to wait for HTTP worker threads to clean up."""
protocol_version = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses,
for example, "HTTP/1.1" (the default). Depending on the HTTP server used,
this should also limit the supported features used in the response."""
thread_pool = 10
"""The number of worker threads to start up in the pool."""
thread_pool_max = -1
"""The maximum size of the worker-thread pool. Use -1 to indicate no limit."""
max_request_header_size = 500 * 1024
"""The maximum number of bytes allowable in the request headers. If exceeded,
the HTTP server should return "413 Request Entity Too Large"."""
max_request_body_size = 100 * 1024 * 1024
"""The maximum number of bytes allowable in the request body. If exceeded,
the HTTP server should return "413 Request Entity Too Large"."""
instance = None
"""If not None, this should be an HTTP server instance (such as
CPWSGIServer) which cherrypy.server will control. Use this when you need
more control over object instantiation than is available in the various
configuration options."""
ssl_context = None
"""When using PyOpenSSL, an instance of SSL.Context."""
ssl_certificate = None
"""The filename of the SSL certificate to use."""
ssl_certificate_chain = None
"""When using PyOpenSSL, the certificate chain to pass to
Context.load_verify_locations."""
ssl_private_key = None
"""The filename of the private key to use with SSL."""
ssl_module = 'pyopenssl'
"""The name of a registered SSL adaptation module to use with the builtin
WSGI server. Builtin options are 'builtin' (to use the SSL library built
into recent versions of Python) and 'pyopenssl' (to use the PyOpenSSL
project, which you must install separately). You may also register your
own classes in the wsgiserver.ssl_adapters dict."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
wsgi_version = (1, 0)
"""The WSGI version tuple to use with the builtin WSGI server.
The provided options are (1, 0) [which includes support for PEP 3333,
which declares it covers WSGI version 1.0.1 but still mandates the
wsgi.version (1, 0)] and ('u', 0), an experimental unicode version.
You may create and register your own experimental versions of the WSGI
protocol by adding custom classes to the wsgiserver.wsgi_gateways dict."""
    def __init__(self):
        """Bind this adapter to the global engine bus; the actual HTTP
        server is created lazily by start()."""
        # NOTE(review): ServerAdapter.__init__ is not called here —
        # presumably intentional; confirm against the base class.
        self.bus = cherrypy.engine
        self.httpserver = None
        self.interrupt = None
        self.running = False
def httpserver_from_self(self, httpserver=None):
"""Return a (httpserver, bind_addr) pair based on self attributes."""
if httpserver is None:
httpserver = self.instance
if httpserver is None:
from cherrypy import _cpwsgi_server
httpserver = _cpwsgi_server.CPWSGIServer(self)
if isinstance(httpserver, basestring):
# Is anyone using this? Can I add an arg?
httpserver = attributes(httpserver)(self)
return httpserver, self.bind_addr
def start(self):
"""Start the HTTP server."""
|
if not self.httpserver:
self.httpserver, self.bind_addr = self.httpserver_from_self()
ServerAdapter.start(self)
start.priority = 75
def _get_bind_addr(self):
if self.socket_file:
return self.socket_file
if self.socket_host is None and se
|
lf.socket_port is None:
return None
return (self.socket_host, self.socket_port)
def _set_bind_addr(self, value):
if value is None:
self.socket_file = None
self.socket_host = None
self.socket_port = None
elif isinstance(value, basestring):
self.socket_file = value
self.socket_host = None
self.socket_port = None
else:
try:
self.socket_host, self.socket_port = value
self.socket_file = None
except ValueError:
raise ValueError("bind_addr must be a (host, port) tuple "
"(for TCP sockets) or a string (for Unix "
"domain sockets), not %r" % value)
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc='A (host, port) tuple for TCP sockets or a str for Unix domain sockets.')
def base(self):
"""Return the base (scheme://host[:port] or sock file) for this server."""
if self.socket_file:
return self.socket_file
host = self.socket_host
if host in ('0.0.0.0', '::'):
# 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
# Look up the host name, which should be the
# safest thing to spit out in a URL.
import socket
host = socket.gethostname()
port = self.socket_port
if self.ssl_certificate:
scheme = "https"
if port != 443:
host += ":%s" % port
else:
scheme = "http"
if port != 80:
host += ":%s" % port
return "%s://%s" % (scheme, host)
|
cogu/py-apx
|
tests/data_type_test.py
|
Python
|
mit
| 3,285
| 0.006088
|
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import apx
import unittest
import remotefile
import autosar
class TestApxDataType(unittest.TestCase):
    """Unit tests for apx.DataType creation, cloning and stringification.

    (Two stray junk lines that split statements inside two test methods
    have been removed.)
    """

    def setUp(self):
        pass

    def test_create_uint8_type(self):
        dt = apx.DataType('MyU8_T', 'C')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.UINT8_TYPE_CODE)
        self.assertEqual(str(dt), 'T"MyU8_T"C')

    def test_create_uint8_range_type(self):
        dt = apx.DataType('MyU8_T', 'C(0,3)')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.UINT8_TYPE_CODE)
        self.assertEqual(str(dt), 'T"MyU8_T"C(0,3)')

    def test_create_uint8_array_type(self):
        dt = apx.DataType('MyU8_T', 'C[8]')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.UINT8_TYPE_CODE)
        self.assertEqual(str(dt), 'T"MyU8_T"C[8]')

    def test_clone_uint8_type(self):
        dt1 = apx.DataType('MyU8_T', 'C')
        dt2 = dt1.clone()
        self.assertIsInstance(dt2, apx.DataType)
        self.assertEqual(dt2.dataElement.typeCode, apx.UINT8_TYPE_CODE)
        self.assertEqual(str(dt2), 'T"MyU8_T"C')

    def test_create_uint16_type(self):
        dt = apx.DataType('MyU16_T', 'S')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.UINT16_TYPE_CODE)

    def test_clone_uint16_type(self):
        dt1 = apx.DataType('MyU16_T', 'S')
        dt2 = dt1.clone()
        self.assertIsInstance(dt2, apx.DataType)
        self.assertEqual(dt2.dataElement.typeCode, apx.UINT16_TYPE_CODE)
        self.assertEqual(str(dt2), 'T"MyU16_T"S')

    def test_create_uint32_type(self):
        dt = apx.DataType('MyU32_T', 'L')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.UINT32_TYPE_CODE)

    def test_clone_uint32_type(self):
        dt1 = apx.DataType('MyU32_T', 'L')
        dt2 = dt1.clone()
        self.assertIsInstance(dt2, apx.DataType)
        self.assertEqual(dt2.dataElement.typeCode, apx.UINT32_TYPE_CODE)
        self.assertEqual(str(dt2), 'T"MyU32_T"L')

    def test_create_ref_type(self):
        dt = apx.DataType('MyRef_T', 'T["MyU8_T"]')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.REFERENCE_TYPE_CODE)
        self.assertEqual(dt.dataElement.typeReference, 'MyU8_T')
        self.assertEqual(str(dt), 'T"MyRef_T"T["MyU8_T"]')

    def test_clone_ref_type(self):
        dt1 = apx.DataType('MyRef_T', 'T["MyU8_T"]')
        dt2 = dt1.clone()
        self.assertIsInstance(dt2, apx.DataType)
        self.assertEqual(dt2.dataElement.typeCode, apx.REFERENCE_TYPE_CODE)
        self.assertEqual(dt2.dataElement.typeReference, 'MyU8_T')
        self.assertEqual(str(dt2), 'T"MyRef_T"T["MyU8_T"]')

    def test_record_type(self):
        dt = apx.DataType('MyRecord_T', '{"Name"a[8]"Id"L"Data"S[3]}')
        self.assertIsInstance(dt, apx.DataType)
        self.assertEqual(dt.dataElement.typeCode, apx.RECORD_TYPE_CODE)
        self.assertEqual(str(dt), 'T"MyRecord_T"{"Name"a[8]"Id"L"Data"S[3]}')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
bitmazk/django-linklist
|
linklist/tests/test_settings.py
|
Python
|
mit
| 2,306
| 0
|
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = 'linklist.tests.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(__file__, '../../../static/')
MEDIA_ROOT = os.path.join(__file__, '../../../media/')
STATICFILES_DIRS = (
os.path.join(__file__, 'tests/test_static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSys
|
temFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), '../templates'),
)
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
o
|
s.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
'filer',
'easy_thumbnails',
]
INTERNAL_APPS = [
'linklist.tests.test_app',
'linklist',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
SECRET_KEY = 'PoP43cj5=(cj36$_8a!6ar0u"(hF5b24kns&gz7u*k*@a5tCCf'
LANGUAGES = [
('en', 'English'),
]
|
h4ck3rm1k3/OpenWrt-Firefly-SDK
|
staging_dir/host/lib/scons-2.3.1/SCons/Environment.py
|
Python
|
gpl-2.0
| 96,203
| 0.002276
|
"""SCons.Environment
Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software")
|
, to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Environment.py 2014/03/02 14:18:15 garyo"
import copy
import os
import sys
import re
import shlex
from collections import UserDict
import SCons.Action
import SCons.Builder
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Defaults
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Node.Python
import SCons.Platform
import SCons.SConf
import SCons.SConsign
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Warnings
class _Null(object):
    """Sentinel type used to distinguish 'argument omitted' from None."""
    pass
_null = _Null
# Module-level switches controlling one-time deprecation warnings.
_warn_copy_deprecated = True
_warn_source_signatures_deprecated = True
_warn_target_signatures_deprecated = True
# Registries shared across Environments.
CleanTargets = {}
CalculatorArgs = {}
semi_deepcopy = SCons.Util.semi_deepcopy
semi_deepcopy_dict = SCons.Util.semi_deepcopy_dict
# Pull UserError into the global name space for the benefit of
# Environment().SourceSignatures(), which has some import statements
# which seem to mess up its ability to reference SCons directly.
UserError = SCons.Errors.UserError
def alias_builder(env, target, source):
    # Aliases are pseudo-targets: "building" one performs no action.
    pass
AliasBuilder = SCons.Builder.Builder(action = alias_builder,
                                     target_factory = SCons.Node.Alias.default_ans.Alias,
                                     source_factory = SCons.Node.FS.Entry,
                                     multi = 1,
                                     is_explicit = None,
                                     name='AliasBuilder')
def apply_tools(env, tools, toolpath):
    """Initialize each requested tool on *env*, recording the toolpath."""
    # Store the toolpath in the Environment.
    if toolpath is not None:
        env['toolpath'] = toolpath
    if not tools:
        return
    # Null entries in the list are skipped.
    for tool in filter(None, tools):
        if SCons.Util.is_List(tool) or isinstance(tool, tuple):
            # (name, kwargs) form: kwargs is a dict of keyword arguments.
            name, kwargs = tool[0], tool[1]
            env.Tool(name, **kwargs)
        else:
            env.Tool(tool)
# These names are (or will be) controlled by SCons; users should never
# set or override them.  This warning can optionally be turned off,
# but scons will still ignore the illegal variable names even if it's off.
reserved_construction_var_names = [
    'CHANGED_SOURCES',
    'CHANGED_TARGETS',
    'SOURCE',
    'SOURCES',
    'TARGET',
    'TARGETS',
    'UNCHANGED_SOURCES',
    'UNCHANGED_TARGETS',
]
# Candidate names for future reservation; currently none are active.
future_reserved_construction_var_names = [
    #'HOST_OS',
    #'HOST_ARCH',
    #'HOST_CPU',
]
def copy_non_reserved_keywords(dict):
    """Return a semi-deep copy of *dict* with reserved construction
    variable names stripped out, warning once per removed key.

    (The parameter is named ``dict`` for backward compatibility with
    existing keyword callers, even though it shadows the builtin.)
    """
    result = semi_deepcopy(dict)
    # Iterate over a snapshot of the keys: we delete from `result` while
    # looping, and on Python 3 mutating a dict during iteration over a
    # live .keys() view raises RuntimeError.
    for k in list(result.keys()):
        if k in reserved_construction_var_names:
            msg = "Ignoring attempt to set reserved variable `$%s'"
            SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k)
            del result[k]
    return result
def _set_reserved(env, key, value):
    """Setter hook for reserved construction variables: warn and drop the
    assignment (the value is deliberately NOT stored)."""
    msg = "Ignoring attempt to set reserved variable `$%s'"
    SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % key)
def _set_future_reserved(env, key, value):
    """Setter hook for soon-to-be-reserved variables: unlike
    _set_reserved(), the assignment still takes effect, but a deprecation
    warning is emitted."""
    env._dict[key] = value
    msg = "`$%s' will be reserved in a future release and setting it will become ignored"
    SCons.Warnings.warn(SCons.Warnings.FutureReservedVariableWarning, msg % key)
def _set_BUILDERS(env, key, value):
    """Setter hook for env['BUILDERS']: replace the contents of the
    BuilderDict in place (so existing references stay valid), after
    validating that every value is a real Builder.

    Raises SCons.Errors.UserError if any value is not a Builder.
    """
    try:
        bd = env._dict[key]
        # Empty the existing BuilderDict in place; snapshot the keys
        # first because deleting while iterating a live keys() view
        # raises RuntimeError on Python 3.
        for k in list(bd.keys()):
            del bd[k]
    except KeyError:
        # No BUILDERS dict yet: create a fresh, empty one. (The previous
        # code passed an undefined name `kwbd` here, which would raise
        # NameError the first time this branch ran.)
        bd = BuilderDict({}, env)
        env._dict[key] = bd
    for k, v in value.items():
        if not SCons.Builder.is_a_Builder(v):
            raise SCons.Errors.UserError('%s is not a Builder.' % repr(v))
    bd.update(value)
def _del_SCANNERS(env, key):
del env._dict[key]
env.scanner_map_delete()
def _set_SCANNERS(env, key, value):
env._dict[key] = value
env.scanner_map_delete()
def _delete_duplicates(l, keep_last):
"""Delete duplicates from a sequence, keeping the first or last."""
seen={}
result=[]
if keep_last: # reverse in & out, then keep first
l.reverse()
for i in l:
try:
if i not in seen:
result.append(i)
seen[i]=1
except TypeError:
# probably unhashable. Just keep it.
result.append(i)
if keep_last:
result.reverse()
return result
# The following is partly based on code in a comment added by Peter
# Shannon at the following page (there called the "transplant" class):
#
# ASPN : Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
#
# We had independently been using the idiom as BuilderWrapper, but
# factoring out the common parts into this base class, and making
# BuilderWrapper a subclass that overrides __call__() to enforce specific
# Builder calling conventions, simplified some of our higher-layer code.
class MethodWrapper(object):
    """
    Associates a callable with a specific object, simulating a bound
    method.

    Constructing a MethodWrapper installs the wrapper itself on the
    target object under the given name (defaulting to the callable's
    __name__). Calling the wrapper then invokes the callable with the
    target object prepended as the first argument, mimicking Python's
    implicit "self".

    The name is remembered so clone() can re-install an equivalent
    wrapper on a copy of the underlying object.
    """
    def __init__(self, object, method, name=None):
        self.object = object
        self.method = method
        self.name = method.__name__ if name is None else name
        # Publish ourselves on the target object under the chosen name.
        setattr(self.object, self.name, self)
    def __call__(self, *args, **kwargs):
        # Prepend the bound object, emulating an implicit "self".
        return self.method(self.object, *args, **kwargs)
    def clone(self, new_object):
        """
        Return a wrapper of the same class re-binding the underlying
        callable to *new_object* (and installing it there).
        """
        return self.__class__(new_object, self.method, self.name)
class BuilderWrapper(MethodWrapper):
"""
A MethodWrapper subclass that that associates an environment with
a Builder.
This mainly exists to wrap the __call__() function so that all calls
to Builders can have their argument lists massaged in the same way
(treat a lone argument as the source, treat two arguments as target
then source, make sure both target and source are lists) without
having
|
sdjcw/python-getting-started
|
app.py
|
Python
|
mit
| 638
| 0
|
# coding: utf-8
from datetime import datetime
from flask import Flask
from flask import render_template
from views.todos import todos_view
app = Flask(__name__)
ap
|
p.register_blueprint(todos_view, url_prefix='/todos')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/time')
def time():
return str(datetime.now())
@app.route('/1/ping')
def ping():
    """Health check endpoint.

    LeanEngine probes `/1/ping` to decide whether the app is running
    normally: an HTTP 200 response means healthy; any other status code,
    or no response within 5 seconds, is treated as abnormal.
    """
    return 'pong'
|
william-richard/moto
|
moto/awslambda/__init__.py
|
Python
|
apache-2.0
| 305
| 0
|
from __future__ im
|
port unicode_literals
from .models import lambda_backends
from ..core.models import base_decorator, deprecated_base_decorator
lambda_backend = lambda_backends["us-east-1"]
mock_lambda = base_decorator(lambda_backends)
mock_lambda_deprecated = depreca
|
ted_base_decorator(lambda_backends)
|
DMOJ/judge
|
dmoj/executors/clang_executor.py
|
Python
|
agpl-3.0
| 293
| 0
|
from .gcc_executor import
|
GCCExecutor, MAX_ERRORS
class ClangExecutor(GCCExecutor):
arch = 'clang_target_arch'
def get_flags(se
|
lf):
return self.flags + ['-ferror-limit=%d' % MAX_ERRORS]
@classmethod
def get_version_flags(cls, command):
return ['--version']
|
jht0664/Utility_python_gromacs
|
MonteCarlo/python/initial.py
|
Python
|
mit
| 9,683
| 0.030776
|
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 6/1/2017
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Initial coordiante of particles for MC simulation')
## args
#parser.add_argument('-i', '--input', default='init.ic', nargs='?',
# help='input file')
parser.add_argument('-na', '--nmola', nargs='?', type=int,
help='# particles of A')
parser.add_argument('-nb', '--nmolb', nargs='?', type=int,
help='# particles of B')
parser.add_argument('-r', '--ratio', default=5.0, nargs='?', type=float,
help='ratio of box-z/box-x (box-x = box-y)')
parser.add_argument('-sep', '--sep', default='NO', nargs='?', type=str,
help='pre-separation YES/NO')
parser.add_argument('-fr', '--frac', default=1.0, nargs='?', type=float,
help='number fraction of A of one phase if -sep YES (')
parser.add_argument('-d', '--dens', nargs='?', type=float,
help='number density')
parser.add_argument('-sa', '--sizea', default=1.0, nargs='?', type=float,
help='diameter of A')
parser.add_argument('-sb', '--sizeb', default=1.0, nargs='?', type=float,
help='diameter of B')
parser.add_argument('-mt', '--maxtry', default=0, nargs='?', type=int,
help='attemps for random insertion (if zero, do lattice insertion)')
parser.add_argument('-fm', '--format', default='MC', nargs='?', type=str,
help='Save in fortran MC format (MC), .npz format (NPZ), or .gro format (GRO)')
parser.add_argument('-o', '--output', default='init', nargs='?', type=str,
help='output file (exclude extension name) ')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
# read args
args = parser.parse_args()
print(" input arguments: {0}".format(args))
# default for args
#args.input = args.input if args.input is not None else 'init.ic'
if args.sep != 'YES' and args.sep != 'NO':
raise ValueError("Wrong argument for pre-separation option")
if args.sep == 'NO' and args.frac != 1.0:
raise ValueError("-sep and -fr not matched or not set")
if args.sep == 'YES':
if args.nmola != args.nmolb:
raise ValueError("Not support the different #particles of A and B")
if args.format != 'MC' and args.format != 'NPZ' and args.format != 'GRO':
raise ValueError("Wrong argument for format!")
if args.format == 'MC':
args.output = args.output+'.ic'
elif args.format == 'NPZ':
args.output = args.output+'.npz'
elif args.format == 'GRO':
args.output = args.output+'.gro'
# numpy double precision
import numpy as np
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
## timer
start_proc, start_prof = hjung.time.init()
# determine box size
print("="*30)
ntot = args.nmola + args.nmolb
print("Total number of molecules = %s" %ntot)
boxl = (float(ntot)/args.dens/args.ratio)**(1.0/3.0)
box = np.array((boxl, boxl, boxl*args.ratio))
print("Box = %s" %(box))
# attemp to insertion only for Widom-Rowlinson Mixture
coordinates = np.zeros((ntot, 3))
def overlap(new_index, coordinates, nmola, distance, box):
    """Return 1 if the trial particle at row new_index lies within
    `distance` of any of the first `nmola` particles (under periodic
    minimum-image convention); return 0 on a clean insertion."""
    import numpy as np
    # Minimum-image displacement of every A particle from the trial one.
    delta = coordinates[:nmola] - coordinates[new_index]
    delta = delta - box * np.around(delta / box)
    if (np.linalg.norm(delta, axis=1) < distance).any():
        return 1  # overlap!
    return 0  # success for insertion
print("="*30)
print("Start Insertion")
maxa = int(args.nmola*args.frac)
maxb = int(args.nmolb*args.frac)
if args.maxtry > 0:
# try random insertion
for i in range(args.nmola):
if args.sep == 'YES' and i < maxa:
# if you set specific fraction and pre-separation
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*0.50*box[2]]
elif args.sep == 'YES' and i >= maxa:
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
(np.random.random_sample()*0.50+0.50)*box[2]]
else:
# if you set random
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*box[2]]
curr_index = args.nmola
ntry = 1
nmod = 10
while (curr_index < ntot):
if ntry%nmod == 0:
print("%d th random trials (%s/%s)th particle" %(ntry,curr_index,ntot))
if (ntry/nmod)%10 == 0:
nmod = nmod*10
if ntry > args.maxtry:
print("Hard to insert because ntry > maxtry.")
print("I made initial coordinates with %d out of %d molecules" %(curr_index-1,ntot))
break
if args.sep == 'YES' and curr_index < args.nmola+maxb:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
(np.random.random_sample()*0.50+0.50)*box[2]]
elif args.sep == 'YES' and curr_index >= args.nmola+maxb:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*0.50*box[2]]
else:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*box[2]]
dist = 0.5*(args.sizea + args.sizeb)
success = overlap(curr_index,coordinates,args.nmola,dist,box)
if success == 0:
#print("succees {0}".format(curr_index))
#print("succees {} {} {}".format(coordinates[curr_index][0],coordinates[curr_index][1],coordinates[curr_index][2]))
curr_index = curr_index + 1
ntry = ntry + 1
else:
# try lattice insertion
maxsize = max(args.sizea,args.sizeb)
ncellx = np.int(np.floor(box[0]/maxsize))
ncelly = np.int(np.floor(box[1]/maxsize))
ncellz = np.int(np.floor(box[2]/maxsize))
ncell = ncellx*ncelly*ncellz
if ncell < ntot:
raise ValueError("Not possible to use lattice insertion because #particles > #cells")
occupy_cell = np.zeros((ncellx,ncelly,ncellz),dtype=int)
i = 0
ntry = 1
nmod = 10
print("Try Insertion of A")
while (i < args.nmola):
if ntry%nmod == 0:
print("%d th lattice trials (%s/%s)th particle" %(ntry,i,ntot))
if (ntry/nmod)%nmod == 0:
nmod = nmod*nmod
icx = np.trunc(np.random.random_sample()*box[0]/maxsize)
icy = np.trunc(np.random.random_sample()*box[1]/maxsize)
if args.sep == 'YES' and i < maxa:
icz = np.trunc(np.random.random_sample()*0.50*box[2]/maxsize)
elif args.sep == 'YES' and i >= maxa:
icz = np.trunc((np.random.random_sample()*0.50+0.50)*box[2]/maxsize)
else:
icz = np.trunc(np.random.random_sample()*box[2]/maxsize)
if icx < ncellx and icy < ncelly and icz < ncellz:
randx = (icx+0.5)*maxsize
randy = (icy+0.5)*maxsize
randz = (icz+0.5)*maxsize
coordinates[i] = [randx,randy,randz]
occupy_cell[np.int(icx)][np.int(icy)][np.int(icz)] = 1
i = i + 1
ntry = ntry + 1
curr_index = args.nmola
ntry = 1
nmod = 10
print("Try Insertion of B")
while (curr_index < ntot):
if ntry%nmod == 0:
print("%d th lattice trials (%s/%s)th particle" %(ntry,curr_index,ntot))
if (
|
ntry/nmod)%nmod == 0:
nmod = nmod*nmod
icx = np.trunc(np.random.random_sample()*box[0]/maxsize)
icy = np.trunc(np.random.random_sample()*box[1]/maxsize)
if args.sep == 'YES' and curr_index < args.nmola+maxb:
icz = np.trunc((np.random.random_sample()*0.50+0.50)*box[2]/maxsize)
elif args.sep == 'YES' and curr_index >= args.nmola+maxb:
icz = np.trunc(np.random.random_sample()*0.50*
|
box[2]/maxsize)
else:
icz = np.trunc(np.random.random_sample()*box[2]/maxsize)
randx = (icx+0.5)*maxsize
randy = (icy+0.5)*maxsize
randz = (icz+0.5)*maxsize
coordinates[curr_index] = [randx,randy,randz]
ntry = ntry + 1
if icx >= ncellx or icy >= ncelly or icz >= ncellz:
continue
elif occupy_cell[np.int(icx)][np.int(icy)][np.int(icz)] == 0:
curr_index = curr_index + 1
# save initial coordinates
print("="*30)
print("Saving OutputFile...")
if args.format == 'NPZ':
# array_argument = nmola, nmolb, coord, box
np.savez(args.output,nmola=args.nmola,nmolb=args.nmolb,coord=coordinates,box=box)
elif args.format == 'GRO':
# gromacs version
output_f
|
TeamAADGT/CMPUT404-project-socialdistribution
|
social/app/migrations/0007_merge_20170408_2326.py
|
Python
|
apache-2.0
| 334
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-08 23:26
from __future__ import unicode_literals
from django.db import m
|
igrations
class Migration(migrations.Migration):
dependencies = [
('app', '0006_author_has_github_task'),
('app', '0006_auto_20170405_1957'),
|
]
operations = [
]
|
AaronWatters/inferelator_strawman
|
inferelator_strawman/condition.py
|
Python
|
bsd-2-clause
| 1,467
| 0.004772
|
import pandas as pd
class Condition:
"""
A condition maps gene names to numbers which often represent expression levels.
Parameters
----------
condition_name: str
A unique name identifying the condition.
gene_mapping: pd.Series
A pandas Series holding the gene to number mapping or an object that
can be converted to a pandas Series.
"""
META_DATA_HEADER = "\t".join(["isTs", "is1stLast", "prevCol", "del.t", "condName"]) + "\n"
    def __init__(self, condition_name, gene_mapping):
        """Store the condition name and coerce *gene_mapping* into a
        pandas Series named after the condition."""
        self.name = condition_name
        self.gene_mapping = pd.Series(gene_mapping, name=condition_name)
    def __repr__(self):
        "Printable representation for diagnostics (name plus object id)."
        return "Condition" + repr((self.name, id(self)))
    def response_scalar(self, gene_name):
        "Return the gene 'response' (mapped value) for this condition."
        return self.gene_mapping[gene_name]
def design_vector(self, transcription_factors):
"Retu
|
rn a 1d array of transcription factor coefficients for condition."
return self.gene_mapping[transcription_factors]
def meta_data_tsv_line(self, isTs=False, is1stLast="e", prevCol=None, delt=None):
def f(s):
if s is None:
return "NA"
else:
return repr(s).replace("'", '"')
data = [isTs, is1stLast, prevCol, delt,
|
self.name]
return "\t".join(map(f, data)) + "\n"
|
Hainish/cerealbox
|
server/serial_comm.py
|
Python
|
gpl-3.0
| 372
| 0.024194
|
import serial, sys
class SerialComm():
def __init
|
__(self, serial_device):
try:
self.ser = serial.Serial(serial_device, 9600, timeout=5)
except serial.serialutil.SerialException, e:
print "Unable to open serial port "+serial_device
sys.exit()
|
def close(self):
self.ser.close()
def writeln(self, line):
self.ser.write(line+"\n")
|
pkerpedjiev/ernwin
|
fess/scripts/bounding_box_coords.py
|
Python
|
agpl-3.0
| 2,959
| 0.007435
|
#!/usr/bin/python
import sys
import numpy as np
import Bio.PDB as bp
import tess.threedee.model.coarse_grain as ttmc
import borgy.graph.graph_pdb as cgg
import borgy.visual.pymol as cvp
from optparse import OptionParser
def main():
usage = './bounding_box_coords.py temp.comp temp.pdb'
usage += "Print out the coordinates of two diagonal corners of the"
usage += 'bounding box of each stem nucleotide in
|
the pdb file.'
parser = OptionParser()
#parser.add_option('-o', '--options', dest='some_option', default='yo', help="Place holder for a real option", type='str')
parser.add_option('-e', '--edge', dest='edge', default=False, action='store_true', help='Include the edge nucleotides in the statistics.')
parser.add_option('-p', '--pymol', dest='pymol', default=False, action='store_true', help='Output in pymol
|
cgo format.')
parser.add_option('-a', '--averages', dest='averages', default=False, action='store_true', help='Output the average coordinates of the bounding boxes')
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit(1)
chain = list(bp.PDBParser().get_structure('temp', args[1]).get_chains())[0]
bg = ttmc.CoarseGrainRNA(args[0])
pp = cvp.PymolPrinter()
pp.draw_axes = True
all_corners = []
for s in bg.stems():
for i in range(bg.stem_length(s)):
if i == 0 or i == bg.stem_length(s) - 1:
if options.edge:
pass
else:
continue
(origin, basis, corners) = cgg.bounding_boxes(bg, chain, s, i)
all_corners += [corners]
if options.pymol:
pp.add_sphere(corners[0][0], 'yellow', 0.1, '', [238/255., 221/255., 130/255.])
pp.add_sphere(corners[0][1], 'yellow', 0.1, '', [184/255.,134/255.,11/255.])
pp.add_sphere(corners[1][0], 'purple', 0.1, '', [238/255., 130/255., 238/255.])
pp.add_sphere(corners[1][1], 'purple', 0.1, '', [208/255., 32/255., 144/255.])
else:
print '0','0', " ".join(map(str, corners[0][0]))
print '0','1', " ".join(map(str, corners[0][1]))
print '1','0', " ".join(map(str, corners[1][0]))
print '1','1', " ".join(map(str, corners[1][1]))
if options.averages:
all_corners = np.array(all_corners)
print '--averages--'
print '0', '0', " ".join(map(str, np.mean(np.array([c[0][0] for c in all_corners]), axis=0)))
print '0', '1', " ".join(map(str, np.mean(np.array([c[0][1] for c in all_corners]), axis=0)))
print '1', '0', " ".join(map(str, np.mean(np.array([c[1][0] for c in all_corners]), axis=0)))
print '1', '1', " ".join(map(str, np.mean(np.array([c[1][1] for c in all_corners]), axis=0)))
if options.pymol:
pp.output_pymol_file()
if __name__ == '__main__':
main()
|
JaxxC/goodgame.xbmc
|
plugin.video.goodgame/resources/lib/m3u8/model.py
|
Python
|
gpl-2.0
| 22,352
| 0.002058
|
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
from collections import namedtuple
import os
import errno
import math
try:
import urlparse as url_parser
except ImportError:
import urllib.parse as url_parser
import parser
class M3U8(object):
'''
Represents a single M3U8 playlist. Should be instantiated with
the content as string.
Parameters:
`content`
the m3u8 content as string
`base_path`
all urls (key and segments url) will be updated with this base_path,
ex.:
base_path = "http://videoserver.com/hls"
/foo/bar/key.bin --> http://videoserver.com/hls/key.bin
http://vid.com/segment1.ts --> http://videoserver.com/hls/segment1.ts
can be passed as parameter or setted as an attribute to ``M3U8`` object.
`base_uri`
uri the playlist comes from. it is propagated to SegmentList and Key
ex.: http://example.com/path/to
Attributes:
`key`
it's a `Key` object, the EXT-X-KEY from m3u8. Or None
`segments`
a `SegmentList` object, represents the list of `Segment`s from this playlist
`is_variant`
Returns true if this M3U8 is a variant playlist, with links to
other M3U8s with different bitrates.
If true, `playlists` is a list of the playlists available,
and `iframe_playlists` is a list of the i-frame playlists available.
`is_endlist`
Returns true if EXT-X-ENDLIST tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.8
`playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
Playlist objects
`iframe_playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
IFramePlaylist objects
`playlist_type`
A lower-case string representing the type of the playlist, which can be
one of VOD (video on demand) or EVENT.
`media`
If this is a variant playlist (`is_variant` is True), returns a list of
Media objects
`target_duration`
Returns the EXT-X-TARGETDURATION as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.2
`media_sequence`
Returns the EXT-X-MEDIA-SEQUENCE as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.3
`program_date_time`
Returns the EXT-X-PROGRAM-DATE-TIME as a string
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5
`version`
Return the EXT-X-VERSION as is
`allow_cache`
Return the EXT-X-ALLOW-CACHE as is
`files`
Returns an iterable with all files from playlist, in order. This includes
segments and key uri, if present.
`base_uri`
It is a property (getter and setter) used by
SegmentList and Key to have absolute URIs.
`is_i_frames_only`
Returns true if EXT-X-I-FRAMES-ONLY tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.12
`is_independent_segments`
Returns true if EXT-X-INDEPENDENT-SEGMENTS tag present in M3U8.
https://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.16
'''
simple_attributes = (
# obj attribute # parser attribute
('is_variant', 'is_variant'),
('is_endlist', 'is_endlist'),
('is_i_frames_only', 'is_i_frames_only'),
('target_duration', 'targetduration'),
('media_sequence', 'media_sequence'),
('program_date_time', 'program_date_time'),
('is_independent_segments', 'is_independent_segments'),
('version', 'version'),
('allow_cache', 'allow_cache'),
('playlist_type', 'playlist_type')
)
def __init__(self, content=None, base_path=None, base_uri=None, strict=False):
if content is not None:
self.data = parser.parse(content, strict)
else:
self.data = {}
self._base_uri = base_uri
if self._base_uri:
if not self._base_uri.endswith('/'):
self._base_uri += '/'
self._initialize_attributes()
self.base_path = base_path
def _initialize_attributes(self):
self.key = Key(base_uri=self.base_uri, **self.data['key']) if 'key' in self.data else None
self.segments = SegmentList([ Segment(base_uri=self.base_uri, **params)
for params in self.data.get('segments', []) ])
for attr, param in self.simple_attributes:
setattr(self, attr, self.data.get(param))
self.files = []
if self.key:
self.files.append(self.key.uri)
self.files.extend(self.segments.uri)
self.media = MediaList([ Media(base_uri=self.base_uri,
**media)
for media in self.data.get('media', []) ])
self.playlists = PlaylistList([ Playlist(base_uri=self.base_uri,
media=self.media,
**playlist)
for playlist in self.data.get('playlists', []) ])
self.iframe_playlists = PlaylistList()
for ifr_pl in self.data.get('iframe_playlists', []):
self.iframe_playlists.append(
IFramePlaylist(base_uri=self.base_uri,
uri=ifr_pl['uri'],
iframe_stream_info=ifr_pl['iframe_stream_info'])
)
def __unicode__(self):
return self.dumps()
@property
def base_uri(self):
return self._base_uri
@base_uri.setter
def base_uri(self, new_base_uri):
self._base_uri = new_base_uri
self.media.base_uri = new_base_uri
self.playlists.base_uri = new_base_uri
self.segments.base_uri = new_base_uri
@property
def base_path(self):
return self._base_path
@base_path.setter
def base_path(self, newbase_path):
self._base_path = newbase_path
self._update_base_path()
def _update_base_path(self):
if self._base_path is None:
return
if self.key:
self.key.base_
|
path = self.base_path
self.media.base_path = self.base_path
self.segments.base_path = self.base_path
self.playlists.base_path = self.base_path
def add_playlist(self, playlist):
self.is_variant = True
self.playlists.append(playlist)
def add_iframe_playlist(self, iframe_playlist):
if iframe_playlist is not None:
self.is_vari
|
ant = True
self.iframe_playlists.append(iframe_playlist)
def add_media(self, media):
self.media.append(media)
def add_segment(self, segment):
self.segments.append(segment)
def dumps(self):
'''
Returns the current m3u8 as a string.
You could also use unicode(<this obj>) or str(<this obj>)
'''
output = ['#EXTM3U']
if self.is_independent_segments:
output.append('#EXT-X-INDEPENDENT-SEGMENTS')
if self.media_sequence:
output.append('#EXT-X-MEDIA-SEQUENCE:' + str(self.media_sequence))
if self.allow_cache:
output.append('#EXT-X-ALLOW-CACHE:' + self.allow_cache.upper())
if self.version:
output.append('#EXT-X-VERSION:' + self.version)
if self.key:
output.append(str(self.key))
if self.target_duration:
output.append('#EXT-X-TARGETDURATION:' + int_or_float_to_string(self.target_duration))
if self.program_date_time is not None:
output.append('#EXT-X-PROGRAM-DATE-TIME:' + parser.format_date_time(self.program_date_time))
if not (self.playlist_type is None or self.playlist_
|
lmmsoft/LeetCode
|
LeetCode-Algorithm/1243. Array Transformation/1243.py
|
Python
|
gpl-2.0
| 992
| 0.002016
|
from typing import List
class Solution:
def transformArray2(self, arr: List[int]) -> List[int]:
while True:
arr2 = [a for a in arr]
changed = 0
for id in range(1
|
, len(arr) - 1):
l = arr[id - 1]
r = arr[id + 1]
m = arr[id]
if l > m and r > m:
m += 1
changed += 1
elif l < m and r < m:
m -= 1
changed += 1
arr2[id] = m
arr = arr2
if changed == 0:
break
return arr
|
def transformArray(self, A):
for _ in range(100):
A = A[:1] + [b + (a > b < c) - (a < b > c) for a, b, c in zip(A, A[1:], A[2:])] + A[-1:]
return A
if __name__ == '__main__':
assert Solution().transformArray([6, 2, 3, 4]) == [6, 3, 3, 4]
assert Solution().transformArray([1, 6, 3, 4, 3, 5]) == [1, 4, 4, 4, 4, 5]
|
kevinkle/semantic
|
superphy/src/upload/python/sparql/user.py
|
Python
|
apache-2.0
| 2,243
| 0.009808
|
#!/usr/bin/python
#Filename: user_query_strings.py
#Author: Bryce Drew
#Date: Sept. 30, 2015
#Functionality:
#These are sparql queries. They are designed to be specific for the user ontology:
from sparql.endpoint import Endpoint
def add_literal_by_email(email, predicate, literal):
    """Attach `literal` via `predicate` to every individual whose
    hasEmail property equals `email`, then return the raw SPARQL string.

    SECURITY NOTE(review): `predicate`, `literal` and `email` are spliced
    into the query by %-interpolation with no escaping — a quote in any
    of them allows SPARQL injection. Sanitize or parameterize upstream.
    """
    update = """
    PREFIX email: <https://github.com/superphy#hasEmail>
    PREFIX p: <%s>
    INSERT{
    ?indv p: '%s'
    }
    WHERE {
    ?indv email: '%s'
    }""" % (predicate, literal, email)
    Endpoint.update(update)
    return update
def last_user():
return Endpoint.query("""
PREFIX user: <https://github.com/superphy#User>
PREFIX RDF_type: <http://www.w3.org/1999/02/22-rdf-syntax-ns#type>
SELECT ?s
WHERE {
?s RDF_type: user:
}
ORDER BY DESC(?s)
LIMIT 1
""")
def email_exists(email):
return Endpoint.ask("""ASK {?x <https://github.com/superphy#hasEmail> '%s'}""" % (email))
def insert_user(sparql_id, email):
return Endpoint.update("""
PREFIX user: <https://github.com/superphy#User>
PREFIX owl_NamedIndividual: <http://www.w3.org/2002/07/owl#>
PREFIX RDF_type: <http://www.w3.org/1999/02/22-rdf-syntax-ns#type>
PREFIX indv: <https://github.com/superphy#%s.User>
PREFIX email: <https://github.com/superphy#hasEmail>
INSERT DATA{
indv: RDF_type: owl_NamedIndividu
|
al:.
indv: RDF_type: user:.
indv: email: '%s'
}""" % (sparql_id, email.lower()))
#Called by sign_up app vi
|
ew
def insert_next_user(email="no"):
    """Allocate the next numeric user id and insert a new User for
    `email` (lower-cased). Returns False if the email already exists,
    otherwise the result of insert_user()."""
    # See if the email already exists.
    if email_exists(email.lower()):
        #This doesn't actually stop anything with the sql though, so don't
        #think this is a good place to put a validator. -Bryce
        print "Already Exists!"
        return False
    # Get the next user id from the highest existing User URI.
    results = last_user()
    user_id = 0
    for result in results["results"]["bindings"]:
        # enumerate('s') iterates the single character 's' — i.e. `thing`
        # is just the binding variable name used in last_user()'s SELECT.
        for i, thing in enumerate('s'):
            # [28:-5] presumably slices the numeric id out of a URI of the
            # form <https://github.com/superphy#<id>.User> — TODO confirm.
            user_id = int(result[thing]['value'][28:-5]) + 1
        break
        break  # NOTE(review): unreachable — the first break already exits.
    # Fill in a debug email for testing purposes.
    if email == "no":
        email = "test.%s@test.com" % user_id #This is here to provide an easy sample email when debugging.
    return insert_user(user_id, email.lower())
|
dmsovetov/pygling
|
Pygling/__main__.py
|
Python
|
mit
| 3,421
| 0.042093
|
#!/usr/bin/python
#################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
import os, argparse
from Workspace import Workspace
# class Pygling
class Pygling:
@staticmethod
def main():
name = os.path.basename( os.getcwd() )
# Parse arguments
parser = argparse.ArgumentParser( pro
|
g = 'Pygling', description = 'Pygling C++ workspace generator.', prefix_chars = '--', formatter_class = argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument( "action", type = str, help = "Action", choices = ["configure", "build", "install"] )
parser.add_argument( "-p",
|
"--platform", default = 'all', type = str, help = "Target platform" )
parser.add_argument( "-s", "--source", default = '.', type = str, help = "Project source path" )
parser.add_argument( "-o", "--output", default = 'projects', type = str, help = "Output path" )
parser.add_argument( "-n", "--name", default = name, type = str, help = "Workspace (solution) name" )
parser.add_argument( "-a", "--arch", default = 'default', type = str, help = "Target build architecture" )
parser.add_argument( "-x", "--std", default = 'cxx98', type = str, help = "C++ standard", choices = ['cxx99', 'cxx11'] )
parser.add_argument( "-c", "--configuration", default = 'Release', type = str, help = "Build configuration" )
parser.add_argument( "--package", type = str, help = "Application package identifier" )
parser.add_argument( "--platformSdk", type = str, help = "Platform SDK identifier" )
parser.add_argument( "--xcteam", type = str, help = "Xcode provisioning profile to be used" )
# Check action
args, unknown = parser.parse_known_args()
workspace = Workspace(args.name, args.source, args.output, args, unknown)
if args.action == 'configure': workspace.configure(args.platform)
elif args.action == 'build': workspace.build(args.platform)
elif args.action == 'install': workspace.install(args.platform)
# Entry point
if __name__ == "__main__":
Pygling.main()
|
CBE544/CBE544.github.io
|
ASE/Getting_Started/run_sp.py
|
Python
|
gpl-2.0
| 2,085
| 0.026859
|
from ase import *
from espresso import espresso
from ase.lattice import bulk
import matplotlib
matplotlib.use('Agg') #turn off screen output so we can plot from the cluster
import matplotlib.pyplot as plt
import numpy as np
metal = 'Pt'
metal2 = None # if you have an alloy, specify the second metal
name = metal
if metal2:
name += metal2
a = 3.989 # optimized lattice constant
kpts = [6, 8, 10, 14, 18] # list of k-points to try
energies = []
# if Mo then use bcc crystal, otherwise fcc
if metal == 'Mo':
crystal = 'bcc'
else:
crystal = 'fcc'
for k in kpts:
if metal2:
atoms = bulk(metal, crystal, a=a, cubic=True)
atoms.set_chemical_symbols(metal+'3'+metal2)
else:
atoms = bulk(metal, crystal, a)
calc = espresso(pw=500, #plane-wave cutoff
dw=5000, #density
|
cutoff
xc='BEEF-vdW', #exchange-correlation functional
kpts=(k,k,k), #sampling grid of the Brillouin zone
#(is internally folded back to the
#irreducible Brillouin zone
|
)
nbands=-10, #10 extra bands besides the bands needed to hold
#the valence electrons
sigma=0.1,
psppath='/home/vossj/suncat/psp/gbrv1.5pbe', #pseudopotential path
convergence= {'energy':1e-5,
'mixing':0.1,
'nmix':10,
'mix':4,
'maxsteps':500,
'diag':'david'
}, #convergence parameters
outdir='calcdir')
atoms.set_pbc([1,1,1]) #periodic boundary conditions in all directions
atoms.set_calculator(calc) #connect espresso to Pt structure
energy = atoms.get_potential_energy() #this triggers a DFT calculation
energies.append(energy)
print 'k-points:', k, 'Total energy:', energy, 'eV'
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(kpts, energies)
ax.set_xlabel('k-points')
ax.set_ylabel('Total energy')
plt.tight_layout()
fig.savefig('kpts_vs_sp_energies.png')
|
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_restserver.py
|
Python
|
gpl-3.0
| 5,314
| 0.002446
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Server Module
Patrick Jose Pereira
April 2018
'''
import time
import json
import socket
from threading import Thread
from flask import Flask
from werkzeug.serving import make_server
from MAVProxy.modules.lib import mp_module
def mavlink_to_json(msg):
    '''Translate mavlink python messages in json string'''
    pieces = ['\"%s\": {' % msg._type]
    for name in msg._fieldnames:
        value = getattr(msg, name)
        pieces.append('\"%s\" : \"%s\", ' % (name, value))
    joined = ''.join(pieces)
    # Drop the trailing ", " left after the last field and close the object
    return joined[:-2] + '}'
def mpstatus_to_json(status):
    '''Translate MPStatus in json string'''
    msg_keys = list(status.msgs.keys())
    # Guard: before any message arrives msgs is empty and msg_keys[-1]
    # would raise IndexError; return an empty JSON object instead.
    if not msg_keys:
        return '{}'
    data = '{'
    # Comma-separate all entries; the last one gets no trailing comma.
    for key in msg_keys[:-1]:
        data += mavlink_to_json(status.msgs[key]) + ','
    data += mavlink_to_json(status.msgs[msg_keys[-1]])
    data += '}'
    return data
class RestServer():
    '''Rest Server

    Runs Flask in a background thread (via werkzeug's make_server) and
    serves the latest MPStatus snapshot as JSON under /rest/mavlink/.
    '''
    def __init__(self):
        # Set log level and remove flask output
        import logging
        self.log = logging.getLogger('werkzeug')
        self.log.setLevel(logging.ERROR)
        # Server variables
        self.app = None
        self.run_thread = None
        self.address = 'localhost'
        self.port = 5000
        # Save status
        self.status = None
        self.server = None

    def update_dict(self, mpstate):
        '''We don't have time to waste'''
        self.status = mpstate.status

    def set_ip_port(self, ip, port):
        '''set ip and port, then restart the server on the new endpoint'''
        self.address = ip
        self.port = port
        self.stop()
        self.start()

    def start(self):
        '''Start server'''
        # Set flask
        self.app = Flask('RestServer')
        self.add_endpoint()
        # Create a thread to deal with flask
        self.run_thread = Thread(target=self.run)
        self.run_thread.start()

    def running(self):
        '''If app is valid, thread and server are running'''
        return self.app is not None

    def stop(self):
        '''Stop server'''
        self.app = None
        if self.run_thread:
            self.run_thread = None
        if self.server:
            self.server.shutdown()
            self.server = None

    def run(self):
        '''Start app (blocks; executed inside run_thread)'''
        self.server = make_server(self.address, self.port, self.app, threaded=True)
        self.server.serve_forever()

    def request(self, arg=None):
        '''Deal with requests: return the whole status as JSON, or walk
        the slash-separated path in *arg* down into the status dict.'''
        if not self.status:
            return '{"result": "No message"}'
        try:
            status_dict = json.loads(mpstatus_to_json(self.status))
        except Exception as e:
            print(e)
            return
        # If no key, send the entire json
        if not arg:
            return json.dumps(status_dict)
        # Get item from path
        new_dict = status_dict
        args = arg.split('/')
        for key in args:
            if key in new_dict:
                new_dict = new_dict[key]
            else:
                # Unknown key: report it along with the last dict reached
                return '{"key": "%s", "last_dict": %s}' % (key, json.dumps(new_dict))
        return json.dumps(new_dict)

    def add_endpoint(self):
        '''Set endpoints'''
        self.app.add_url_rule('/rest/mavlink/<path:arg>', 'rest', self.request)
        self.app.add_url_rule('/rest/mavlink/', 'rest', self.request)
class ServerModule(mp_module.MPModule):
    '''Server Module

    Registers the "restserver" command (start/stop/address) and keeps the
    RestServer's status snapshot current from idle_task().
    '''
    def __init__(self, mpstate):
        super(ServerModule, self).__init__(mpstate, "restserver", "restserver module")
        # Configure server
        self.rest_server = RestServer()
        self.add_command('restserver', self.cmds, \
            "restserver module", ['start', 'stop', 'address 127.0.0.1:4777'])

    def usage(self):
        '''show help on command line options'''
        return "Usage: restserver <address|freq|stop|start>"

    def cmds(self, args):
        '''control behaviour of the module'''
        # "not args" already covers both None and an empty list
        if not args:
            print(self.usage())
            return
        if args[0] == "start":
            if self.rest_server.running():
                print("Rest server already running.")
                return
            self.rest_server.start()
            print("Rest server running: %s:%s" % \
                (self.rest_server.address, self.rest_server.port))
        elif args[0] == "stop":
            if not self.rest_server.running():
                print("Rest server is not running.")
                return
            self.rest_server.stop()
        elif args[0] == "address":
            # Check if have necessary amount of arguments
            if len(args) != 2:
                print("usage: restserver address <ip:port>")
                return
            address = args[1].split(':')
            # Check if argument is correct
            if len(address) == 2:
                self.rest_server.set_ip_port(address[0], int(address[1]))
                return
            # A malformed <ip:port> used to be ignored silently; tell the user
            print("usage: restserver address <ip:port>")
        else:
            print(self.usage())

    def idle_task(self):
        '''called rapidly by mavproxy'''
        # Update server with last mpstate
        self.rest_server.update_dict(self.mpstate)

    def unload(self):
        '''Stop and kill everything before finishing'''
        self.rest_server.stop()
def init(mpstate):
    '''Module entry point called by MAVProxy: build the restserver module.'''
    module = ServerModule(mpstate)
    return module
|
resamsel/dbnavigator
|
src/dbmanagr/wrapper.py
|
Python
|
gpl-3.0
| 3,349
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import logging
import pdb
import urllib2
import json
import ijson
from dbmanagr.writer import Writer
from dbmanagr import logger as log
from dbmanagr.jsonable import from_json
# Maps a dbmanagr program name (options.prog) to the URL path component of
# the daemon endpoint that serves it -- used by Wrapper.executer below.
COMMANDS = {
    'dbdiff': 'differ',
    'dbexec': 'executer',
    'dbexport': 'exporter',
    'dbgraph': 'grapher',
    'dbnav': 'navigator'
}
class Wrapper(object):
    """Base class for dbmanagr commands.

    Subclasses override execute(); run() dispatches either to the local
    execute() or, when the daemon option is active, to the remote daemon
    via executer().
    """
    def __init__(self, options=None):
        self.options = options

    def write(self):
        """Run the command and print its result through Writer.

        Returns a shell-style exit code: 0 on success, -1 on error."""
        try:
            sys.stdout.write(Writer.write(self.run()))
        except BaseException as e:
            log.logger.exception(e)
            return -1
        return 0

    def execute(self):  # pragma: no cover
        """To be overridden by sub classes"""
        pass

    def run(self):
        """Execute the command, remotely (daemon mode) or locally."""
        try:
            if (
                    self.options is not None
                    and self.options.daemon):  # pragma: no cover
                log.logger.debug('Executing remotely')
                return self.executer(*sys.argv)
            log.logger.debug('Executing locally')
            return self.execute()
        except BaseException as e:
            log.logger.exception(e)
            if log.logger.getEffectiveLevel() <= logging.DEBUG:
                # Start post mortem debugging only when debugging is enabled
                if os.getenv('UNITTEST', 'False') == 'True':
                    raise
                if self.options.trace:  # pragma: no cover
                    pdb.post_mortem(sys.exc_info()[2])
            else:
                # Show the error message if log level is INFO or higher
                log.log_error(e)  # pragma: no cover

    def executer(self, *args):  # pragma: no cover
        """Execute remotely: POST argv to the daemon and yield the items
        of its streamed JSON response."""
        options = self.options
        try:
            # from dbmanagr import daemon
            # if not daemon.is_running(options):
            #     daemon.start_server(options)
            url = 'http://{host}:{port}/{path}'.format(
                host=options.host,
                port=options.port,
                path=COMMANDS[options.prog])
            request = json.dumps(args[1:])
            log.logger.debug('Request to %s:\n%s', url, request)
            response = urllib2.urlopen(url, request)
            for i in ijson.items(response, 'item'):
                yield from_json(i)
        except urllib2.HTTPError as e:
            # The daemon serializes its exception into the response body
            raise from_json(json.load(e))
        except urllib2.URLError as e:
            log.logger.error('Daemon not available: %s', e)
        except BaseException as e:
            log.logger.exception(e)
|
freedomtan/workload-automation
|
wlauto/workloads/andebench/__init__.py
|
Python
|
apache-2.0
| 3,485
| 0.003443
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
import re
from wlauto import AndroidUiAutoBenchmark, Parameter, Alias
from wlauto.exceptions import ConfigError
class Andebench(AndroidUiAutoBenchmark):

    name = 'andebench'
    description = """
    AndEBench is an industry standard Android benchmark provided by The
    Embedded Microprocessor Benchmark Consortium (EEMBC).

    http://www.eembc.org/andebench/about.php

    From the website:

    - Initial focus on CPU and Dalvik interpreter performance
    - Internal algorithms concentrate on integer operations
    - Compares the difference between native and Java performance
    - Implements flexible multicore performance analysis
    - Results displayed in Iterations per second
    - Detailed log file for comprehensive engineering analysis

    """
    package = 'com.eembc.coremark'
    activity = 'com.eembc.coremark.splash'
    summary_metrics = ['AndEMark Java', 'AndEMark Native']

    parameters = [
        Parameter('number_of_threads', kind=int,
                  description='Number of threads that will be spawned by AndEBench.'),
        Parameter('single_threaded', kind=bool,
                  description="""
                  If ``true``, AndEBench will run with a single thread. Note: this must
                  not be specified if ``number_of_threads`` has been specified.
                  """),
    ]

    aliases = [
        Alias('andebenchst', number_of_threads=1),
    ]

    # Raw strings so \s and \d are regex escapes, not (invalid) Python
    # string escapes. Matches e.g. "AndEMark Native : 1234" in logcat.
    regex = re.compile(r'\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:'
                       r'\s*(?P<value>\d+)')

    def validate(self):
        # The two parameters are mutually exclusive ways of setting thread count.
        if (self.number_of_threads is not None) and (self.single_threaded is not None):  # pylint: disable=E1101
            raise ConfigError('Can\'t specify both number_of_threads and single_threaded parameters.')

    def setup(self, context):
        # Resolve the effective thread count before the UI automation starts.
        if self.number_of_threads is None:  # pylint: disable=access-member-before-definition
            if self.single_threaded:  # pylint: disable=E1101
                self.number_of_threads = 1  # pylint: disable=attribute-defined-outside-init
            else:
                self.number_of_threads = self.device.number_of_cores  # pylint: disable=W0201
        self.logger.debug('Using {} threads'.format(self.number_of_threads))
        self.uiauto_params['number_of_threads'] = self.number_of_threads
        # Called after this setup as modifying uiauto_params
        super(Andebench, self).setup(context)

    def update_result(self, context):
        # Scrape the benchmark scores from the captured logcat output.
        super(Andebench, self).update_result(context)
        results = {}
        with open(self.logcat_log) as fh:
            for line in fh:
                match = self.regex.search(line)
                if match:
                    data = match.groupdict()
                    results[data['key']] = data['value']
        # .items() works on both Python 2 and 3 (iteritems() is py2-only)
        for key, value in results.items():
            context.result.add_metric(key, value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.