repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
zmap/zannotate | asnames/__main__.py | Python | apache-2.0 | 3,472 | 0.006048 | import json
import urllib2
import re
class CIDRReportASNameDump(object):
"""ASNNameDump represents the full list of
registered AS Numbers on CIDR report. Acts as an iterator that
will provide tuples: (AS Number, AS Name)"""
CIDR_REPORT_URL = "http://www.cidr-report.org/as2.0/autnums.html"
ENTRY_REGEX = re.compile("^<a href.*>(.*)</a>(.*)$")
OVERRIDES = {
}
ADDITIONAL_NAMES = {
}
def __init__(self):
self.__f = None
self.data = {}
def fetch(self):
if not self.__f:
self.__f = urllib2.urlopen(self.CIDR_REPORT_URL)
for line in self.__f.readlines():
m = self.ENTRY_REGEX.match(line)
if m:
asn = m.groups()[0].rstrip().lstrip().replace('AS','')
# handle weird . notation for > 16-bit ASNs
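# e.g. a hypothetical asdot value "3.14" becomes (3 << 16) + 14 = 196622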
if '.' in asn:
b, s = asn.split('.')
asn = (int(b) << 16) + int(s)
else:
asn = int(asn)
description = m.groups()[1].rstrip().lstrip()
# parse country code if available
if len(description) > 3 and description[-3] == ",":
id_org = description[:-3]
country = description[-2:]
elif len(description) > 4 and description[-4] == ",":
id_org = description[:-4].strip()
country = description[-3:].strip()
else: # no country present.
id_org = description
country = None
# this format is terrible, but we'll try to parse it out anyway.
if " - " in id_org and id_org.split(" - ", 1)[0].isupper():
name, org = id_org.split(" - ", 1)
#elif " " in id_org and id_org.split(" ", 1)[0].isupper():
# name, org = id_org.split(" ", 1)
elif "-" in id_org and id_org.split("-", 1)[0].isupper():
name, org = id_org.split("-", 1)
else:
name, org = id_org, None
if name:
name = unicode(name, errors="ignore").encode("utf-8", "ignore").replace('"', '')
if country:
country = unicode(country, errors="ignore").encode("utf-8", "ignore").replace('"', '')
if org:
org = unicode(org, errors="ignore").encode("utf-8", "ignore").replace('"', '')
if description:
description = unicode(description, errors="ignore").encode("utf-8", "ignore").replace('"', '')
self.data[int(asn)] = {"asn":int(asn), "description":description, "country_code":country, "organization":org, "name":name}
def lookup(self, number):
number = int(number)
if number in self.OVERRIDES:
return self.OVERRIDES[number]
if number in self.data:
return self.data[number]
if number in self.ADDITIONAL_NAMES:
return self.ADDITIONAL_NAMES[number]
return {"asn":number, "name":"UNKNOWN-%i" % number, "description":"Unknown AS (ASN:%i)" % number, "organization":"Unknown"}
def iter(self):
for asn, info in self.data.iteritems():
yield info
def main():
db = CIDRReportASNameDump()
db.fetch()
for r in db.iter():
print json.dumps(r)
if __name__ == "__main__":
main()
|
ShakoHo/fxos-certsuite | mcts/webapi_tests/moztime/test_time.py | Python | mpl-2.0 | 2,856 | 0.006653 | from datetime import datetime
import time
from mcts.webapi_tests.semiauto import TestCase
class TestTime(TestCase):
"""
This is a test for the `MozTime API`_ which will:
- Get the current date/time and ask the test user to verify
- Set the current date/time to a user-specified value, and ask the test user to verify
.. _`MozTime API`: https://developer.mozilla.org/en-US/docs/Web/API/Time_and_Clock_API
"""
def setUp(self):
super(TestTime, self).setUp()
self.wait_for_obj("window.navigator.mozTime")
def test_time_set(self):
get_current_time = """
var curDate = new Date();
var time_msec = curDate.getTime();
return time_msec;
"""
set_time = """
var date_n_time_string = arguments[0];
var datetime_to_set = new Date(date_n_time_string);
var sec = datetime_to_set.getTime();
console.log("printing the set time",sec);
var time_interface = window.navigator.mozTime;
//set the time using timer webAPI
time_interface.set(datetime_to_set);
//get the newly set time
var get_new_time = new Date();
return get_new_time.getTime();
"""
#get current time from system
current_time_msec = self.marionette.execute_script(get_current_time)
str_current_time = time.strftime('%Y-%m-%d %H:%M', \
time.localtime(current_time_msec / 1000.0))
self.confirm("Pull the notification bar and confirm if %s is current"\
" date and time on phone?" % str_current_time)
#get the new date and time from user and pass to script to set
str_date = self.prompt("Please enter a date to be changed in"\
" format dd/mm/yyyy")
if str_date is None:
self.fail("Must enter a date")
str_time = self.prompt("Plea | se enter a time to be changed in" \
" format HH:MM")
if str_time is None:
self.fail("Must enter a time")
date_struct = datetime.strptime(str_date, '%d/%m/%Y')
date_format = date_struct.strftime('%B %d, %Y')
date_n_time = date_format + ' ' + str_time
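#e.g. hypothetical inputs "25/12/2014" and "13:30" combine to
#"December 25, 2014 13:30"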
mozset_time = self.marionette.execute_script(set_time, \
script_args=[date_n_time])
#compare the times
str_mozset_time = time.strftime('%B %d, %Y %H:%M', \
time.localtime(mozset_time / 1000.0))
self.assertEqual(date_n_time, str_mozset_time)
self.confirm("Pull the notification bar and confirm that the date" \
" and time you have been set to %s" % str_mozset_time)
|
jomorais/pyfase | examples/pingpong/pong.py | Python | gpl-3.0 | 655 | 0.003053 | #!/usr/bin/python
__author__ = 'joaci'
try:
import time
from pyfase import MicroService
except Exception as e:
print('require module exception: %s' % e)
exit(0)
class Pong(MicroService):
def __init__(self):
super(Pong, self).__init__(self, sender_endpoint='ipc:///tmp/sender', receiver_endpoint='ipc:///tmp/receiver')
def on_connect(self):
print('### on_connect ###')
self.request_action('ping', {})
@MicroService.action
def pong(self, service, data):
print('### service: %s request a pong ###' % service)
time.sleep(2)
self.request_action('ping', {})
Pong().execute()
|
dodobas/osm-export-tool2 | jobs/models.py | Python | bsd-3-clause | 9,975 | 0.002506 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import uuid
from django.contrib.auth.models import Group, User
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.postgres.fields import ArrayField
from django.db.models.fields import CharField
from django.db.models.signals import post_delete, post_save
from django.dispatch.dispatcher import receiver
from django.utils import timezone
logger = logging.getLogger(__name__)
# construct the upload path for export config files..
def get_upload_path(instance, filename):
configtype = instance.config_type.lower()
# sanitize the filename here..
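# e.g. config_type 'PRESET' with a hypothetical filename 'hdm_preset.xml'
# yields 'export/config/preset/hdm_preset.xml'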
path = 'export/config/{0}/{1}'.format(configtype, instance.filename)
logger.debug('Saving export config to /media/{0}'.format(path))
return path
class LowerCaseCharField(CharField):
"""
Defines a charfield which automatically converts all inputs to
lowercase and saves.
"""
def pre_save(self, model_instance, add):
"""
Converts the string to lowercase before saving.
"""
current_value = getattr(model_instance, self.attname)
setattr(model_instance, self.attname, current_value.lower())
return getattr(model_instance, self.attname)
class TimeStampedModelMixin(models.Model):
"""
Mixin for timestamped models.
"""
created_at = models.DateTimeField(default=timezone.now, editable=False)
updated_at = models.DateTimeField(default=timezone.now, editable=False)
class Meta: # pragma: no cover
abstract = True
class ExportConfig(TimeStampedModelMixin):
"""
Model for export configuration.
"""
PRESET = 'PRESET'
TRANSLATION = 'TRANSLATION'
TRANSFORM = 'TRANSFORM'
CONFIG_TYPES = (
(PRESET, 'Preset'),
(TRANSLATION, 'Translation'),
(TRANSFORM, 'Transform')
)
id = models.AutoField(primary_key=True, editable=False)
uid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255, default='', db_index=True)
user = models.ForeignKey(User, related_name='user')
config_type = models.CharField(max_length=11, choices=CONFIG_TYPES, default=PRESET)
filename = models.CharField(max_length=255)
upload = models.FileField(max_length=255, upload_to=get_upload_path)
content_type = models.CharField(max_length=30, editable=False)
published = models.BooleanField(default=False)
class Meta: # pragma: no cover
managed = True
db_table = 'export_configurations'
class ExportFormat(TimeStampedModelMixin):
"""Model for a ExportFormat"""
id = models.AutoField(primary_key=True, editable=False)
uid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False, db_index=True)
name = models.CharField(max_length=100)
slug = LowerCaseCharField(max_length=10, unique=True, default='')
description = models.CharField(max_length=255)
cmd = models.TextField(max_length=1000)
objects = models.Manager()
class Meta: # pragma: no cover
managed = True
db_table = 'export_formats'
def __str__(self):
return '{0}'.format(self.name)
def __unicode__(self, ):
return '{0}'.format(self.slug)
class Region(TimeStampedModelMixin):
"""
Model for a HOT Export Region.
"""
id = models.AutoField(primary_key=True, editable=False)
uid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=100, db_index=True)
description = models.CharField(max_length=1000, blank=True)
the_geom = models.PolygonField(verbose_name='HOT Export Region', srid=4326, default='')
the_geom_webmercator = models.PolygonField(verbose_name='Mercator extent for export region', srid=3857, default='')
the_geog = models.PolygonField(verbose_name='Geographic extent for export region', geography=True, default='')
objects = models.GeoManager()
class Meta: # pragma: no cover
managed = True
db_table = 'regions'
def __str__(self):
return '{0}'.format(self.name)
class Job(TimeStampedModelMixin):
"""
Model for a Job.
"""
id = models.AutoField(primary_key=True, editable=False)
uid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False, db_index=True)
user = models.ForeignKey(User, related_name='owner')
name = models.CharField(max_length=100, db_index=True)
description = models.CharField(max_length=1000, db_index=True)
event = models.CharField(max_length=100, db_index=True, default='', blank=True)
region = models.ForeignKey(Region, null=True)
formats = models.ManyToManyField(ExportFormat, related_name='formats')
configs = models.ManyToManyField(ExportConfig, related_name='configs')
published = models.BooleanField(default=False, db_index=True) # publish export
feature_save = models.BooleanField(default=False, db_index=True) # save feature selections
feature_pub = models.BooleanField(default=False, db_index=True) # publish feature selections
the_geom = models.PolygonField(verbose_name='Extent for export', srid=4326, default='')
the_geom_webmercator = models.PolygonField(verbose_name='Mercator extent for export', srid=3857, default='')
the_geog = models.PolygonField(verbose_name='Geographic extent for export', geography=True, default='')
objects = models.GeoManager()
class Meta: # pragma: no cover
managed = True
db_table = 'jobs'
def save(self, *args, **kwargs):
self.the_geog = GEOSGeometry(self.the_geom)
self.the_geom_webmercator = self.the_geom.transform(ct=3857, clone=True)
super(Job, self).save(*args, **kwargs)
def __str__(self):
return '{0}'.format(self.name)
@property
def overpass_extents(self, ):
extents = GEOSGeometry(self.the_geom).extent # (w,s,e,n)
# overpass needs extents in order (s,w,n,e)
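# e.g. a hypothetical extent (-10.0, 50.0, -9.0, 51.0) becomes '50.0,-10.0,51.0,-9.0'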
overpass_extents = '{0},{1},{2},{3}'.format(str(extents[1]), str(extents[0]),
str(extents[3]), str(extents[2]))
return overpass_extents
@property
def tag_dict(self,):
# get the unique keys from the tags for this export
uniq_keys = list(self.tags.values('key').distinct('key'))
tag_dict = {} # mapping of tags to geom_types
for entry in uniq_keys:
key = entry['key']
tag_dict['key'] = key
geom_types = list(self.tags.filter(key=key).values('geom_types'))
geom_type_list = []
for geom_type in geom_types:
geom_list = geom_type['geom_types']
geom_type_list.extend([i for i in geom_list])
tag_dict[key] = list(set(geom_type_list)) # get unique values for geomtypes
return tag_dict
@property
def filters(self,):
filters = []
for tag in self.tags.all():
kv = '{0}={1}'.format(tag.key, tag.value)
filters.append(kv)
return filters
@property
def categorised_tags(self,):
points = []
lines = []
polygons = []
for tag in self.tag_dict:
for geom in self.tag_dict[tag]:
if geom == 'point':
points.append(tag)
if geom == 'line':
lines.append(tag)
if geom == 'polygon':
polygons.append(tag)
return {'points': sorted(points), 'lines': sorted(lines), 'polygons': sorted(polygons)}
class Tag(models.Model):
"""
Model to hold Export tag selections.
Holds the data model (osm | hdm | preset)
and the geom_type mapping.
"""
id = models.AutoField(primary_key=True, editable=False)
name = models.CharField(max_length=100, blank=False, default='', db_index=True)
key = models.CharField(max_length=50, blank=False, default='', db_index=True)
value = models.CharField(max_length=50, blank=False, default='', db_index=True)
job = models.ForeignKey(Job, related_name='tags')
d |
VinnieJohns/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20160603120209_c9218e757bc_rename_admin_role_from_ggrc_admin_to_.py | Python | apache-2.0 | 1,169 | 0.001711 | # Copyright (C) 2017 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Rename admin role from gGRC Admin to Administrator.
Create Date: 2016-06-03 12:02:09.438599
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = 'c9218e757bc'
down_revision = '4d5180ab1b42'
roles_table = table(
'roles',
column('id', sa.Integer),
column('name', sa.String),
column('updated_at', sa.DateTime),
column('description', sa.Text),
)
def upgrade():
op.execute(roles_table.update()
.where(roles_table.c.name == 'gGRC Admin')
.values(name='Administrator',
description='System Administrator with super-user '
'privileges'))
def downgrade():
op.execute(roles_table.update()
.where(roles_table.c.name == 'Administrator')
.values(name='gGRC Admin',
description='gGRC System Administrator with super-user '
'privileges'))
|
mfcovington/django-lab-members | lab_members/migrations/0012_scientist_email.py | Python | bsd-3-clause | 523 | 0.001912 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lab_members', '0011_auto_20141130_2335'),
]
operations = [
migrations.AddField(
model_name='scientist',
name='email',
field=models.EmailField(help_text='Please enter email address', max_length=75, null=True, verbose_name='email address'),
preserve_default=True,
),
]
|
mrculler/movieface | face_types.py | Python | gpl-3.0 | 4,296 | 0.003957 | # Defines some types
import collections
Movie = collections.namedtuple("Movie", ["movie_id", "collection_id", "is_renamed", "filename"])
Collection = collections.namedtuple("Collection", ["collection_id", "display_name", "extension", "path", "drop_path"])
# Full definitions are below (generated by specifying "verbose=True" to namedtuple calls)
#
#class Movie(tuple):
# 'Movie(movie_id, collection_id, is_renamed, filename)'
#
# __slots__ = ()
#
# _fields = ('movie_id', 'collection_id', 'is_renamed', 'filename')
#
# def __new__(_cls, movie_id, collection_id, is_renamed, filename):
# 'Create new instance of Movie(movie_id, collection_id, is_renamed, filename)'
# return _tuple.__new__(_cls, (movie_id, collection_id, is_renamed, filename))
#
# @classmethod
# def _make(cls, iterable, new=tuple.__new__, len=len):
# 'Make a new Movie object from a sequence or iterable'
# result = new(cls, iterable)
# if len(result) != 4:
# raise TypeError('Expected 4 arguments, got %d' % len(result))
# return result
#
# def __repr__(self):
# 'Return a nicely formatted representation string'
# return 'Movie(movie_id=%r, collection_id=%r, is_renamed=%r, filename=%r)' % self
#
# def _asdict(self):
# 'Return a new OrderedDict which maps field names to their values'
# return OrderedDict(zip(self._fields, self))
#
# __dict__ = property(_asdict)
#
# def _replace(_self, **kwds):
# 'Return a new Movie object replacing specified fields with new values'
# result = _self._make(map(kwds.pop, ('movie_id', 'collection_id', 'is_renamed', 'filename'), _self))
# if kwds:
# raise ValueError('Got unexpected field names: %r' % kwds.keys())
# return result
#
# def __getnewargs__(self):
# 'Return self as a plain tuple. Used by copy and pickle.'
# return tuple(self)
#
# movie_id = _property(_itemgetter(0), doc='Alias for field number 0')
#
# collection_id = _property(_itemgetter(1), doc='Alias for field number 1')
#
# is_renamed = _property(_itemgetter(2), doc='Alias for field number 2')
#
# filename = _property(_itemgetter(3), doc='Alias for field number 3')
#
#
#class Collection(tuple):
# 'Collection(collection_id, display_name, extension, path, drop_path)'
#
# __slots__ = ()
#
# _fields = ('collection_id', 'display_name', 'extension', 'path', 'drop_path')
#
# def __new__(_cls, collection_id, display_name, extension, path, drop_path):
# 'Create new instance of Collection(collection_id, display_name, extension, path, drop_path)'
# return _tuple.__new__(_cls, (collection_id, display_name, extension, path, drop_path))
#
# @classmethod
# def _make(cls, iterable, new=tuple.__new__, len=len):
# 'Make a new Collection object from a sequence or iterable'
# result = new(cls, iterable)
# if len(result) != 5:
# raise TypeError('Expected 5 arguments, got %d' % len(result))
# return result
#
# def __repr__(self):
# 'Return a nicely formatted representation string'
# return 'Collection(collection_id=%r, display_name=%r, extension=%r, path=%r, drop_path=%r)' % self
#
# def _asdict(self):
# 'Return a new OrderedDict which maps field names to their values'
# return OrderedDict(zip(self._fields, self))
#
# __dict__ = property(_asdict)
#
# def _replace(_self, **kwds):
# 'Return a new Collection object replacing specified fields with new values'
# result = _self._make(map(kwds.pop, ('collection_id', 'display_name', 'extension', 'path', 'drop_path'), _self))
# if kwds:
# raise ValueError('Got unexpected field names: %r' % kwds.keys())
# return result
#
# def __getnewargs__(self):
# 'Return self as a plain tuple. Used by copy and pickle.'
# return tuple(self)
#
# collection_id = _property(_itemgetter(0), doc='Alias for field number 0')
#
# display_name = _property(_itemgetter(1), doc='Alias for field number 1')
#
# extension = _property(_itemgetter(2), doc='Alias for field number 2')
#
# path = _property(_itemgetter(3), doc='Alias for field number 3')
#
# drop_path = _property(_itemgetter(4), doc='Alias for field number 4')
|
virtualeconomycoin/vec | share/qt/clean_mac_info_plist.py | Python | mit | 917 | 0.016358 | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Virtualeconomycoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Virtualeconom | ycoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
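# e.g. a hypothetical .pro line "VERSION = 0.8.6" yields version "0.8.6"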
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
gifford-lab/bcbio-nextgen | bcbio/variation/population.py | Python | mit | 12,759 | 0.003762 | """Provide infrastructure to allow exploration of variations within populations.
Uses the gemini framework (https://github.com/arq5x/gemini) to build a SQLite
database of variations for query and evaluation.
"""
import collections
import csv
from distutils.version import LooseVersion
import os
import subprocess
import toolz as tz
from bcbio import install, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import multiallelic, vcfutils
def prep_gemini_db(fnames, call_info, samples, extras):
"""Prepare a gemini database from VCF inputs prepared with snpEff.
"""
data = samples[0]
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
name, caller, is_batch = call_info
gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller))
multisample_vcf = get_multisample_vcf(fnames, name, caller, data)
gemini_vcf = multiallelic.to_single(multisample_vcf, data)
use_gemini_quick = (do_db_build(samples) and
any(vcfutils.vcf_has_variants(f) for f in fnames))
if not utils.file_exists(gemini_db) and use_gemini_quick:
use_gemini = do_db_build(samples) and any(vcfutils.vcf_has_variants(f) for f in fnames)
if use_gemini:
ped_file = create_ped_file(samples + extras, gemini_vcf)
gemini_db = create_gemini_db(gemini_vcf, data, gemini_db, ped_file)
return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None,
"vcf": multisample_vcf if is_batch else None}]]
def create_gemini_db(gemini_vcf, data, gemini_db=None, ped_file=None):
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
if "program_versions" in data["config"].get("resources", {}):
gemini_ver = programs.get_version("gemini", config=data["config"])
else:
gemini_ver = None
# Recent versions of gemini allow loading only passing variants
load_opts = ""
if not gemini_ver or LooseVersion(gemini_ver) > LooseVersion("0.6.2.1"):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if gemini_ver and LooseVersion(gemini_ver) > LooseVersion("0.6.4"):
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD or gerp-bp if neither are loaded
if gemini_ver and LooseVersion(gemini_ver) >= LooseVersion("0.7.0"):
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
cmd = ("{gemini} {gemini_opts} load {load_opts} -v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db
def _get_effects_flag(data):
effects_config = tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
if effects_config == "snpeff":
return "-t snpEff"
elif effects_config == "vep":
return "-t VEP"
else:
return ""
def get_affected_status(data):
"""Retrieve the affected/unaffected status of sample.
Uses unaffected (1), affected (2), unknown (0) coding from PED files:
http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
"""
affected = set(["tumor", "affected"])
unaffected = set(["normal", "unaffected"])
phenotype = str(tz.get_in(["metadata", "phenotype"], data, "")).lower()
if phenotype in affected:
return 2
elif phenotype in unaffected:
return 1
else:
return 0
def create_ped_file(samples, base_vcf):
"""Create a GEMINI-compatible PED file, including gender, family and phenotype information.
Checks for a specified `ped` file in metadata, and will use sample information from this file
before reconstituting from metadata information.
"""
def _code_gender(data):
g = dd.get_gender(data)
if g and str(g).lower() in ["male", "m"]:
return 1
elif g and str(g).lower() in ["female", "f"]:
return 2
else:
return 0
out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0]
sample_ped_lines = {}
header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"]
for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data)
for data in samples] if x is not None])):
with open(md_ped) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for parts in reader:
if parts[0].startswith("#") and len(parts) > len(header):
header = header + parts[len(header):]
else:
sample_ped_lines[parts[1]] = parts
if not utils.file_exists(out_file):
with file_transaction(samples[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(header)
batch = _find_shared_batch(samples)
for data in samples:
sname = dd.get_sample_name(data)
if sname in sample_ped_lines:
writer.writerow(sample_ped_lines[sname])
else:
writer.writerow([batch, sname, "-9", "-9",
_code_gender(data), get_affected_status(data), "-9"])
return out_file
def _find_shared_batch(samples):
for data in samples:
batch = tz.get_in(["metadata", "batch"], data, dd.get_sample_name(data))
if not isinstance(batch, (list, tuple)):
return batch
def _is_small_vcf(vcf_file):
"""Check for small VCFs which we want to analyze quicker.
"""
count = 0
small_thresh = 250
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
count += 1
if count > small_thresh:
return False
return True
def get_multisample_vcf(fnames, name, caller, data):
"""Retrieve a multiple sample VCF file in a standard location.
Handles inputs with multiple repeated input files from batches.
"""
unique_fnames = []
for f in fnames:
if f not in unique_fnames:
unique_fnames.append(f)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
if len(unique_fnames) > 1:
gemini_vcf = os.path.join(out_d |
TheImagingSource/tiscamera | tools/tcam-capture/tcam_capture/TcamView.py | Python | apache-2.0 | 21,385 | 0.001777 | # Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
from tcam_capture.CapsDesc import CapsDesc
from tcam_capture.TcamScreen import TcamScreen
from tcam_capture.FileNameGenerator import FileNameGenerator
from tcam_capture.MediaSaver import MediaSaver
from tcam_capture.Settings import Settings
from tcam_capture.Encoder import MediaType, get_encoder_dict
from tcam_capture.TcamCaptureData import TcamCaptureData
from tcam_capture.FPSCounter import FPSCounter
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import (QWidget, QHBoxLayout)
from PyQt5.QtCore import QObject, pyqtSignal, Qt, QEvent
import logging
import gi
gi.require_version("Gst", "1.0")
gi.require_version("Tcam", "0.1")
gi.require_version("GstVideo", "1.0")
from gi.repository import Tcam, Gst, GLib, GstVideo
log = logging.getLogger(__name__)
class TcamView(QWidget):
image_saved = pyqtSignal(str)
video_saved = pyqtSignal(str)
new_pixel_under_mouse = pyqtSignal(bool, int, int, QtGui.QColor)
current_fps = pyqtSignal(float)
format_selected = pyqtSignal(str, str, str) # format, widthxheight, framerate
first_image = pyqtSignal()
def __init__(self, serial: str, dev_type: str, parent=None):
super(TcamView, self).__init__(parent)
self.layout = QHBoxLayout()
self.container = TcamScreen(self)
self.container.new_pixel_under_mouse.connect(self.new_pixel_under_mouse_slot)
self.fullscreen_container = None # separate widget for fullscreen usage
self.is_fullscreen = False
self.layout.addWidget(self.container)
self.layout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.setLayout(self.layout)
self.serial = serial
self.dev_type = dev_type
self.tcam = None
self.pipeline = None
self.image = None
self.mouse_is_pressed = False
self.use_dutils = True
self.current_width = 0
self.current_height = 0
self.device_lost_callbacks = []
self.caps_desc = None
self.video_format = None
self.retry_countdown = 0
self.settings = None
self.video_fng = None
self.image_fng = None
# additional timer to update actual_fps
# when no images arrive
self.fps_timer = QtCore.QTimer()
self.fps_timer.timeout.connect(self.fps_tick)
self.fps = None
self.file_pattern = ""
self.file_location = "/tmp"
self.caps = None
self.state = None
self.videosaver = None
self.imagesaver = None
self.window_id = self.container.winId()
self.displaysink = None
def get_caps_desc(self):
"""
Returns a CapsDesc describing the caps of the currently opened device
Returns None if device is not opened
"""
if not self.caps_desc:
tcam = self.get_tcam()
if not tcam:
return None
caps = tcam.get_static_pad("src").query_caps()
self.caps_desc = CapsDesc(caps)
return self.caps_desc
def new_pixel_under_mouse_slot(self, active: bool,
mouse_x: int, mouse_y: int,
color: QtGui.QColor):
self.new_pixel_under_mouse.emit(active, mouse_x, mouse_y, color)
def eventFilter(self, obj, event):
""""""
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_F11:
self.toggle_fullscreen()
return True
return QObject.eventFilter(self, obj, event)
def set_settings(self, new_settings: Settings):
"""
Update settings of all subclasses
"""
self.settings = new_settings
self.use_dutils = self.settings.use_dutils
if not self.video_fng:
self.video_fng = FileNameGenerator(self.serial,
self.settings.video_name)
else:
self.video_fng.set_settings(self.settings.video_name)
self.video_fng.location = self.settings.save_location
self.video_fng.file_suffix = get_encoder_dict()[self.settings.video_type].file_ending
if not self.image_fng:
self.image_fng = FileNameGenerator(self.serial,
self.settings.image_name)
else:
self.image_fng.set_settings(self.settings.image_name)
self.image_fng.location = self.settings.save_location
self.image_fng.file_suffix = get_encoder_dict()[self.settings.image_type].file_ending
def toggle_fullscreen(self):
if self.is_fullscreen:
self.is_fullscreen = False
self.showNormal()
self.fullscreen_container.hide()
# self.fullscreen_container.deleteLater()
self.fullscreen_container = None
self.displaysink.set_window_handle(self.window_id)
else:
self.is_fullscreen = True
self.fullscreen_container = TcamScreen()
self.fullscreen_container.is_fullscreen = True
self.fullscreen_container.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.fullscreen_container.showFullScreen()
self.fullscreen_container.show()
self.container.first_image = True
self.displaysink.set_window_handle(self.fullscreen_container.winId())
self.fullscreen_container.setFocusPolicy(QtCore.Qt.StrongFocus)
self.fullscreen_container.installEventFilter(self.fullscreen_container)
self.fullscreen_container.destroy_widget.connect(self.toggle_fullscreen)
# either show info that we are in trigger mode and still waiting for the first image
# or show that last image we had. This way we always have something to show to the user
if self.is_trigger_mode_on() and self.container.first_image:
self.fullscreen_container.wait_for_first_image()
else:
self.fullscreen_container.on_new_pixmap(self.container.pix.pixmap())
def fit_view(self):
if self.is_fullscreen:
self.fullscreen_container.fit_in_view.emit()
else:
self.container.fit_in_view.emit()
def save_image(self, image_type: str):
if not self.imagesaver:
self.imagesaver = MediaSaver(self.serial, self.caps, MediaType.image)
self.imagesaver.saved.connect(self.image_saved_callback)
self.imagesaver.error.connect(self.image_error_callback)
self.image_fng.set_settings(self.settings.image_name)
fn = self.image_fng.create_file_name("image")
self.imagesaver.current_filename = fn
self.imagesaver.save_image(get_encoder_dict()[image_type])
def image_saved_callback(self, image_path: str):
"""
SLOT for imagesaver callback for successfull saving
"""
self.image_saved.emit(image_path)
def image_error_callback(self, error_msg: str):
pass
def video_saved_callback(self, video_path: str):
"""
SLOT for videosaver callback for successfull saving
"""
self.video_saved.emit(video_path)
def start_recording_video(self, video_type: str):
"""
"""
if self.videosaver:
log.error("A video recording is already ongoing.")
return
self.videosaver = MediaSaver(self.serial, self.caps, MediaType.video)
self.videosaver.set_encoder(video_type)
self.videosaver.location = self.file_location
self.videosave |
jos4uke/getSeqFlankBlatHit | lib/python2.7/site-packages/Cython/Compiler/Nodes.py | Python | gpl-2.0 | 349,102 | 0.002819 | #
# Parse tree nodes
#
from __future__ import absolute_import
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t, error_type=object)
import sys, os, copy
from itertools import chain
from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString, escape_byte_string, split_string_literal
from . import Future
from . import Options
from . import DebugFlags
absolute_path_length = 0
def relative_position(pos):
"""
We embed the relative filename in the generated C file, since we
don't want to have to regenerate and compile all the source code
whenever the Python install directory moves (which could happen,
e.g., when distributing binaries.)
INPUT:
a position tuple -- (absolute filename, line number, column position)
OUTPUT:
relative filename
line number
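EXAMPLE (hypothetical paths):
with the working directory at /home/user/project, a position in
/home/user/project/src/mod.pyx at line 12 yields ('src/mod.pyx', 12)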
AUTHOR: William Stein
"""
global absolute_path_length
if absolute_path_length==0:
absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
def _analyse_signature_annotation(annotation, env):
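# e.g. a hypothetical signature "def f(x: {'ctype': 'long'})" selects the
# 'ctype' entry as the declared type of x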
base_type = None
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations fo | und in signature annotation")
arg_type = annotation.analyse_as_type(env)
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
else:
warning(annotation.pos, "Unknown type declaration found in signature annotation")
return base_type, arg_type
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
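# e.g. produces " /* ModuleNode -> ModuleNode.generate_c_code (1, 0) */"
# (hypothetical node class and position)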
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
code.buffer.stream.seek(pristine)
else:
marker = marker.replace('->', '<-')
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
methods = set(['analyse_types',
'analyse_expressions',
'analyse_target_types'])
def __new__(cls, name, bases, attrs):
from types import FunctionType
def check(name, func):
def call(*args, **kwargs):
retval = func(*args, **kwargs)
if retval is None:
print name, args, kwargs
return retval
return call
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType) and mname in cls.methods:
attrs[mname] = check(mname, m)
return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
#__metaclass__ = CheckAnalysers
if DebugFlags.debug_trace_code_generation:
__metaclass__ = VerboseCodeWriter
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
|
usc-isi/nova | nova/api/ec2/cloud.py | Python | apache-2.0 | 69,515 | 0.000561 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is kept in a distributed
datastore.
"""
import base64
import time
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api import validator
from nova import block_device
from nova import compute
from nova.compute import instance_types
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import flags
from nova.image import s3
from nova import network
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import utils
from nova import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
if not validator.validate_str()(val):
raise exception.InvalidInstanceIDMalformed(val)
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
raise exception.InvalidInstanceIDMalformed(val)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string"""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
BlockDevicedMapping.<N>.DeviceName
BlockDevicedMapping.<N>.Ebs.SnapshotId
BlockDevicedMapping.<N>.Ebs.VolumeSize
BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
BlockDevicedMapping.<N>.Ebs.NoDevice
BlockDevicedMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = id
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = id
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
def _properties_get_mappings(properties):
return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Contruct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
def _format_mappings(properties, result):
"""Format multiple BlockDeviceMappingItemType"""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
properties.get('block_device_mapping', [])]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm['deviceName'] == mappings[i]['deviceName']:
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.security_group_api = CloudSecurityGroupAPI()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute.api.KeypairAPI()
def __str__(self):
return 'CloudController'
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _get_zones(self, context):
"""Return available and unavailable zones."""
enabled_services = db.service_get_all(context, False)
disabled_services = db.service_get_all(context, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
if not zone in available_zones:
available_zones.append(zone)
not_available_zones = []
for zone in [service.availability_zone for service in disabled_services
if not service['availability_zone'] in available_zones]:
if not zone in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
def _describe_availability_zones(self, context, |
tongjixianing/projects | pylib/cqlshlib/formatting.py | Python | apache-2.0 | 12,345 | 0.002754 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import calendar
import math
import re
import sys
import platform
import wcwidth
from collections import defaultdict
from displaying import colorme, get_str, FormattedValue, DEFAULT_VALUE_COLORS, NO_COLOR_MAP
from cassandra.cqltypes import EMPTY
from cassandra.util import datetime_from_timestamp
from util import UTC
is_win = platform.system() == 'Windows'
unicode_controlchars_re = re.compile(r'[\x00-\x31\x7f-\xa0]')
controlchars_re = re.compile(r'[\x00-\x31\x7f-\xff]')
def _show_control_chars(match):
txt = repr(match.group(0))
if txt.startswith('u'):
txt = txt[2:-1]
else:
txt = txt[1:-1]
return txt
bits_to_turn_red_re = re.compile(r'\\([^uUx]|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{2}|U[0-9a-fA-F]{8})')
def _make_turn_bits_red_f(color1, color2):
def _turn_bits_red(match):
txt = match.group(0)
if txt == '\\\\':
return '\\'
return color1 + txt + color2
return _turn_bits_red
default_null_placeholder = 'null'
default_float_precision = 3
default_colormap = DEFAULT_VALUE_COLORS
empty_colormap = defaultdict(lambda: '')
def format_by_type(cqltype, val, encoding, colormap=None, addcolor=False,
nullval=None, date_time_format=None, float_precision=None):
if nullval is None:
nullval = default_null_placeholder
if val is None:
return colorme(nullval, colormap, 'error')
if addcolor is False:
colormap = empty_colormap
elif colormap is None:
colormap = default_colormap
if date_time_format is None:
date_time_format = DateTimeFormat()
if float_precision is None:
float_precision = default_float_precision
return format_value(cqltype, val, encoding=encoding, colormap=colormap,
date_time_format=date_time_format, float_precision=float_precision,
nullval=nullval)
def color_text(bval, colormap, displaywidth=None):
# note that here, we render natural backslashes as just backslashes,
# in the same color as surrounding text, when using color. When not
# using color, we need to double up the backslashes so it's not
# ambiguous. This introduces the unique difficulty of having different
# display widths for the colored and non-colored versions. To avoid
# adding the smarts to handle that in to FormattedValue, we just
# make an explicit check to see if a null colormap is being used or
# not.
if displaywidth is None:
displaywidth = len(bval)
tbr = _make_turn_bits_red_f(colormap['blob'], colormap['text'])
coloredval = colormap['text'] + bits_to_turn_red_re.sub(tbr, bval) + colormap['reset']
if colormap['text']:
displaywidth -= bval.count(r'\\')
return FormattedValue(bval, coloredval, displaywidth)
DEFAULT_NANOTIME_FORMAT = '%H:%M:%S.%N'
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S%z'
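# e.g. DEFAULT_TIMESTAMP_FORMAT renders a (hypothetical) UTC instant as
# '2015-03-14 09:26:53+0000'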
if platform.system() == 'Windows':
DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
class DateTimeFormat():
def __init__(self, timestamp_format=DEFAULT_TIMESTAMP_FORMAT, date_format=DEFAULT_DATE_FORMAT, nanotime_format=DEFAULT_NANOTIME_FORMAT):
self.timestamp_format = timestamp_format
self.date_format = date_format
self.nanotime_format = nanotime_format
def format_value_default(val, colormap, **_):
val = str(val)
escapedval = val.replace('\\', '\\\\')
bval = controlchars_re.sub(_show_control_chars, escapedval)
return bval if colormap is NO_COLOR_MAP else color_text(bval, colormap)
# Mapping cql type base names ("int", "map", etc) to formatter functions,
# making format_value a generic function
_formatters = {}
def format_value(type, val, **kwargs):
if val == EMPTY:
return format_value_default('', **kwargs)
formatter = _formatters.get(type.__name__, format_value_default)
return formatter(val, **kwargs)
def get_formatter(type):
return _formatters.get(type.__name__, format_value_default)
def formatter_for(typname):
def registrator(f):
_formatters[typname] = f
return f
return registrator
@formatter_for('bytearray')
def format_value_blob(val, colormap, **_):
bval = '0x' + binascii.hexlify(val)
return colorme(bval, colormap, 'blob')
formatter_for('buffer')(format_value_blob)
def format_python_formatted_type(val, colormap, color, quote=False):
bval = str(val)
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, color)
@formatter_for('Decimal')
def format_value_decimal(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'decimal')
@formatter_for('UUID')
def format_value_uuid(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'uuid')
@formatter_for('inet')
def formatter_value_inet(val, colormap, quote=False, **_):
return format_python_formatted_type(val, colormap, 'inet', quote=quote)
@formatter_for('bool')
def format_value_boolean(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'boolean')
def format_floating_point_type(val, colormap, float_precision, **_):
if math.isnan(val):
bval = 'NaN'
elif math.isinf(val):
bval = 'Infinity' if val > 0 else '-Infinity'
else:
exponent = int(math.log10(abs(val))) if abs(val) > sys.float_info.epsilon else -sys.maxsize - 1
if -4 <= exponent < float_precision:
# when this is true %g will not use scientific notation,
# increasing precision should not change this decision
# so we increase the precision to take into account the
# digits to the left of the decimal point
float_precision = float_precision + exponent + 1
bval = '%.*g' % (float_precision, val)
return colorme(bval, colormap, 'float')
formatter_for('float')(format_floating_point_type)
def format_integer_type(val, colormap, **_):
# base-10 only for now; support others?
bval = str(val)
return colorme(bval, colormap, 'int')
formatter_for('long')(format_integer_type)
formatter_for('int')(format_integer_type)
@formatter_for('datetime')
def format_value_timestamp(val, colormap, date_time_format, quote=False, **_):
bval = strftime(date_time_format.timestamp_format, calendar.timegm(val.utctimetuple()))
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, 'timestamp')
def strftime(time_format, seconds):
tzless_dt = datetime_from_timestamp(seconds)
return tzless_dt.replace(tzinfo=UTC()).strftime(time_format)
@formatter_for('Date')
def format_value_date(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'date')
@formatter_for('Time')
def format_value_time(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'time')
@formatter_for('str')
def format_value_text(val, encoding, colormap, quote=False, **_):
escapedval = val.replace(u'\\', u'\\\\')
if quote:
escapedval = escapedval.replace("'", "''")
escapedval = unicode_controlchars_re.sub(_show_control_chars, escapedval)
bval = escapedval.encode(encoding, 'backslashreplace')
if quote:
bval = "'%s'" % bval
return bval if colormap is NO_COLOR_MAP else color_text(bval, colormap, wcwidth.wcswidth(bval.decode(encoding)))
# name alias
formatter_for('unicode')(format_value_ |
diogo149/CauseEffectPairsChallenge | code/param.py | Python | gpl-3.0 | 2,724 | 0.001101 | from __future__ import print_function
# THIS IS A TEST
import cPickle as pickle
# import pickle
class Param(object):
"""
Class that allows you to store values when training and retrieve values when testing, as either a function result or a value.
"""
class ParamStore(object):
def __init__(self, filename):
self.filename = filename
def __enter__(self):
assert Param.instance is None
Param.instance = self
try:
with open(self.filename) as infile:
self.values = pickle.load(infile)
except IOError:
self.values = {}
def __exit__(self, type, value, traceback):
Param.instance = None
with open(self.filename, 'w') as outfile:
pickle.dump(self.values, outfile)
def __getitem__(self, key):
return self.values[key]
def __setitem__(self, key, value):
self.values[key] = value
instance = None
trainMode = False
@staticmethod
def v(identifier, value):
"""
Stores a value with an identifier.
"""
return Param.f(identifier, lambda: value)
@staticmethod
def f(identifier, func, *args, **kwargs):
"""
Stores a function result with an identifier.
"""
if Param.instance is None:
return func(*args, **kwargs)
if Param.trainMode:
Param.instance[identifier] = func(*args, **kwargs)
return Param.instance[identifier]
@staticmethod
def train(filename):
"""
Sets global Param mode to train.
"""
Param.trainMode = True
return Param.ParamStore(filename)
@staticmethod
def test(filename):
"""
Sets global Param mode to test.
"""
Param.trainMode = False
return Param.ParamStore(filename)
"""
The following are shortcuts. You can call them with:
>>> import param
>>> param.f(identifier, func)
Instead of:
>>> import param
>>> param.Param.f(identifier, func)
"""
train = Param.train
test = Param.test
f = Param.f
v = Param.v
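# Illustrative usage of the shortcuts above (hypothetical identifiers,
# filename, compute_mean function and data):
#
# with train("params.pkl"):
#     mean = f("col_mean", compute_mean, data)  # computed and stored
# with test("params.pkl"):
#     mean = f("col_mean", compute_mean, data)  # read back from the store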
class SETTINGS(object):
"""
Class used to store settings. Access with:
>>> some_variable = SETTINGS.[name]
OR
>>> SETTINGS.[name] = value
"""
@staticmethod
def default(**kwargs):
"""
Allows default settings to be set, if not yet set. Only takes in keyword arguments.
"""
for name, default_val in kwargs.items():
try:
getattr(SETTINGS, name)
except AttributeError:
setattr(SETTINGS, name, default_val)
|
VirusTotal/content | Packs/Orca/Integrations/Orca/Orca_test.py | Python | mit | 26,935 | 0.001596 | from datetime import datetime
import pytest
import json
from Orca import OrcaClient, BaseClient, DEMISTO_OCCURRED_FORMAT, fetch_incidents
DUMMY_ORCA_API_DNS_NAME = "https://dummy.io/api"
mock_alerts_response = {
"version": "0.1.0",
"status": "success",
"total_items": 58,
"total_ungrouped_items": 58,
"total_supported_items": 10000,
"data": [
{
"type": "malware",
"rule_id": "r1111ea1111",
"type_string": "Malware",
"type_key": "/test_eicar_file",
"category": "Malware",
"description": "Malware EICAR-Test-File found on asset",
"details": "We have detected a file infected with EICAR-Test-File on the asset.",
"recommendation": "Remediate the host and attend additional alerts on the host to close the infection path.",
"alert_labels": [
"malware_found"
],
"asset_category": "Storage",
"cloud_provider_id": "111111111111",
"cloud_provider": "aws",
"cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
"cloud_vendor_id": "111111111111",
"account_name": "111111111111",
"asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"asset_name": "scan-me-s3-bucket-s8rrr",
"asset_type": "storage",
"asset_type_string": "AWS S3 Bucket",
"group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"group_name": "scan-me-s3-bucket-s8rrr",
"group_type": "storage",
"group_type_string": "NonGroup",
"group_val": "nongroup",
"cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"cluster_name": "scan-me-s3-bucket-s8rrr",
"cluster_type": "storage",
"level": 0,
"asset_state": "enabled",
"asset_labels": [
"internet_facing",
"pii"
],
"asset_vendor_id": "scan-me-s3-bucket-s8rrr",
"asset_regions": [
"us-east-1"
],
"asset_regions_names": [
"N. Virginia"
],
"source": "test_eicar_file",
"findings": {
"malware": [
{
"type": "malware",
"labels": [
"malware_found"
],
"virus_names": [
"EICAR-Test-File"
],
"modification_time": "2020-04-26T14:26:11+00:00",
"file": "/test_eicar_file",
"sha256": "275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f",
"sha1": "3395856ce81f2b7382dee72602f798b642f14140",
"md5": "44d88612fea8a8f36de82e1278abb02f",
"has_macro": False
}
]
},
"configuration": {
"user_status": "closed",
"jira_issue_link": "https://www.jira.com/myproject",
"jira_issue": "TP-41"
},
"state": {
"alert_id": "orca-59",
"status": "in_progress",
"status_time": "2020-12-30T09:57:33+00:00",
"created_at": "2020-11-08T12:58:52+00:00",
"last_seen": "2020-12-30T10:35:46+00:00",
"score": 1,
"severity": "compromised",
"low_since": None,
"high_since": "2020-12-15T15:33:49+00:00",
"in_verification": None
},
"priv": {
"key": "3ea22222274111114b011111bb311111",
"score": 1,
"orig_score": 1,
"alert_id": "orca-59",
"full_scan_time": "2020-12-30T10:35:46+00:00",
"organization_id": "11111111-1111-1111-1111-c111881c1111",
"organization_name": "Orca Security",
"context": "dat | a",
"account_action_id_ctx": {
| "data": "11111111-1111-1111-1111-8a529a011111"
},
"scan_id_ctx": {
"data": "11111111-1111-1111-1111-8a529a011111_111111111111_bucket-111111e11111-us-east-1"
},
"first_seen": "2020-11-08T13:03:37+00:00"
},
"hdr": {
"asset_category": "Storage",
"organization_id": "11111111-1111-1111-1111-c111881c1111",
"organization_name": "Orca Security",
"cloud_provider": "aws",
"cloud_provider_id": "111111111111",
"cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
"context": "data",
"asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"asset_type": "storage",
"asset_type_string": "AWS S3 Bucket",
"asset_name": "scan-me-s3-bucket-s8rrr",
"group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"group_name": "scan-me-s3-bucket-s8rrr",
"group_type": "storage",
"group_type_string": "NonGroup",
"cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
"cluster_type": "storage",
"cluster_name": "scan-me-s3-bucket-s8rrr",
"level": 0,
"group_val": "nongroup",
"asset_vendor_id": "scan-me-s3-bucket-s8rrr",
"cloud_vendor_id": "111111111111",
"asset_state": "enabled",
"account_name": "111111111111",
"asset_labels": [
"internet_facing"
]
},
"insert_time": "2020-12-30T10:45:21+00:00"
},
{
"type": "malware",
"rule_id": "r1111ea1111",
"type_string": "Malware",
"type_key": "/usr/local/bin/eicarcom2.zip",
"category": "Malware",
"description": "Malware EICAR-Test-File found on asset",
"details": "We have detected a file infected with EICAR-Test-File on the asset.",
"recommendation": "Remediate the host and attend additional alerts on the host to close the infection path.",
"alert_labels": [
"malware_found"
],
"asset_category": "Image",
"cloud_provider_id": "111111111111",
"cloud_provider": "aws",
"cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
"cloud_vendor_id": "111111111111",
"account_name": "111111111111",
"asset_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
"asset_name": "my_test_image-1231asdasjdn",
"asset_type": "vmimage",
"asset_type_string": "VM Image",
"group_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
"group_name": "my_test_image-1231asdasjdn",
"group_type": "vmimage",
"group_type_string": "NonGroup",
"group_val": "nongroup",
"cluster_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
"cluster_name": "my_test_image-1231asdasjdn",
"cluster_type": "vmimage",
"level": 0,
"asset_vendor_id": "ami-11111c111111d7911",
"asset_distribution_name": "Ubuntu",
"asset_distribution_version": "18.04",
"asset_role_names": [
"mysql",
"ssh",
"haproxy",
"postgresql"
],
"source": "eicarcom2.zip",
"findings": {
"malware": [
{
"type": "malware",
"labels": [
"malware_found"
],
"vi |
vmalloc/weber-cli | weber/cli/generate.py | Python | bsd-3-clause | 427 | 0.004684 | import click
from .. import templates
@click.group()
def generate():
pass
@gene | rate.command()
@click.argument('name')
@click.option('--path', required=True)
@click.pass_context
def blueprint(ctx, name, path):
app = ctx.obj
templates.extract_template(
'snippets/blueprint.py',
app.get_blueprint_directory().join(name + | '.py'),
ctx={
'blueprint': {'name': name, 'path': path}})
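# A hypothetical invocation of this subcommand (the CLI entry-point name is
# assumed), which renders snippets/blueprint.py into the app's blueprint
# directory:
#
#   $ weber generate blueprint users --path /users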
|
nabin-info/hackerrank.com | validate-list-of-email-address-with-filter.py | Python | mit | 747 | 0.032129 | #!/usr/bin/python
import sys
imp | ort re
re_valid_email = re.compile(r'^[-_0-9a-zA-Z]+@[0-9a-zA-Z]+\.[0-9a-zA-Z]{1,3}$')
def valid_email(s):
return not (re_valid_email.search(s) == None)
N = int(raw_input().strip())
A = []
for i in range(N):
A += [ str(raw_input().strip()) ]
A.sort()
V = filter(valid_email, A)
print V
#### INPUT ##
## 3
## lara@hackerrank.com
## brian-23@hackerrank.com
## britts_54 | @hackerrank.com
##
#### OUTPUT ##
## ['brian-23@hackerrank.com', 'britts_54@hackerrank.com', 'lara@hackerrank.com']
#### INPUT ##
## 5
## dheeraj-234@gmail.com
## itsallcrap
## harsh_1234@rediff.in
## kunal_shin@iop.az
## matt23@@india.in
##
#### OUTPUT ##
## ['dheeraj-234@gmail.com', 'harsh_1234@rediff.in', 'kunal_shin@iop.az']
|
radoondas/elasticbeat | vendor/github.com/elastic/beats/packetbeat/tests/system/test_0061_nfs.py | Python | apache-2.0 | 1,469 | 0 | from packetbeat import BaseTest
"""
Tests for the NFS
"""
class Test(BaseTest):
def test_V3(self):
"""
Should correctly parse NFS v3 packet
"""
self.render_config_template(
nfs_ports=[20 | 49],
)
self.run_packetbeat(pcap="nfs_v3.pcap")
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["type"] == "nfs"
assert o["rpc.auth_flavor"] == "unix"
assert "rpc.time" in o
assert "rpc.time_str" in o
assert "rpc.call_size" in o
assert "rpc.reply_size" in o
assert o["nfs.version"] == 3
assert o["nfs.opcode"] == "LOOKUP"
assert o["nfs.status"] == "NFSERR_NOENT"
def tes | t_v4(self):
"""
Should correctly parse NFSv4.1 packet
"""
self.render_config_template(
nfs_ports=[2049],
)
self.run_packetbeat(pcap="nfs_v4.pcap")
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["type"] == "nfs"
assert o["rpc.auth_flavor"] == "unix"
assert "rpc.time" in o
assert "rpc.time_str" in o
assert "rpc.call_size" in o
assert "rpc.reply_size" in o
assert o["nfs.version"] == 4
assert o["nfs.minor_version"] == 1
assert o["nfs.tag"] == "readdir"
assert o["nfs.opcode"] == "READDIR"
assert o["nfs.status"] == "NFS_OK"
|
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/sessioninstaller/backends/synaptic.py | Python | mit | 3,567 | 0.002803 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Make use of synaptic as backend."""
# Copyright (C) 2008-2010 Sebastian Heinlein <devel@glatzor.de>
# Copyright (C) 2005-2007 Canonical
#
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__author__ = "Sebastian Heinlein <devel@glatzor.d | e>, " \
"Michael Vogt <mvo@canonical.com"
import tempfile
from gettext import gettext as _
from gi.repository import GObject
from defer import Deferred
import sessioninstaller.errors
class SynapticBackend(obje | ct):
"""Make use of Synaptic to install and remove packages."""
def _run_synaptic(self, xid, opt, tempf, interaction):
deferred = Deferred()
if tempf:
opt.extend(["--set-selections-file", "%s" % tempf.name])
#FIXME: Take interaction into account
opt.extend(["-o", "Synaptic::closeZvt=true"])
if xid:
opt.extend(["--parent-window-id", "%s" % (xid)])
cmd = ["/usr/bin/gksu",
"--desktop", "/usr/share/applications/update-manager.desktop",
"--", "/usr/sbin/synaptic", "--hide-main-window",
"--non-interactive"]
cmd.extend(opt)
flags = GObject.SPAWN_DO_NOT_REAP_CHILD
(pid, stdin, stdout, stderr) = GObject.spawn_async(cmd, flags=flags)
GObject.child_watch_add(pid, self._on_synaptic_exit, (tempf, deferred))
return deferred
def _on_synaptic_exit(self, pid, condition, (tempf, deferred)):
if tempf:
tempf.close()
if condition == 0:
deferred.callback()
else:
deferred.errback(sessioninstaller.errors.ModifyFailed())
def remove_packages(self, xid, package_names, interaction):
opt = []
# custom progress strings
#opt.append("--progress-str")
#opt.append("%s" % _("Please wait, this can take some time."))
#opt.append("--finish-str")
#opt.append("%s" % _("Update is complete"))
tempf = tempfile.NamedTemporaryFile()
for pkg_name in package_names:
tempf.write("%s\tuninstall\n" % pkg_name)
tempf.flush()
return self._run_synaptic(xid, opt, tempf, interaction)
def install_packages(self, xid, package_names, interaction):
opt = []
# custom progress strings
#opt.append("--progress-str")
#opt.append("%s" % _("Please wait, this can take some time."))
#opt.append("--finish-str")
#opt.append("%s" % _("Update is complete"))
tempf = tempfile.NamedTemporaryFile()
for pkg_name in package_names:
tempf.write("%s\tinstall\n" % pkg_name)
tempf.flush()
return self._run_synaptic(xid, opt, tempf, interaction)
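    # A rough usage sketch (argument values and the callback method name are
    # assumptions): each call spawns synaptic via gksu and returns a Deferred
    # that fires once the child process exits.
    #
    # >>> backend = SynapticBackend()
    # >>> d = backend.install_packages(xid=0, package_names=["vim"], interaction=None)
    # >>> d.add_callback(on_install_finished)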
def install_package_files(self, xid, package_names, interaction):
        raise NotImplementedError
# vim:ts=4:sw=4:et
|
mjumbewu/django-model-filters | model_filters/templatetags/model_nodes.py | Python | bsd-3-clause | 4,549 | 0.005716 | from django.db.models.manager import Manager
from django.template import Context, Node, Variab | le
from django.template.loader import get_template
class BaseModelBlockNode (Node):
def __init__(self, thing, resolved=True):
"""
thing -- The thing (probably a model instance or a list of model
instances) to be rendered as a block.
resolved -- If True, then ``thing`` is a resolved value. If False,
then thing is the name of a variable which, in context,
| will contain the value of the thing.
"""
self.thing = thing
self.resolved = resolved
def get_template_variable(self, thing, type_of_thing):
"""
Return the name of the template variable that should be used to render
the thing. If the variable name does not resolve to a value, then a
default template will be used.
"""
if isinstance(thing, (list, tuple)):
template_variable = ''
elif hasattr(thing, 'model') and thing:
template_variable = '%s_%s_%s_template' % \
(thing.model._meta.app_label, thing.model._meta.module_name,
type_of_thing)
else:
template_variable = '%s_%s_%s_template' % \
(thing._meta.app_label, thing._meta.module_name, type_of_thing)
return template_variable
def get_resolved_value(self, context):
"""
Return a resolved version of the thing being rendered -- either a model
instance or a list of such instances. Specifically, if the name of the
value was passed to the node as a string, resolve the value w.r.t. the
context. If the actual value was passed in, then just return the value.
"""
if not self.resolved:
res_var = Variable(self.thing).resolve(context)
else:
res_var = self.thing
return res_var
class ModelDetailNode (BaseModelBlockNode):
def get_context_data(self, instance):
"""
Calculate additional context data that will be used to render the thing.
"""
fields = []
for field in instance._meta.fields:
name = field.name
label = field.verbose_name
value = getattr(instance, field.name)
is_list = False
is_direct = True
model = instance._meta.module_name
if value is not None:
fields.append((
name, label, value, is_list, is_direct, model,
))
for rel_obj, model in instance._meta.get_all_related_objects_with_model():
name = rel_obj.get_accessor_name()
label = name
value = getattr(instance, name)
is_list = isinstance(value, (list, tuple, Manager))
is_direct = False
if value is not None:
fields.append((
name, label, value, is_list, is_direct, model,
))
return {'model':instance._meta.module_name,
'instance':instance,
'fields':fields}
def render(self, context):
instance = self.get_resolved_value(context)
template_variable = self.get_template_variable(instance, 'detail')
template_name = context.get(template_variable,
'model_filters/object_detail.html')
template = get_template(template_name)
context.update(Context(self.get_context_data(instance)))
if 'title' not in context:
context['title'] = None
return template.render(context)
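# A hypothetical template snippet using a tag backed by ModelDetailNode (the
# tag name and the `book` variable are assumptions; tag registration is not
# part of this module). Setting e.g. myapp_book_detail_template in the context
# swaps in a custom template instead of the default:
#
#   {% model_detail book %}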
class ModelListNode (BaseModelBlockNode):
def get_context_data(self, queryset):
if hasattr(queryset, 'model') and queryset.model:
model = queryset.model._meta.module_name
else:
model = None
return {'model':model, 'instance_list':queryset}
def render(self, context):
queryset = self.get_resolved_value(context)
template_variable = self.get_template_variable(queryset, 'list')
template_name = context.get(template_variable,
'model_filters/object_list.html')
template = get_template(template_name)
context.update(Context(self.get_context_data(queryset)))
if 'title' not in context:
context['title'] = None
return template.render(context)
|
YourCyborg/Sun-RPI | src/comms/models.py | Python | bsd-3-clause | 32,624 | 0.00564 | """
Models for the comsystem. The Commsystem is intended to be
used by Players (thematic IC communication is probably
best handled by custom commands instead).
The comm system could take the form of channels, but can also
be adopted for storing tells or in-game mail.
The comsystem's main component is the Message (Msg), which
carries the actual information between two parties.
Msgs are stored in the database and usually not
deleted.
A Msg always have one sender (a user), but can have
any number targets, both users and channels.
Channels are central objects that act as targets for
Msgs. Players can connect to channels by use of a
ChannelConnect object (this object is necessary to easily
be able to delete connections on the fly).
"""
from datetime import datetime
from django.db import models
from src.utils.idmapper.models import SharedMemoryModel
from src.comms import managers
from src.comms.managers import identify_object
from src.locks.lockhandler import LockHandler
from src.utils import logger
from src.utils.utils import is_iter, to_str, crop, make_iter
__all__ = ("Msg", "TempMsg", "Channel", "PlayerChannelConnection", "ExternalChannelConnection")
#------------------------------------------------------------
#
# Msg
#
#------------------------------------------------------------
class Msg(SharedMemoryModel):
"""
A single message. This model describes all ooc messages
sent in-game, both to channels and between players.
The Msg class defines the following properties:
sender - sender of message
receivers - list of target objects for message
channels - list of channels message was sent to
message - the text being sent
date_sent - time message was sent
hide_from_sender - bool if message should be hidden from sender
hide_from_receivers - list of receiver objects to hide message from
hide_from_channels - list of channels objects to hide message from
permissions - perm strings
"""
#
# Msg database model setup
#
#
    # These database fields are all set using their corresponding properties,
    # named the same as the field, but without the db_* prefix.
    # The sender is either a player, an object or an external sender, like an IRC channel.
    # Normally there is only one, but if co-modification of a message is allowed, there
    # may be more than one "author".
db_sender_players = models.ManyToManyField("players.PlayerDB", related_name='sender_player_set', null=True, verbose_name='sender(player)', db_index=True)
db_sender_objects = models.ManyToManyField("objects.ObjectDB", related_name='sender_object_set', null=True, verbose_name='sender(object)', db_index=True)
db_sender_external = models.CharField('external sender', max_length=255, null=True, db_index=True,
help_text="identifier for external sender, for example a sender over an IRC connection (i.e. someone who doesn't have an exixtence in-game).")
# The destination objects of this message. Stored as a
# comma-separated string of object dbrefs. Can be defined along
# with channels below.
db_receivers_players = models.ManyToManyField('players.PlayerDB', related_name='receiver_player_set', null=True, help_text="player receivers")
db_receivers_objects = models.ManyToManyField('objects.ObjectDB', related_name='receiver_object_set', null=True, help_text="object receivers")
    db_receivers_channels = models.ManyToManyField("Channel", related_name='channel_set', null=True, help_text="channel receivers")
# header could be used for meta-info about the message if your system needs it, or as a separate
# store for the mail subject line maybe.
db_header = models.CharField('header', max_length=128, null=True, blank=True, db_index=True)
# the message body itself
    db_message = models.TextField('message')
# send date
db_date_sent = models.DateTimeField('date sent', editable=False, auto_now_add=True, db_index=True)
# lock storage
db_lock_storage = models.TextField('locks', blank=True,
help_text='access locks on this message.')
# these can be used to filter/hide a given message from supplied objects/players/channels
db_hide_from_players = models.ManyToManyField("players.PlayerDB", related_name='hide_from_players_set', null=True)
db_hide_from_objects = models.ManyToManyField("objects.ObjectDB", related_name='hide_from_objects_set', null=True)
    db_hide_from_channels = models.ManyToManyField("Channel", related_name='hide_from_channels_set', null=True)
# Database manager
objects = managers.MsgManager()
def __init__(self, *args, **kwargs):
SharedMemoryModel.__init__(self, *args, **kwargs)
self.locks = LockHandler(self)
class Meta:
"Def | ine Django meta options"
| verbose_name = "Message"
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# sender property (wraps db_sender_*)
#@property
def __senders_get(self):
"Getter. Allows for value = self.sender"
return [hasattr(o, "typeclass") and o.typeclass or o for o in
list(self.db_sender_players.all()) + list(self.db_sender_objects.all())]
#@sender.setter
def __senders_set(self, value):
"Setter. Allows for self.sender = value"
for val in (v for v in make_iter(value) if v):
obj, typ = identify_object(val)
if typ == 'player':
self.db_sender_players.add(obj)
elif typ == 'object':
self.db_sender_objects.add(obj)
elif isinstance(typ, basestring):
self.db_sender_external = obj
elif not obj:
return
else:
raise ValueError(obj)
self.save()
#@sender.deleter
def __senders_del(self):
"Deleter. Clears all senders"
self.db_sender_players.clear()
self.db_sender_objects.clear()
self.db_sender_external = ""
self.save()
senders = property(__senders_get, __senders_set, __senders_del)
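    # A brief usage sketch (a_player and an_object are assumed instances): the
    # property accepts a single sender or an iterable and routes each one to
    # the matching m2m field or the external-sender column.
    #
    # >>> msg = Msg(db_message="Hello")
    # >>> msg.save()
    # >>> msg.senders = a_player                 # stored in db_sender_players
    # >>> msg.senders = [a_player, an_object]
    # >>> msg.remove_sender(an_object)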
def remove_sender(self, value):
"Remove a single sender or a list of senders"
for val in make_iter(value):
obj, typ = identify_object(val)
if typ == 'player':
self.db_sender_players.remove(obj)
elif typ == 'object':
self.db_sender_objects.remove(obj)
elif isinstance(obj, basestring) and self.db_sender_external == obj:
self.db_sender_external = ""
else:
raise ValueError(obj)
self.save()
# receivers property
#@property
def __receivers_get(self):
"Getter. Allows for value = self.receivers. Returns three lists of receivers: players, objects and channels."
return [hasattr(o, "typeclass") and o.typeclass or o for o in
list(self.db_receivers_players.all()) + list(self.db_receivers_objects.all())]
#@receivers.setter
def __receivers_set(self, value):
"Setter. Allows for self.receivers = value. This appends a new receiver to the message."
for val in (v for v in make_iter(value) if v):
obj, typ = identify_object(val)
if typ == 'player':
self.db_receivers_players.add(obj)
elif typ == 'object':
self.db_receivers_objects.add(obj)
elif not obj:
return
else:
raise ValueError
self.save()
#@receivers.deleter
def __receivers_del(self):
"Deleter. Clears all receivers"
self.db_receivers_players.clear()
self.db_receivers_objects.clear()
self.save()
receivers = property(__receivers_get, __receivers_set |
plotly/python-api | packages/python/plotly/plotly/graph_objs/surface/contours/_x.py | Python | mit | 17,141 | 0.000992 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class X(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "surface.contours"
_path_str = "surface.contours.x"
_valid_props = {
"color",
"end",
"highlight",
"highlightcolor",
"highlightwidth",
"project",
"show",
"size",
"start",
"usecolormap",
"width",
}
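    # A minimal construction sketch (the z data is made up): these properties
    # are normally reached through a surface trace, using either nested dicts
    # or plotly's magic-underscore notation.
    #
    # >>> import plotly.graph_objects as go
    # >>> fig = go.Figure(go.Surface(z=[[1, 2], [3, 4]]))
    # >>> fig.update_traces(contours_x=dict(show=True, color="red", size=0.5))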
# color
# -----
@property
def color(self):
"""
Sets the color of the contour lines.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# end
# ---
@property
def end(self):
"""
Sets the end contour level value. Must be more than
`contours.start`
The 'end' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
# highlight
# ---------
@property
def highlight(self):
"""
Determines whether or not contour lines about the x dimension
are highlighted on hover.
The 'highlight' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["highlight"]
@highlight.setter
def highlight(self, val):
self["highlight"] = val
# highlightcolor
# --------------
@property
def highlightcolor(self):
"""
Sets the color of the highlighted contour lines.
The 'highlightcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["highlightcolor"]
@highlightcolor.setter
def hi | ghlightcolor(self, val):
self["highlightcolor"] = val
# highlightwidth
# - | -------------
@property
def highlightwidth(self):
"""
Sets the width of the highlighted contour lines.
The 'highlightwidth' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["highlightwidth"]
@highlightwidth.setter
def highlightwidth(self, val):
self["highlightwidth"] = val
# project
# -------
@property
def project(self):
"""
The 'project' property is an instance of Project
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.x.Project`
- A dict of string/value properties that will be passed
to the Project constructor
Supported dict properties:
x
Determines whether or not these contour lines
are projected on the x plane. If `highlight` is
set to True (the default), the projected lines
are shown on hover. If `show` is set to True,
|
neutrons/FastGR | addie/processing/mantid/master_table/import_from_database/conflicts_solver.py | Python | mit | 6,804 | 0.003086 | from __future__ import (absolute_import, division, print_function)
from collections import defaultdict
import numpy as np
from qtpy.QtWidgets import QMainWindow, QTableWidget, QRadioButton, QTableWidgetItem
from addie.utilities import load_ui
from addie.utilities.list_runs_parser import ListRunsParser
#from addie.ui_solve_import_conflicts import Ui_MainWindow as UiMainWindow
class ConflictsSolverHandler:
def __init__(self, parent=None, json_conflicts={}):
o_solver = ConflictsSolverWindow(parent=parent, json_conflicts=json_conflicts)
if parent.conflicts_solver_ui_position:
o_solver.move(parent.conflicts_solver_ui_position)
o_solver.show()
class ConflictsSolverWindow(QMainWindow):
list_table = [] # name of table in each of the tabs
table_width_per_character = 20
table_header_per_character = 15
list_keys = ["Run Number", 'chemical_formula', 'geometry', 'mass_density', 'sample_env_device']
columns_label = ["Run Number", "Chemical Formula", "Geometry", "Mass Density", "Sample Env. Device"]
list_of_keys_with_conflicts = []
def __init__(self, parent=None, json_conflicts={}):
self.parent = parent
self.json_conflicts = json_conflicts
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('solve_import_conflicts.ui', baseinstance=self)
#self.ui = UiMainWindow()
#self.ui.setupUi(self)
self.init_widgets()
def init_widgets(self):
json_conflicts = self.json_conflicts
for _key in json_conflicts.keys():
if json_conflicts[_key]['any_conflict']:
self.list_of_keys_with_conflicts.append(_key)
self._add_tab(json=json_conflicts[_key]['conflict_dict'])
def _calculate_columns_width(self, json=None):
"""will loop through all the conflict keys to figure out which one, for each column label, the string
is the longest"""
list_key = self.list_keys
columns_width = defaultdict(list)
for _key in list_key:
for _conflict_index in json.keys():
columns_width[_key].append(self.table_width_per_character * len(json[_conflict_index][_key]))
final_columns_width = []
for _key in list_key:
_max_width = np.max([np.array(columns_width[_key]).max(), len(_key)* self.table_header_per_character])
final_columns_width.append(_max_width)
return final_columns_width
def _add_tab(self, json=None):
"""will look at the json and will display the values in conflicts in a new tab to allow the user
to fix the conflicts"""
number_of_tabs = self.ui.tabWidget.count()
_table = QTableWidget()
# initialize each table
columns_width = self._calculate_columns_width(json=json)
for _col in np.arange(len(json[0])):
_table.insertColumn(_col)
_table.setColumnWidth(_col, columns_width[_col])
for _row in np.arange(len(json)):
_table.insertRow(_row)
self.list_table.append(_table)
_table.setHorizontalHeaderLabels(self.columns_label)
for _row in np.arange(len(json)):
# run number
_col = 0
list_runs = json[_row]["Run Number"]
o_parser = ListRunsParser()
checkbox = QRadioButton(o_parser.new_runs(list_runs=list_runs))
if _row == 0:
checkbox.setChecked(True)
# QtCore.QObject.connect(checkbox, QtCore.SIGNAL("clicked(bool)"),
# lambda bool, row=_row, table_id=_table:
# self._changed_conflict_checkbox(bool, row, table_id))
_table.setCellWidget(_row, _col, checkbox)
_col += 1
# chemical formula
item = QTableWidgetItem(json[_row]["chemical_formula"])
_table.setItem(_row, _col, item)
_col += 1
# geometry
item = QTableWidgetItem(json[_row]["geometry"])
_table.setItem(_row, _col, item)
_col += 1
# mass_density
item = QTableWidgetItem(json[_row]["mass_density"])
_table.setItem(_row, _col, item)
_col += 1
# sample_env_device
item = QTableWidgetItem(json[_row]["sample_env_device"])
_table.setItem(_row, _col, item)
self.ui.tabWidget.insertTab(number_of_tabs, _table, "Conflict #{}".format(number_of_tabs))
# def _changed_conflict_checkbox(self, state, row, table_id):
# print("state is {} in row {} from table_id {}".format(state, row, table_id))
def save_resolved_conflict(self, tab_index=0, key=None):
"""Using the radio button checked, will save the chemical_formula, geometry... into the final json"""
def _get_checked_row(table_ui=None):
"""returns the first row where the radio button (column 0) is checked"""
if table_ui is None:
return -1
nbr_row = table_ui.rowCount()
for _row in np.arange(nbr_row):
is_radio_button_checked = table_ui.cellWidget(_row, 0).isChecked()
if is_radio_button_checked:
return _row
return -1
table_ui = self.list_table[tab_index]
json_conflicts = self.json_conflicts
this_json = json_conflicts[key]
        # row checked (w | hich row to use to fix the conflict)
_row = _get_checked_row(table_ui=table_ui)
this_json['any_conflict'] = False
# chemical_formula, geometry, etc.
chemical_formula = str(table_ui.item(_row, 1).text())
geometry = str(table_ui.item(_row, 2).text()) |
mass_density = str(table_ui.item(_row, 3).text())
sample_env_device = str(table_ui.item(_row, 4).text())
this_json['resolved_conflict'] = {'chemical_formula': chemical_formula,
'geometry': geometry,
'mass_density': mass_density,
'sample_env_device': sample_env_device}
self.json_conflicts = json_conflicts
def accept(self):
for _conflict_index, _key in enumerate(self.list_of_keys_with_conflicts):
self.save_resolved_conflict(tab_index=_conflict_index, key=_key)
self.parent.from_oncat_to_master_table(json=self.json_conflicts,
with_conflict=False)
self.close()
def reject(self):
self.parent.from_oncat_to_master_table(json=self.json_conflicts,
ignore_conflicts=True)
self.close()
def closeEvent(self, c):
pass
|
wzyy2/PiBox | PiBox/sh/datapoint_tools/num.py | Python | gpl-2.0 | 3,324 | 0.011131 | #/bin/env python
# -*-coding:utf8-*-
'''
# Any issues or improvements please contact jacob-chen@iotwrt.com
'''
import urllib
import json
if __name__ == '__main__':
choose = int(raw_input('1.write 2.read 3.edit 4.remove 5.history'))
domain = raw_input('domain(as 192.168.10.106:8000) : ')
sensor_id = raw_input('sensor_id(int) : ')
#write
if choose == 1:
a = list()
ex = 'y'
while ex == 'y':
key = raw_input('key(as 2012-12-12T11:11:11) : ')
value = float(raw_input('value(float) : '))
a.append({'value': value, 'key' : key})
ex = raw_input('again?y or n ')
#raw post
params = json.dumps(a)
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/", params)
#get
# params = urllib.urlencode({'value': value, 'key' : key})
# f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/?%s" % params)
get = f.read()
print 'ret body:', get
s = json.loads(get)
print 'msg:', s['msg']
#read
elif choose == 2:
key = raw_i | nput('key(as 2012-12-12T11:11:11) : ')
params = urllib.urlencode({'key': key})
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/get/?%s" % params)
get = f.read()
print 'ret body:', get
s = json.loads(get)
print 'key:',s['key'],'value:',s['value']
#edit
elif choose == 3:
| key = raw_input('key(as 2012-12-12T11:11:11) : ')
value = float(raw_input('value(float) : '))
params = urllib.urlencode({'key': key, 'value' : value})
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/edit/?%s" % params)
get = f.read()
print 'ret body:', get
s = json.loads(get)
print 'msg:', s['msg']
#remove
elif choose == 4:
key = raw_input('key(as 2012-12-12T11:11:11) : ')
params = urllib.urlencode({'key': key})
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/remove/?%s" % params)
get = f.read()
print 'ret body:', get
s = json.loads(get)
print 'msg:', s['msg']
#history
elif choose == 5:
ex = raw_input('latest 20?y or n')
if ex == 'y':
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/history/")
get = f.read()
print 'ret body:', get
s = json.loads(get)
for item in s['datapoint']:
print 'key:',item['key'],'value:',item['value']
else:
start = raw_input('start(as 2012-12-12T11:11:11) : ')
end = raw_input('end(as 2012-12-12T11:11:11) : ')
interval = raw_input('interval(int) : ')
params = urllib.urlencode({'start': start, 'end' : end, 'interval' : interval})
f = urllib.urlopen("http://" + domain + "/API/sensor/" + sensor_id + "/datapoint/history/?%s" % params)
get = f.read()
print 'ret body:', get
s = json.loads(get)
for item in s['datapoint']:
print 'key:',item['key'],'value:',item['value'] |
berkmancenter/mediacloud | apps/topics-base/tests/python/topics_base/media/test_normalize_url.py | Python | agpl-3.0 | 481 | 0 | # n | oinspection PyProtectedMember
from topics_base.media import _normalize_url, MAX_URL_LENGTH
def test_normalize_url():
"""Test normalize_url()."""
assert _normalize_url('http://www.foo.com/') == 'http://foo.com/'
assert _normalize_url('http://foo.com') == 'http://foo.com/'
assert _normalize_url('http://articles.foo.com/') == 'http://foo.com/'
long_url = 'http://foo.com/' + ('x' * (1024 * 1024))
assert len(_normalize_url(long_url)) == MAX_URL_LENG | TH
|
davidam/python-examples | basics/cadena.py | Python | gpl-3.0 | 1,315 | 0 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any l | ater ve | rsion.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
# -*- coding: utf-8 -*-
cadena = str(input("Escriba un texto: "))
if cadena.isalnum():
print("Consta de letras y/o números, sin espacios")
if cadena.isalpha():
print("Consta de letras, sin números y sin espacios")
if cadena.isdigit():
print("Consta sólo de números, sin espacios")
print("Texto en minúsculas: %s" % str(cadena.lower()))
print("Texto capitalizado: %s" % str(cadena.capitalize()))
|
frankosan/pypers | pypers/steps/samtools/bamcheck.py | Python | gpl-3.0 | 1,886 | 0.011135 | from pypers.core.step import CmdLineStep
class BamCheck(CmdLineStep):
spec = {
"version": "0.1.19",
"descr": [
"Run the samtools bamcheck utility against a bam file,",
"generating a text stats file suitable for plot-bamcheck.",
"If a target file is provided, also creates stats for the target regions only"
],
"args":
{
"inputs": [
{
"name" : "input_files",
"type" : "file",
"iterable": True,
| "descr" : "a list of b | am files to be checked",
}
],
"outputs": [
{
"name" : "output_files",
"type" : "file",
"value" : "{{input_files}}.bamcheck.txt",
"descr" : "text file with qc on bam file",
},
{
"name" : "output_targets",
"type" : "file",
"value" : "{{input_files}}.bamcheck.target.txt",
"descr" : "text file with qc on bam file in target regions",
"required" : False
}
],
"params": [
{
'name' : 'baits_file',
'type' : 'str',
'value': '',
'descr': 'agilent baits file. It is a file containing regions to plot (format: chr start end label)'
},
]
},
"cmd": [
"/software/pypers/samtools/samtools-0.1.19/bin/bamcheck {{input_files}} > {{output_files}}",
" && if [ -f \"{{baits_file}}\" ]; then /software/pypers/samtools/samtools-0.1.19/bin/bamcheck -t {{baits_file}} {{input_files}} > {{output_targets}}; fi"
]
}
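    # For illustration, with input_files == "sample.bam" and no baits_file,
    # the first cmd entry above would render to roughly:
    #
    #   /software/pypers/samtools/samtools-0.1.19/bin/bamcheck sample.bam > sample.bam.bamcheck.txt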
|
guorendong/iridium-browser-ubuntu | tools/perf/profile_creators/large_profile_extender.py | Python | bsd-3-clause | 713 | 0.004208 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from profile_creators import cookie_profile_extender
from profile_creators import history_profile_extender
from profile_creators import profile_extender
class LargeProf | ileExtender(profile_extender.Pr | ofileExtender):
"""This class creates a large profile by performing a large number of url
navigations."""
def Run(self):
extender = history_profile_extender.HistoryProfileExtender(
self.finder_options)
extender.Run()
extender = cookie_profile_extender.CookieProfileExtender(
self.finder_options)
extender.Run()
|
mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/test__threading_patched_local.py | Python | gpl-2.0 | 510 | 0.003922 | from gevent import monkey; monkey.patch_all()
import threading
localdata = threading.lo | cal()
localdata.x = "hello"
assert localdata.x == 'hello'
success = []
def func():
try:
localdata.x
raise AssertionError('localdata.x must raise AttributeError')
except AttributeError:
pass
assert localdata.__dict__ == | {}, localdata.__dict__
success.append(1)
t = threading.Thread(None, func)
t.start()
t.join()
assert success == [1], 'test failed'
assert localdata.x == 'hello'
|
bundlewrap/bundlewrap | bundlewrap/items/symlinks.py | Python | gpl-3.0 | 6,528 | 0.001225 | from collections import defaultdict
from os.path import dirname, normpath
from shlex import quote
from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.remote import PathInfo
from bundlewrap.utils.text import mark_for_translation as _
from bundlewrap.utils.text import is_subdirectory
ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None)
class Symlink(Item):
"""
A symbolic link.
"""
BUNDLE_ATTRIBUTE_NAME = "symlinks"
ITEM_ATTRIBUTES = {
'group': "root",
'owner': "root",
'target': None,
}
ITEM_TYPE_NAME = "symlink"
REQUIRED_ATTRIBUTES = ['target']
def __repr__(self):
return "<Symlink path:{} target:{}>".format(
quote(self.name),
self.attributes['target'],
)
def cdict(self):
cdict = {
'target': self.attributes['target'],
'type': 'symlink',
}
for optional_attr in ('group', 'owner'):
if self.attributes[optional_attr] is not None:
cdict[optional_attr] = self.attributes[optional_attr]
return cdict
def display_on_create(self, cdict):
del cdict['type']
return cdict
def fix(self, status):
if status.must_be_created or 'type' in status.keys_to_fix:
# fixing the type fixes everything
self._fix_type(status)
return
for fix_type in ('target', 'owner', 'group'):
if fix_type in status.keys_to_fix:
if fix_type == 'group' and 'owner' in status.keys_to_fix:
# owner and group are fixed with a single chown
continue
getattr(self, "_fix_" + fix_type)(status)
def _fix_owner(self, status):
group = self.attributes['group'] or ""
if group:
group = ":" + quote(group)
if self.node.os in self.node.OS_FAMILY_BSD:
command = "chown -h {}{} {}"
else:
command = "chown -h {}{} -- {}"
self.run(command.format(
quote(self.attributes['owner'] or ""),
group,
quote(self.name),
))
_fix_group = _fix_owner
def _fix_target(self, status):
if self.node.os in self.node.OS_FAMILY_BSD:
self.run("ln -sfh -- {} {}".format(
quote(self.attributes['target']),
quote(self.name),
))
else:
self.run("ln -sfT -- {} {}".format(
quote(self.attributes['target']),
quote(self.name),
))
def _fix_type(self, status):
self.run("rm -rf -- {}".format(quote(self.name)))
self.run("mkdir -p -- {}".format(quote(dirname(self.name))))
self.run("ln -s -- {} {}".format(
quote(self.attributes['target']),
quote(self.name),
))
if self.attributes['owner'] or self.attributes['group']:
self._fix_owner(status)
def get_auto_deps(self, items):
deps = []
for item in items:
if item == self:
continue
if item.ITEM_TYPE_NAME == "file" and (
is_subdirectory(item.name, self.name) or
item.name == self.name
):
raise BundleError(_(
"{item1} (from bundle '{bundle1}') blocking path to "
"{item2} (from bundle '{bundle2}')"
).format(
item1=item.id,
bundle1=item.bundle.name,
item2=self.id,
bundle2=self.bundle.name,
))
elif item.ITEM_TYPE_NAME == "user" | and item.name | == self.attributes['owner']:
if item.attributes['delete']:
raise BundleError(_(
"{item1} (from bundle '{bundle1}') depends on item "
"{item2} (from bundle '{bundle2}') which is set to be deleted"
).format(
item1=self.id,
bundle1=self.bundle.name,
item2=item.id,
bundle2=item.bundle.name,
))
else:
deps.append(item.id)
elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']:
if item.attributes['delete']:
raise BundleError(_(
"{item1} (from bundle '{bundle1}') depends on item "
"{item2} (from bundle '{bundle2}') which is set to be deleted"
).format(
item1=self.id,
bundle1=self.bundle.name,
item2=item.id,
bundle2=item.bundle.name,
))
else:
deps.append(item.id)
elif item.ITEM_TYPE_NAME in ("directory", "symlink"):
if is_subdirectory(item.name, self.name):
deps.append(item.id)
return deps
def patch_attributes(self, attributes):
if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD:
# BSD doesn't have a root group, so we have to use a
# different default value here
attributes['group'] = 'wheel'
return attributes
def sdict(self):
path_info = PathInfo(self.node, self.name)
if not path_info.exists:
return None
else:
return {
'target': path_info.symlink_target if path_info.is_symlink else "",
'type': 'symlink' if path_info.is_symlink else path_info.stat['type'],
'owner': path_info.owner,
'group': path_info.group,
}
@classmethod
def validate_attributes(cls, bundle, item_id, attributes):
for key, value in attributes.items():
ATTRIBUTE_VALIDATORS[key](item_id, value)
@classmethod
def validate_name(cls, bundle, name):
if normpath(name) == "/":
raise BundleError(_("'/' cannot be a file"))
if normpath(name) != name:
raise BundleError(_(
"'{path}' is an invalid symlink path, should be '{normpath}' (bundle '{bundle}')"
).format(
path=name,
normpath=normpath(name),
bundle=bundle.name,
))
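# A sketch of how this item type is typically declared in a bundle (paths and
# attribute values are examples only):
#
# symlinks = {
#     "/usr/local/bin/python": {
#         "target": "/usr/bin/python3",
#         "owner": "root",
#         "group": "root",
#     },
# }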
|
jldbc/pybaseball | pybaseball/split_stats.py | Python | mit | 8,655 | 0.003582 | import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from typing import Dict, Optional, Tuple, Union
import bs4 as bs
import pandas as pd
import re
def download_url(url: str) -> bytes:
"""
Gets the content from the url specified
"""
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
resp = session.get(url)
return resp.content
def get_split_soup(playerid: str, year: Optional[int] = None, pitching_splits: bool = False) -> bs.BeautifulSoup:
"""
gets soup for the player splits.
"""
pitch_or_bat = 'p' if pitching_splits else 'b'
if year is None: # provides scores from yesterday if date is not provided
url = f"https://www.baseball-reference.com/players/split.fcgi?id={playerid}&year=Career&t={pitch_or_bat}"
else:
year = str(year)
url = f"https://www.baseball-reference.com/players/split.fcgi?id={playerid}&year={year}&t={pitch_or_bat}"
html = download_url(url)
soup = bs.BeautifulSoup(html, 'lxml')
return soup
def get_player_info(playerid: str, soup: bs.BeautifulSoup = None) -> Dict:
'''
Returns a dictionary with player position, batting and throwing handedness, player height in inches, player weight, and current team from Baseball Reference.
'''
if not soup:
soup = get_split_soup(playerid)
about_info = soup.find_all(
"div", {"itemtype": "https://schema.org/Person"})
info = [ele for ele in about_info]
# This for loop goes through the player bio section at the top of the splits page to find all of the <p> tags
for i in range(len(info)):
ptags = info[i].find_all('p')
fv = []
# This loop goes through each of the <p> tags and finds all text between the tags including the <strong> tags.
for j in range(len(ptags)):
InfoRegex = re.compile(r'>(.*?)<', re.DOTALL)
r = InfoRegex.findall(str(ptags[j]))
# This loop cleans up the text found in the outer loop and removes non alphanumeric characters.
for k in range(len(r)):
pattern = re.compile(r'[\W_]+')
strings = pattern.sub(' ', r[k])
if strings and strings != ' ':
fv.append(strings)
player_info_data = {
'Position': fv[1],
'Bats': fv[3],
'Throws': fv[5],
# 'Height': int(fv[6].split(' ')[0])*12+int(fv[6].split(' ')[1]), # Commented out because I determined that Pablo Sandoval has some weird formatting that ruins this. Uncomment for ht, wt of most players.
# 'Weight': int(fv[7][0:3]),
# 'Team': fv[10]
}
return player_info_data
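# A quick usage sketch (the Baseball-Reference player id is an example):
#
# >>> splits = get_splits("troutmi01", year=2018)
# >>> splits, info = get_splits("troutmi01", player_info=True)  # per the docstring below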
def get_splits(playerid: str, year: Optional[int] = None, player_info: bool = False, pitching_splits: bool = False) -> Union[pd.DataFrame, Tuple[pd.DataFrame, Dict]]:
"""
Returns a dataframe of all split stats for a given player.
If player_info is True, this will also return a dictionary that includes player position, handedness, height, weight, position, and team
"""
soup = get_split_soup(playerid, year, pitching_splits)
# the splits tables on the bbref site are all within an embedded comment. This finds all the comments
comment = soup.find_all(text=lambda text: isinstance(text, bs.Comment))
data = []
level_data = []
for i in range(len(comment)):
commentsoup = bs.BeautifulSoup(comment[i], 'lxml')
split_tables = commentsoup.find_all(
"div", {"class": "table_container"})
splits = [ele for ele in split_tables]
headings = []
level_headings = []
for j in range(len(splits)):
split_type = splits[j].find_all('caption')[0].string.strip()
# two types of tables on bref, game level and non-game level
if split_type[-5:] == 'Level':
if year == None: # The bbref tables for career splits have one extra preceding th column labeled 'I' that is not used and is not in the single season records
level_headings = [th.get_text()
for th in splits[j].find("tr").find_all("th")][1:]
else:
level_headings = [th.get_text()
for th in splits[j].find("tr").find_all("th")][:]
level_headings.append('Split Type')
level_headings.append('Player ID')
# singles data isn't included in the tables so this appends the column header
level_headings.append('1B')
level_data.append(level_headings)
rows = splits[j].find_all('tr')
for row in rows:
if year == None: # The bbref tables for career splits have one extra preceding th column labeled 'I' that is not used and is not in the single season records
level_cols = row.find_all('td')
else:
level_cols = row.find_all(['th', 'td'])
level_cols = [ele.text.strip() for ele in level_cols]
if split_type != "By Inning": # bbref added three empty columns to the by inning tables that don't match the rest of the tables. Not including this split table in results
level_cols.append(split_type)
level_cols.append(playerid)
level_data.append([ele for ele in level_cols])
else:
if year == None: # The bbref tables for career splits have one extra preceding th column labeled 'I' that is not used and is not in the single season records
headings = [th.get_text()
for th in splits[j].find("tr").find_all("th")][1:]
else:
headings = [th.get_text()
for th in splits[j].find("tr").find_all("th")][:]
headings.append('Split Type')
headings.append('Player ID')
# singles data isn't included in the tables so this appends the column header
headings.append('1B')
data.append(headings)
rows = splits[j].find_all('tr')
for row in rows:
if year == None: # The bbref tables for career splits have one extra preceding th column labeled 'I' that is not used and is not in the single season records
cols = row.find_all('td')
else:
cols = row.find_all(['th', 'td'])
cols = [ele.text.strip() for ele in cols]
if split_type != "By Inning": # bbref added three empty columns to the by inning tables that don't match the rest of the tables. Not including this split table in results
cols.append(split_type)
cols.append(playerid)
data.append([ele for ele in cols])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
data = data.set_index(['Player ID', 'Split Type', 'Split'])
data = data.drop(index=['Split'], level=2)
data = data.apply(pd.to_numeric, errors='coerce').convert_dtypes()
data = data.dropna(axis=1, how='all')
data['1B'] = data['H']-data['2B']-data['3B']-data['HR']
data = data.loc[playerid]
if pitching_splits is True: # Returns Game Level tables as a second dataframe for pitching splits
level_data = pd.DataFrame(level_data)
level_data = level_data.rename(columns=level_data.iloc[0])
level_data = level_data.reindex(level_data | .index.drop(0))
level_data = level_data.set_index(['Player ID', 'Split Type', 'Split'])
level_data = level_data.drop(index=['Split | '], level=2)
level_data = level_data.apply(
pd.to_numeric, errors='coerce').convert_dtypes()
level_data = level_data.dropna(axis=1 |
sammdot/circa | client.py | Python | bsd-3-clause | 11,289 | 0.029498 | import logging
import pathlib
import socket
import sys
import threading
from channel import Channel, ChannelList, User
from server import Server
from util.nick import nickeq, nicklower
from util.msg import Message
class Client:
def __init__(self, server, nick, username, realname, **conf):
self.sock = None
self.nick = None
self.server = None
self.conf = {
"port": 6667,
"autorejoin": True,
"autoconn": True,
"channels": []
}
self.conf.update(conf)
self.conf.update({
"server": server,
"nick": nick,
"username": username,
"realname": realname,
})
self.channels = ChannelList()
self.listeners = {}
self.nickmod = 0
if self.conf["autoconn"]:
threading.Thread(name="main", target=self.connect).start()
def connect(self):
"""Attempt to connect to the server. Log in if successful."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM | )
try:
self.sock.connect((self.conf["server"], self.conf["port"]))
self.server = Server(self.conf["server"], self.conf["port"])
logging.info("Connected to %s", self.server.host)
self.send("NICK", self.conf["nick"])
if "password" in self.conf:
self.send("PASS", self.conf["password"])
self.send("USER", self.conf["username | "], 8, "*", \
self.conf["realname"])
threading.Thread(name="listen", target=self.listen).start()
except socket.error as e:
logging.error("Cannot connect to %s: %s", self.conf["server"], e)
self.sock.close()
self.sock = None
def send(self, *msg):
"""Send a raw message to the server."""
if not self.sock:
logging.error("Not connected to server")
return
message = " ".join(map(str, msg))
self.sock.sendall(bytes(message + "\r\n", "utf-8"))
logging.debug("(%s) %s", threading.current_thread().name, message.rstrip())
def say(self, to, msg):
"""Send a message to a user/channel."""
if not msg:
return
self.send("PRIVMSG", to, ":" + msg)
if any([to.startswith(i) for i in self.server.types]):
self.channels[to[1:]].users[self.nick].messages.append(msg)
def notice(self, to, msg):
"""Send a notice to a user/channel."""
self.send("NOTICE", to, ":" + msg)
def ctcp_say(self, to, text):
"""Send a CTCP PRIVMSG message."""
self.say(to, "\x01{0}\x01".format(text))
def ctcp_notice(self, to, text):
"""Send a CTCP NOTICE message."""
self.notice(to, "\x01{0}\x01".format(text))
def action(self, to, msg):
self.ctcp_say(to, "ACTION {0}".format(msg))
def join(self, chan):
"""Join a channel."""
self.send("JOIN", chan)
def part(self, chan, reason=None):
self.send("PART", chan, ":" + (reason or ""))
def listen(self):
"""Listen for incoming messages from the IRC server."""
if not self.sock:
logging.error("Not connected to server")
return
sock = self.sock.makefile('rb')
while True:
try:
msg = sock.readline().decode("utf-8", errors="ignore").rstrip("\r\n")
m = Message.parse(msg)
if not m:
raise socket.error
logging.debug(m.raw)
thread = threading.Thread(target=lambda: self.handle(m))
thread.start()
except socket.error:
logging.info("Disconnected from server")
self.sock.close()
self.sock = None
break
def add_listener(self, event, fn):
"""Add a function to listen for the specified event."""
if event not in self.listeners:
self.listeners[event] = []
self.listeners[event].append(fn)
def remove_listener(self, event, fn):
"""Remove a function as a listener from the specified event."""
if event not in self.listeners:
return
self.listeners[event] = [l for l in self.listeners[event] if l != fn]
def remove_listeners(self, event):
"""Remove all functions listening for the specified event."""
if event not in self.listeners:
return
self.listeners.pop(event)
def emit(self, event, *params):
"""Emit an event, and call all functions listening for it."""
if event != "raw" and self.conf["verbose"]:
logging.debug("[{0}] '{1}'".format(event, "', '".join(map(str, params))))
if event in self.listeners:
for listener in self.listeners[event]:
try:
thread = threading.Thread(target=lambda: listener(*params))
thread.start()
except TypeError as e:
logging.error("(%s) invalid number of parameters [%s]",
threading.current_thread().name, listener.__name__)
                except Exception:
                    exc_type, value, lasttb = sys.exc_info()
                    fname = pathlib.Path(lasttb.tb_frame.f_code.co_filename).name
                    logging.error("(%s) %s: %s (%s:%s)",
                                  threading.current_thread().name,
                                  exc_type.__name__, value, fname, lasttb.tb_lineno)
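    # Illustrative usage of the listener API above (not part of the original
    # file; "bot" stands for a hypothetical instance of this class):
    #
    #     def on_notice(fr, to, text, msg):
    #         print("notice from {0}: {1}".format(fr, text))
    #
    #     bot.add_listener("notice", on_notice)
    #     bot.remove_listener("notice", on_notice)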
def _ctcp(self, fr, to, text, type):
text = text.strip("\x01")
parts = text.split()
self.emit("ctcp", fr, to, text, type)
self.emit("ctcp." + type, fr, to, text)
if type == "privmsg":
if parts[0] == "VERSION":
self.emit("ctcp.version", fr, to)
elif len(parts) > 1 and parts[0] == "ACTION":
self.emit("action", fr, to, " ".join(parts[1:]))
elif len(parts) > 1 and parts[0] == "PING":
self.ctcp_notice(fr, "PING " + " ".join(parts[1:]))
def handle(self, msg):
self.emit("raw", msg)
c = msg.command
if c == "001":
self.nick = self.conf["nick"] + self.nickmod * "_"
self.emit("registered", msg.params[0], msg)
elif c == "004":
self.server.usermodes = set(msg.params[3])
elif c == "005":
for p in msg.params:
if "=" in p:
param, value = p.split("=")
if param == "CHANLIMIT":
for pair in value.split(","):
typ, num = pair.split(":")
self.server.chlimit[typ] = int(num)
elif param == "CHANMODES":
modes = value.split(",")
self.server.chmodes = dict(zip("abcd", map(set, modes)))
elif param == "CHANTYPES":
self.server.types = set(value)
elif param == "CHANNELLEN":
self.server.chlength = int(value)
elif param == "IDCHAN":
for pair in value.split(","):
typ, num = pair.split(":")
self.server.idlength[typ] = int(num)
elif param == "KICKLEN":
self.server.kicklength = int(value)
elif param == "NICKLEN":
self.server.nicklength = int(value)
elif param == "PREFIX":
modes, prefixes = value[1:].split(")")
self.server.prefix_mode = dict(zip(prefixes, modes))
self.server.mode_prefix = dict(zip(modes, prefixes))
self.server.chmodes["b"].update(modes)
elif param == "TARGMAX":
for pair in value.split(","):
typ, num = pair.split(":")
self.server.maxtargets[typ] = int(num) if num else 0
elif param == "TOPICLEN":
self.server.topiclength = int(value)
elif c == "433":
self.nickmod += 1
self.send("NICK", self.conf["nick"] + self.nickmod * "_")
elif c == "PING":
self.send("PONG", msg.params[0])
self.emit("ping", msg.params[0], msg)
elif c == "PONG":
self.emit("pong", msg.params[0], msg)
elif c == "NOTICE":
fr, to = msg.nick, msg.params[0]
text = msg.params[1] or ""
if text[0] == "\x01" and "\x01" in text[1:]:
self._ctcp(fr, to, text, "notice")
else:
self.emit("notice", fr, to, text, msg)
elif c == "MODE":
by = msg.nick
adding = True
if msg.params[0][0] in self.server.types:
chan = msg.params[0].lower()[1:]
params = msg.params[1:]
while len(params):
modes = params.pop(0)
for mode in modes:
if mode == '+':
adding = True
elif mode == '-':
adding = False
elif mode in self.server.mode_prefix:
user = params.pop(0)
op = "+" if adding else "-"
u = self.channels[chan].users[user].mode
try:
(u.add if adding else u.remove)(mode)
except KeyError:
pass
self.emit(op + "mode", chan, by, mode, user, msg)
elif c == "NICK":
nick = msg.params[0]
if nickeq(msg.nick, self.nick):
self.nick = nicklower(nick)
elif nickeq(msg.nick, nick):
return
chans = list(filter(lambda c: msg.nick in c, self.channels.values()))
for chan in chans:
chan.users[nick] = chan.users[msg.nick]
chan.users.pop(msg.nick)
chan.users[nick].nick = nicklower(nick)
self.emit("nick", msg.nick, nick, [c.name for c in chans], msg)
elif c == "375":
self.server.motd = msg.params[1] + "\n"
|
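# Illustrative sketch (not part of the module above): how the RPL_ISUPPORT
# "PREFIX" branch turns a server token such as "(ov)@+" into the two lookup
# tables used by the mode handler. The token value here is hypothetical.
value = "(ov)@+"
modes, prefixes = value[1:].split(")")
prefix_mode = dict(zip(prefixes, modes))  # {'@': 'o', '+': 'v'}
mode_prefix = dict(zip(modes, prefixes))  # {'o': '@', 'v': '+'}
assert prefix_mode["@"] == "o" and mode_prefix["v"] == "+"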
MOOOWOOO/qblog | qblog.py | Python | gpl-3.0 | 633 | 0 | # coding: utf-8
from app imp | ort db, app
from app.auth.models import Role
from app.user.models import User
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Server, Shell, Manager
__author__ = 'Jux.Liu'
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
server = Server(host="0.0.0.0", port=5000, use_ | reloader=True)
manager.add_command("runserver", server)
if __name__ == "__main__":
manager.run()
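# Typical invocations for the Flask-Script manager above (illustrative;
# command names follow the add_command() calls):
#   python qblog.py shell       # shell with app, db, User and Role preloaded
#   python qblog.py db migrate  # Flask-Migrate schema commands
#   python qblog.py runserver   # binds 0.0.0.0:5000 with the reloader on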
|
subeax/grab | grab/spider/__init__.py | Python | mit | 90 | 0 | from grab.spider.base import Spider, | Task, Data, NullTask
fro | m grab.spider.error import *
|
plotly/python-api | packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_tickformat.py | Python | mit | 516 | 0 | import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="tickformat",
parent_name="scattercarpet.marker.colorbar",
**kwargs
):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role" | , "style"),
| **kwargs
)
|
rgmining/tripadvisor | setup.py | Python | gpl-3.0 | 3,543 | 0.000847 | #
# setup.py
#
# Copyright (c) 2017 Junpei Kawamoto
#
# This file is part of rgmining-tripadvisor-dataset.
#
# rgmining-tripadvisor-dataset is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rgmining-tripadvisor-dataset is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rgmining-tripadvisor-dataset. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=invalid-name
"""Package information about a synthetic dataset for review graph mining.
"""
import distutils.command.install_data
from os import path
import site
import sys
import urllib
from setuptools import setup
def read(fname):
"""Read a file.
"""
return open(path.join(path.dirname(__file__), fname)).read()
class CustomInstallData(distutils.command.install_data.install_data):
"""Custom install data command to download data files from the web.
"""
def run(self):
"""Before executing run command, download data files.
"""
for f in self. | data_files:
if not isinstance(f, tuple):
continue
for i, u in enumerate(f[1]):
base = path.basename(u)
f[1][i] = path.join(sys.prefix, f[0], base)
if not path.exists(f[1][i]):
f[1][i] = path.join(sys.prefix, "local", f[0], base)
if not path.exists | (f[1][i]):
f[1][i] = path.join(site.getuserbase(), f[0], base)
if not path.exists(f[1][i]):
f[1][i] = urllib.urlretrieve(u, base)[0]
return distutils.command.install_data.install_data.run(self)
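# Note (illustrative summary of CustomInstallData.run above): each data file is
# resolved from sys.prefix, then sys.prefix/local, then the per-user base
# directory, and is downloaded with urlretrieve only when no installed copy of
# the archive is found.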
def load_requires_from_file(filepath):
"""Read a package list from a given file path.
Args:
filepath: file path of the package list.
Returns:
a list of package names.
"""
with open(filepath) as fp:
return [pkg_name.strip() for pkg_name in fp.readlines()]
setup(
name="rgmining-tripadvisor-dataset",
use_scm_version=True,
author="Junpei Kawamoto",
author_email="kawamoto.junpei@gmail.com",
description="Trip Advisor dataset for Review Graph Mining Project",
long_description=read("README.rst"),
url="https://github.com/rgmining/tripadvisor",
py_modules=[
"tripadvisor"
],
install_requires=load_requires_from_file("requirements.txt"),
setup_requires=[
"setuptools_scm"
],
data_files=[(
"rgmining/data",
["http://times.cs.uiuc.edu/~wang296/Data/LARA/TripAdvisor/TripAdvisorJson.tar.bz2"]
)],
test_suite="tests.suite",
license="GPLv3",
cmdclass={
"install_data": CustomInstallData
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering :: Information Analysis"
]
)
|
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/runners/asam.py | Python | apache-2.0 | 11,014 | 0.001725 | # -*- coding: utf-8 -*-
'''
Novell ASAM Runner
==================
.. versionadded:: Beryllium
Runner to interact with Novell ASAM Fan-Out Driver
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
To use this runner, set up the Novell Fan-Out Driver URL, username and password in the
master configuration at ``/etc/salt/master`` or ``/etc/salt/master.d/asam.conf``:
.. code-block:: yaml
        asam:
          prov1.domain.com:
            username: "testuser"
            password: "verybadpass"
          prov2.domain.com:
            username: "testuser"
            password: "verybadpass"
.. note::
Optionally, ``protocol`` and ``port`` can be specified if the Fan-Out Driver server
is not using the defaults. Default is ``protocol: https`` and ``port: 3451``.
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import third party libs
HAS_LIBS = False
HAS_SIX = False
try:
import requests
from salt.ext.six.moves.html_parser import HTMLParser # pylint: disable=E0611
try:
import salt.ext.six as six
HAS_SIX = True
except ImportError:
# Salt version <= 2014.7.0
try:
import six
except ImportError:
pass
HAS_LIBS = True
class ASAMHTMLParser(HTMLParser): # fix issue #30477
def __init__(self):
HTMLParser.__init__(self)
self.data = []
def handle_starttag(self, tag, attrs):
if tag != "a":
return
for attr in attrs:
if attr[0] != "href":
return
self.data.append(attr[1])
except ImportError:
pass
log = logging.getLogger( | __name__)
def __virtual__():
'''
Check for ASAM Fan-Out driver configuration in master config file
or directory and load runner only if it is specified
'''
if not HAS_LIBS or not HAS_SIX:
return False
if _get_asam_configuration() is False:
return False
return True
def _get_asam_configuration(driver_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
asam | _config = __opts__['asam'] if 'asam' in __opts__ else None
if asam_config:
try:
for asam_server, service_config in six.iteritems(asam_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
port = service_config.get('port', 3451)
if not username or not password:
log.error(
"Username or Password has not been specified in the master "
"configuration for {0}".format(asam_server)
)
return False
ret = {
'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
'username': username,
'password': password
}
if (not driver_url) or (driver_url == asam_server):
return ret
except Exception as exc:
log.error(
"Exception encountered: {0}".format(exc)
)
return False
if driver_url:
log.error(
"Configuration for {0} has not been specified in the master "
"configuration".format(driver_url)
)
return False
return False
def _make_post_request(url, data, auth, verify=True):
r = requests.post(url, data=data, auth=auth, verify=verify)
if r.status_code != requests.codes.ok:
r.raise_for_status()
else:
return r.text.split('\n')
def _parse_html_content(html_content):
parser = ASAMHTMLParser()
for line in html_content:
if line.startswith("<META"):
html_content.remove(line)
else:
parser.feed(line)
return parser
def _get_platformset_name(data, platform_name):
for item in data:
if platform_name in item and item.startswith('PlatformEdit.html?'):
parameter_list = item.split('&')
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
return parameter.split('=')[1]
return None
def _get_platforms(data):
platform_list = []
for item in data:
if item.startswith('PlatformEdit.html?'):
parameter_list = item.split('PlatformEdit.html?', 1)[1].split('&')
for parameter in parameter_list:
if parameter.startswith("platformName"):
platform_list.append(parameter.split('=')[1])
return platform_list
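# Example of the href parsing above (the anchor target is hypothetical):
#   data = ["PlatformEdit.html?platformName=web01&platformSetName=prod"]
#   _get_platforms(data) returns ["web01"], and
#   _get_platformset_name(data, "web01") returns "prod".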
def _get_platform_sets(data):
platform_set_list = []
for item in data:
if item.startswith('PlatformSetEdit.html?'):
parameter_list = item.split('PlatformSetEdit.html?', 1)[1].split('&')
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
platform_set_list.append(parameter.split('=')[1].replace('%20', ' '))
return platform_set_list
def remove_platform(name, server_url):
'''
To remove specified ASAM platform from the Novell Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.remove_platform my-test-vm prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
url = config['platform_config_url']
data = {
'manual': 'false',
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to look up existing platforms on {0}".format(server_url)
log.error("{0}:\n{1}".format(err_msg, exc))
return {name: err_msg}
parser = _parse_html_content(html_content)
platformset_name = _get_platformset_name(parser.data, name)
if platformset_name:
log.debug(platformset_name)
data['platformName'] = name
data['platformSetName'] = str(platformset_name)
data['postType'] = 'platformRemove'
data['Submit'] = 'Yes'
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to delete platform from {1}".format(server_url)
log.error("{0}:\n{1}".format(err_msg, exc))
return {name: err_msg}
parser = _parse_html_content(html_content)
platformset_name = _get_platformset_name(parser.data, name)
if platformset_name:
return {name: "Failed to delete platform from {0}".format(server_url)}
else:
return {name: "Successfully deleted platform from {0}".format(server_url)}
else:
return {name: "Specified platform name does not exist on {0}".format(server_url)}
def list_platforms(server_url):
'''
To list all ASAM platforms present on the Novell Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.list_platforms prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
url = config['platform_config_url']
data = {
'manual': 'false',
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to look up existing platforms"
log.error("{0}:\n{1}".format(err_msg, exc))
        return {server_url: err_msg}
hydroshare/hydroshare_temp | ga_ows/views/wfs.py | Python | bsd-3-clause | 31,334 | 0.007149 | """
An implementation of OGC WFS 2.0.0 over the top of Django. This module requires that OGR be installed and that you use
either the PostGIS or Spatialite backends to GeoDjango for the layers you are retrieving. The module provides a
generic view, :py:class:WFS, which handles standard WFS requests and responses, and :py:class:WFST, which adds
support for WFS transactions.
This is an initial cut at WFS compatibility. It is not perfect by any means, but it is a decent start. To use WFS with
your application, you will either need to use a GeoDjango model or derive from :py:class:WFSAdapter and
wrap a model class with it. Most URL configs will look like this::
    url(r'^wfs', WFS.as_view(model=myapp.models.MyGeoModel))
Models' Meta class can be modified to include attributes that can be picked up by the view as descriptive parameters
that will make it into the response of a GetCapabilities request.
The following features remain unimplemented:
* Transactions
* Creation and removal of stored queries
* Resolution
* The standard XML filter language (instead I intend to support OGR SQL and the Django filter language)
"""
from collections import namedtuple
from uuid import uuid4
from django.http import HttpResponse
from django.contrib.gis.db.models.query import GeoQuerySet
from django.contrib.gis.db.models import GeometryField
from django import forms as f
import json
from django.shortcuts import render_to_response
from ga_ows.views import common
from ga_ows.utils import MultipleValueField, BBoxField, CaseInsensitiveDict
from lxml import etree
from ga_ows.views.common import RequestForm, CommonParameters, GetCapabilitiesMixin
from osgeo import ogr
from django.conf import settings
from tempfile import gettempdir
from django.db import connections
import re
import os
#: Requests' Common Parameters
#: ===========================
class InputParameters(RequestForm):
"""
"""
srs_name = f.CharField()
input_format = f.CharField() # default should be "application/gml+xml; version=3.2"
srs_format = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['srs_name'] = request.get('srsname', 'EPSG:4326')
request['input_format'] = request.get('inputformat', "application/gml+xml; version=3.2")
class PresentationParameters(RequestForm):
count = f.IntegerField()
start_index = f.IntegerField()
max_features = f.IntegerField()
output_format = f.CharField()
@classmethod
def from_request(cls, request):
request['count'] = int(request.get('count', '1'))
request['start_index'] = int(request.get('st | artindex','1'))
request['max_feat | ures'] = int(request.get('maxfeatures', '1'))
request['output_format'] = request.get('outputformat',"application/gml+xml; version=3.2")
class AdHocQueryParameters(RequestForm):
type_names = MultipleValueField()
aliases = MultipleValueField(required=False)
filter = f.CharField(required=False)
filter_language = f.CharField(required=False)
resource_id = f.CharField(required=False)
bbox = BBoxField()
sort_by = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['type_names'] = request.getlist('typenames')
request['aliases'] = request.getlist('aliases')
request['filter'] = request.get('filter')
request['filter_language'] = request.get('filterlanguage')
request['resource_id'] = request.get('resource_id')
request['bbox'] = request.get('bbox')
request['sort_by'] = request.get('sortby')
class StoredQueryParameters(RequestForm):
stored_query_id = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['stored_query_id'] = request.get('storedquery_id')
class GetFeatureByIdParameters(RequestForm):
feature_id = f.CharField()
@classmethod
def from_request(cls, request):
request['feature_id'] = request.get('id')
class ResolveParameters(RequestForm):
resolve = f.CharField(required=False)
resolve_depth = f.IntegerField()
resolve_timeout = f.FloatField()
@classmethod
def from_request(cls, request):
request['resolve'] = request.get('resolve')
request['resolve_depth'] = int(request.get('resolve_depth','0'))
request['resolve_timeout'] = float(request.get('resolve_timeout', '0'))
#: Exceptions
#: ==========
class CannotLockAllFeatures(common.OWSException):
"""A locking request with a lockAction of ALL failed to lock all the requested features."""
class DuplicateStoredQueryIdValue(common.OWSException):
"""The identifier specified for a stored query expression is a duplicate."""
class DuplicateStoredQueryParameterName(common.OWSException):
"""This specified name for a stored query parameter is already being used within the same stored query definition."""
class FeaturesNotLocked(common.OWSException):
"""For servers that do not support automatic data locking (see 15.2.3.1), this exception indicates that a transaction operation is modifying features that have not previously been locked using a LockFeature (see Clause 12) or GetFeatureWithLock (see Clause 13) operation."""
class InvalidLockId(common.OWSException):
"""The value of the lockId parameter on a Transaction operation is invalid because it was not generated by the server."""
class InvalidValue(common.OWSException):
"""A Transaction (see Clause 15) has attempted to insert or change the value of a data component in a way that violates the schema of the feature."""
class LockHasExpired(common.OWSException):
"""The specified lock identifier on a Transaction or LockFeature operation has expired and is no longer valid."""
class OperationParsingFailed(common.OWSException):
"""The request is badly formed and failed to be parsed by the server."""
class OperationProcessingFailed(common.OWSException):
"""An error was encountered while processing the operation."""
class ResponseCacheExpired(common.OWSException):
"""The response cache used to support paging has expired and the results are no longer available."""
class OperationNotSupported(common.OWSException):
"""The operation is not yet implemented"""
########################################################################################################################
# Adapter class
########################################################################################################################
#: Class for describing features. A named tuple containing:
#: * name : str - the feature type name. this is what goes in the featureTypes parameter on a GetFeature request.
#: * title : str - the human readable name for this feature type
#: * abstract : str - a short description of this feature type, if necessary
#: * keywords : list(str) - keywords associated with this feature_type
#: * srs : str - the spatial reference system that is default for this feature type
#: * bbox : (minx, miny, maxx, maxy) - the bounding box for this feature type. must be present and filled in WGS84
#:
FeatureDescription = namedtuple('FeatureDescription', ('ns', 'ns_name', 'name','title','abstract','keywords','srs','bbox', 'schema'))
#: A description of a stored-query parameter. A named tuple containing:
#: * type : str - the parameter type
#: * name : str - the parameter name (computer-readable)
#: * title : str - the parameter name (human-readable)
#: * abstract : str - a short description of the parameter
#: * query_expression : :py:class:StoredQueryExpression
#:
StoredQueryParameter = namedtuple("StoredQueryParameter", ('type','name', 'title','abstract', 'query_expression'))
#: A description of how a stored query parameter should be filled in. A named tuple containing:
#: * text : str - template text for a query
#: * language : str - the language the query is expressed in.
#: * private : boolean - whether or not the query is private
#: * return_feature_types : the comma-separated computer-readable names of the feature types that are returned
S |
stackforge/kolla | kolla/tests/test_methods.py | Python | apache-2.0 | 5,853 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kolla.template import methods
from kolla.tests import base
class MethodsTest(base.TestCase):
def test_debian_package_install(self):
packages = ['https://packages.debian.org/package1.deb', 'package2.deb']
result = methods.debian_package_install(packages)
expectCmd = 'apt-get -y install --no-install-recommends package2.deb'
self.assertEqual(expectCmd, result.split("&&")[1].strip())
def test_enable_repos_rhel(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'rhel',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_centos(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = 'RUN dnf config-manager --enable grafana || true'
self.assertEqual(expectCmd, result)
def test_enable_repos_centos_missing_repo(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['missing_repo'],
'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_centos_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana', 'ceph'],
'enable')
expectCmd = 'RUN dnf config-manager --enable grafana '
expectCmd += '--enable centos-ceph-nautilus || true'
self.assertEqual(expectCmd, result)
def test_enable_repos_debian(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = 'RUN echo "deb https://packages.grafana.com/oss/deb '
expectCmd += 'stable main" >/etc/apt/sources.list.d/grafana.list'
self.assertEqual(expectCmd, result)
def test_enable_repos_debian_missing_repo(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['missing_repo'],
'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_debian_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana', 'kibana'],
'enable')
expectCmd = 'RUN echo "deb https://packages.grafana.com/oss/deb '
expectCmd += 'stable main" >/etc/apt/sources.list.d/grafana.list && '
expectCmd += 'echo "deb '
expectCmd += 'https://artifacts.elastic.co/packages/oss-7.x/apt '
expectCmd += 'stable main" >/etc/apt/sources.list.d/kibana.list'
self.assertEqual(expectCmd, result)
def test_disable_repos_centos(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'disable')
expectCmd = 'RUN dnf config-manager --disable grafana || true'
self.assertEqual(expectCmd, result)
def test_disable_repos_centos_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
| 'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana', 'ceph'],
'disable')
expectCmd = 'RUN dnf config-manager --disable grafana '
expectCmd += ' | --disable centos-ceph-nautilus || true'
self.assertEqual(expectCmd, result)
# NOTE(hrw): there is no disabling of repos for Debian/Ubuntu
def test_disable_repos_debian(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana'], 'disable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_handle_repos_string(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
self.assertRaisesRegex(TypeError,
r'First argument should be a list of '
r'repositories',
methods.handle_repos, template_vars, 'grafana',
'disable')
|
mrunge/horizon_lib | horizon_lib/utils/validators.py | Python | apache-2.0 | 1,727 | 0 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# Lice | nse for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon_lib import conf
def validate_port_range(port):
if port not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
def validate_ip_protocol( | ip_proto):
if ip_proto not in range(-1, 256):
raise ValidationError(_("Not a valid IP protocol number"))
def password_validator():
return conf.HORIZON_CONFIG["password_validator"]["regex"]
def password_validator_msg():
return conf.HORIZON_CONFIG["password_validator"]["help_text"]
def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
try:
if int(port) not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
except ValueError:
raise ValidationError(_("Port number must be integer"))
|
ophiry/dvc | tests/test_repro.py | Python | apache-2.0 | 5,073 | 0.002957 | import os
from dvc.command.remove import CmdRemove
from dvc.command.repro import CmdRepro
from dvc.command.run import CmdRun
from dvc.executor import Executor
from tests.test_cmd_run import RunBasicTest
class ReproBasicEnv(RunBasicTest):
def setUp(self):
super(ReproBasicEnv, self).setUp()
self.file_name1 = os.path.join('data', 'file1')
self.file1_code_file = 'file1.py'
self.create_file_and_commit(self.file1_code_file, 'print("Hello")' + os.linesep + 'print("Mary")')
self.settings.parse_args(['run',
'--stdout', self.file_name1,
'--code', self.file1_code_file,
'python', self.file1_code_file, '--not-repro'])
cmd_file1 = CmdRun(self.settings)
self.assertEqual(cmd_file1.code_dependencies, [self.file1_code_file])
cmd_file1.run()
self.file_name11 = os.path.join('data', 'file11')
self.file11_code_file = 'file | 11.py'
self.create_file_and_commit(self.file11_code_file,
'import sys' + os.linesep + 'print(open(sys.argv[1]).readline().strip())')
self.settings.parse_args(['run',
'--stdout', self.file_name11,
'--code', self.file11_code_file,
'pyth | on', self.file11_code_file, self.file_name1])
CmdRun(self.settings).run()
self.file_name2 = os.path.join('data', 'file2')
self.file2_code_file = 'file2.py'
self.create_file_and_commit(self.file2_code_file,
'print("Bobby")')
self.settings.parse_args(['run',
'--stdout', self.file_name2,
'python', self.file2_code_file, '--not-repro'])
CmdRun(self.settings).run()
self.file_res_code_file = 'code_res.py'
self.create_file_and_commit(self.file_res_code_file,
'import sys' + os.linesep +
'text1 = open(sys.argv[1]).read()' + os.linesep +
'text2 = open(sys.argv[2]).read()' + os.linesep +
'print(text1 + text2)')
self.file_name_res = os.path.join('data', 'file_res')
self.settings.parse_args(['run',
'--stdout', self.file_name_res,
'--code', self.file_res_code_file,
'python', self.file_res_code_file,
self.file_name11,
self.file_name2])
cmd_res = CmdRun(self.settings)
self.assertEqual(cmd_res.code_dependencies, [self.file_res_code_file])
cmd_res.run()
lines = list(filter(None, map(str.strip, open(self.file_name_res).readlines())))
self.assertEqual(lines, ['Hello', 'Bobby'])
def create_file_and_commit(self, file_name, content='Any', message='Just a commit'):
self.create_file(file_name, content)
self.commit_file(file_name, message)
@staticmethod
def commit_file(file_name, message='Just a commit'):
Executor.exec_cmd_only_success(['git', 'add', file_name])
Executor.exec_cmd_only_success(['git', 'commit', '-m', message])
def modify_file_and_commit(self, filename, content_to_add=' '):
fd = open(filename, 'a')
fd.write(content_to_add)
fd.close()
self.commit_file(filename)
class ReproCodeDependencyTest(ReproBasicEnv):
def test(self):
self.modify_file_and_commit(self.file_res_code_file)
self.settings.parse_args('repro {}'.format(self.file_name_res))
CmdRepro(self.settings).run()
self.assertEqual(open(self.file_name_res).read().strip(), 'Hello\nBobby')
class ReproChangedDependency(ReproBasicEnv):
def test(self):
self.recreate_file1()
self.settings.parse_args('repro {}'.format(self.file_name11))
CmdRepro(self.settings).run()
self.assertEqual(open(self.file_name11).read(), 'Goodbye\n')
def recreate_file1(self):
self.settings.parse_args('remove {} --keep-in-cloud'.format(self.file_name1))
CmdRemove(self.settings).run()
file1_code_file = 'file1_2.py'
self.create_file_and_commit(file1_code_file, 'print("Goodbye")' + os.linesep + 'print("Jack")')
self.settings.parse_args(['run',
'--stdout', self.file_name1,
'--code', file1_code_file,
'python', file1_code_file, '--not-repro'])
CmdRun(self.settings).run()
class ReproChangedDeepDependency(ReproChangedDependency):
def test(self):
self.recreate_file1()
self.settings.parse_args('repro {}'.format(self.file_name_res))
CmdRepro(self.settings).run()
self.assertEqual(open(self.file_name_res).read().strip(), 'Goodbye\nBobby')
|
amosnier/python_for_kids | book_code/appendixb/ch16-checkerboard.py | Python | gpl-3.0 | 1,319 | 0.00834 | from tkinter import *
import random
import time
class Game:
def __init__(self):
self.tk = Tk()
self.tk.title("Game")
self.tk.resizable(0, 0)
self.tk.wm_attributes("-topmost", 1)
self.canvas = Canvas(self.tk, width=500, height=500, bd=0, highlightthickness=0)
self.canvas.pack()
self.tk.update()
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.bg = | PhotoImage(file="background.gif")
w = self.bg.width()
h = self.bg.height()
draw_background = 0
for x in range(0, 5):
for y in range(0, 5):
if draw_background == 1:
self.canvas.create_image(x * | w, y * h, image=self.bg, anchor='nw')
draw_background = 0
else:
draw_background = 1
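        # The toggle above alternates tiles along each row; because the grid
        # is 5 tiles wide (odd), the flag carries over between rows and the
        # pattern forms a checkerboard rather than vertical stripes.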
self.sprites = []
self.running = True
def add(self, sprite):
self.sprites.append(sprite)
def mainloop(self):
while 1:
for sprite in self.sprites:
sprite.move()
self.tk.update_idletasks()
self.tk.update()
time.sleep(0.01)
g = Game()
g.mainloop() |
zhlinh/leetcode | 0091.Decode Ways/solution.py | Python | apache-2.0 | 1,111 | 0.0018 | #!/usr/bin/env pyth | on
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-02
Last_modify: 2016-03-02
******************************************
'''
'''
A message containing letters from A-Z is
being encoded to numbers using the following mapping:
'A' -> 1
'B' -> 2
...
'Z' -> 26
Given an encoded message containing digits, |
determine the total number of ways to decode it.
For example,
Given encoded message "12", it could be decoded as "AB" (1 2) or "L" (12).
The number of ways decoding "12" is 2.
'''
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if not s or s[0] == '0':
return 0
dp0, dp1 = 1, 1
for i in range(1, len(s)):
if s[i] == '0':
dp1 = 0
if s[i-1] == '1' or (s[i-1] == '2' and s[i] <= '6' ):
dp1 = dp0 + dp1
dp0 = dp1 - dp0
else:
dp0 = dp1
return dp1
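# Quick check of the DP above (illustrative): "226" decodes as "2 2 6",
# "22 6" and "2 26", i.e. three ways.
if __name__ == "__main__":
    assert Solution().numDecodings("226") == 3
    assert Solution().numDecodings("10") == 1   # only "J"
    assert Solution().numDecodings("100") == 0  # dangling zero cannot decode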
|
getting-things-gnome/liblarch | tests/signals_testing.py | Python | lgpl-3.0 | 5,708 | 0 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Liblarch - a library to handle directed acyclic graphs
# Copyright (c) 2011-2012 - Lionel Dricot & Izidor Matušov
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import threading
import time
from tests.watchdog import Watchdog
from gi.repository import GLib
class SignalCatcher(object):
'''
A class to test signals
'''
def __init__(self, unittest, generator, signal_name,
should_be_caught=True, how_many_signals=1,
error_code="No error code set"):
self.signal_catched_event = threading.Event()
self.generator = generator
self.signal_name = signal_name
self.signal_arguments = []
self.unittest = unittest
self.how_many_signals = how_many_signals
self.should_be_caught = should_be_caught
self.error_code = error_code
def _on_failure():
# we need to release the waiting thread
self.signal_catched_event.set()
self.missed = True
# then we notify the error
# if the error_code is set to None, we're expecting it to fail.
if error_code is not None:
print("An expected signal wasn't received %s" % error_code)
self.unittest.assertFalse(should_be_caught)
self.watchdog = Watchdog(3, _on_failure)
def __enter__(self):
def __signal_callback(*args):
self.signal_arguments.append(args[1:])
if len(self.signal_arguments) >= self.how_many_signals:
self.signal_catched_event.set()
self.handler = self.generator.connect(
self.signal_name, __signal_callback)
self.watchdog.__enter__()
return [self.signal_catched_event, self.signal_arguments]
def __exit__(self, err_type, value, traceback):
self.generator.disconnect(self.handler)
if not self.should_be_caught and not hasattr(self, 'missed'):
            self.unittest.assertFalse(True)
return (not isinstance(value, Exception) and
self.watchdog.__exit__(err_type, value, traceback))
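    # Illustrative usage (object names hypothetical):
    #
    #     with SignalCatcher(self, tree, "node-added") as (event, args):
    #         tree.add_node(node)
    #         event.wait()
    #     self.assertEqual(args[0][0], node.get_id())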
class CallbackCatcher(obje | ct):
'''
A class to test callbacks
'''
def __init__(self, unittest, generator, signal_name,
should_be_caught=True, how_many_signals=1,
error_code="No error code set"):
self.signal_catched_event = threading.Event()
self.generator = generator
self.signal_name = signal_name
self.signal_arguments = []
self.unittest = unittest
self.how_many_signals = how_many_signals
self.should_be_caught = sh | ould_be_caught
self.error_code = error_code
def _on_failure():
# we need to release the waiting thread
self.signal_catched_event.set()
self.missed = True
# then we notify the error
# if the error_code is set to None, we're expecting it to fail.
if error_code is not None:
print("An expected signal wasn't received %s" % error_code)
self.unittest.assertFalse(should_be_caught)
self.watchdog = Watchdog(3, _on_failure)
def __enter__(self):
def __signal_callback(*args):
""" Difference to SignalCatcher is that we do not skip
the first argument. The first argument by signals is widget
which sends the signal -- we omit this feature when using callbacks
"""
self.signal_arguments.append(args)
if len(self.signal_arguments) >= self.how_many_signals:
self.signal_catched_event.set()
self.handler = self.generator.register_cllbck(
self.signal_name, __signal_callback)
self.watchdog.__enter__()
return [self.signal_catched_event, self.signal_arguments]
def __exit__(self, err_type, value, traceback):
self.generator.deregister_cllbck(self.signal_name, self.handler)
if not self.should_be_caught and not hasattr(self, 'missed'):
            self.unittest.assertFalse(True)
return (not isinstance(value, Exception) and
self.watchdog.__exit__(err_type, value, traceback))
class GobjectSignalsManager(object):
def init_signals(self):
'''
Initializes the gobject main loop so that signals can be used.
This function returns only when the gobject main loop is running
'''
def gobject_main_loop():
self.main_loop = GLib.MainLoop()
self.main_loop.run()
threading.Thread(target=gobject_main_loop).start()
while (not hasattr(self, 'main_loop') or
not self.main_loop.is_running()):
# since running the gobject main loop is a blocking call,
# we have to check that it has been started in a polling fashion
time.sleep(0.1)
def terminate_signals(self):
self.main_loop.quit()
|
dmnfarrell/peat | PEATDB/Ekin/Ekin_map.py | Python | mit | 23,383 | 0.016893 | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
from Tkinter import *
import Pmw
import os
import numpy
class Ekin_map_annotate:
def map_datatab2structure(self):
"""If the PEATDB record has a structure, then we allow the user to map each datatab
to a specific part of the protein.
        One can map a datatab to an atom, a residue, a chain, or define a structural group and map to it."""
if not self.parent:
import tkMessageBox
tkMessageBox.showinfo("No PEAT",
"This option is only available when Ekin is started from PEAT",
parent=self.ekin_win)
return
#
# Do we have a record name
#
if not self.protein:
import tkMessageBox
tkMessageBox.showinfo("No PEAT record",
"This option is only available when Ekin has been started by clicking a PEAT record",
parent=self.ekin_win)
return
#
# Is there a structure?
#
error=None
if not self.parent.data.has_key('DBinstance'):
error=1
else:
DB=self.parent.data['DBinstance'].DB
if not DB[self.protein].has_key('Structure'):
error=1
else:
print 'Trying to get PDB'
self.pdblines,X=self.parent.get_structure(self.protein,'Structure')
if not self.pdblines:
error=1
if error:
import tkMessageBox
tkMessageBox.showinfo("No Structure in PEAT",
"This option is only available when the PEAT record has a structure",
parent=self.ekin_win)
return
#
# Open the mapping window
#
mapper_win=Toplevel()
mapper_win.title('Map datatab to structure. %s - %s' %(self.protein,self.field))
self.set_geometry(self.ekin_win,mapper_win)
#
# Mapping Manager
#
row=0
Label(mapper_win,text='Mapping Manager',bg='lightblue').grid(row=row,column=0,columnspan=3,sticky='news')
row=row+1
Label(mapper_win,textvariable=self.currentdataset.get()).grid(row=row,column=0,columnspan=3,sticky='news')
#
# Headers
#
#row=row+1
#Label(mapper_win,text='Structural group type').grid(row=row,column=0,sticky='news')
#Label(mapper_win,text='Structural element').grid(row=row,column=1,sticky='news')
#Label(mapper_win,text='Datatab property').grid(row=row,column=2,sticky='news')
#
# Structural groupings for this protein
#
#if not DB[self.protein].has_key('structgroups'):
# DB[self.protein]['structgroups']={}
#structgroups=DB[self.protein]['structgroups'].keys()
#
# Load the residue definitions
#
import Protool.mutate
self.M_instance=Protool.mutate.Mutate(onlydefs=1)
self.AAdefs=self.M_instance.aadefs
#
# Struct group types
#
row=row+1
listbox_height=5
self.group_type_box = Pmw.ScrolledListBox(mapper_win,
items=['Residues','Atoms','Titratable groups'],
labelpos='nw',
label_text='Group type',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100,
| selectioncommand=self.update_elements)
self.group_type_box.grid(row=row,column=0,columnspan=1,sticky='news')
self.group_type_box.configure(listbox_bg='white')
self.group_type_box.configure(listbox_selectmode='single')
self.group_type_box.configure(listbox_exportselection=0)
#
#
# Dropdown list of elements of each structgroup type
#
self.group_elements_box = Pmw.ScrolledListBox(mapper_win,
items=[],
labelpos='nw',
label_text='Group Elements',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100)
self.group_elements_box.grid(row=row,column=1,columnspan=1,sticky='news')
self.group_elements_box.configure(listbox_bg='white')
self.group_elements_box.configure(listbox_selectmode='extended')
self.group_elements_box.configure(listbox_exportselection=0)
# Parameters that we can map to structgroups
import Fitter
self.FIT=Fitter.FITTER('1 pKa 2 Chemical shifts',self)
self.dataprops=['Data source']+self.FIT.parameter_names
self.data_prop_box = Pmw.ScrolledListBox(mapper_win,
items=self.dataprops,
labelpos='nw',
label_text='Data properties',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100)
self.data_prop_box.grid(row=row,column=2,columnspan=1,sticky='news')
self.data_prop_box.configure(listbox_bg='white')
self.data_prop_box.configure(listbox_selectmode='extended')
self.data_prop_box.configure(listbox_exportselection=0)
#
# List of existing mappings
#
row=row+1
datatab=self.currentdataset.get()
print 'Loading this datatab in mapper',datatab
mappings=self.get_structmappings(datatab)
self.mapping_box = Pmw.ScrolledListBox(mapper_win,
items=mappings,
labelpos='nw',
label_text='Existing mappings',
listbox_height = 6,
usehullsize = 1,
hull_width = 200,
hull_height = 200)
self.mapping_box.grid(row=row,column=0,columnspan=3,sticky='news')
self.mapping_box.configure(listbox_selectmode='single')
self.mapping_box.configure(listbox_bg='white')
#
# Buttons
#
row=row+1
Button(mapper_win,text='Create mapping',bg='lightgreen',borderwidth=2, relief=GROOVE, command=self.create_mapping).grid(row=row,column=0,sticky='news',padx=2,pady=2)
Button(mapper_win,text='Delete map | |
nesl/mercury | Services/Mapping/tmpBatch.py | Python | gpl-2.0 | 1,450 | 0.035862 | import os
import sys
filenames = [
'Albuquerque_6x6.tfix',
'Atlanta_6x6.tfix',
'Austin_6x6.tfix',
'Baltimore_6x6.tfix',
'B | oston_6x6.tfix',
'Charlotte_6x6.tfix',
'Chicago_6x6.tfix',
'Cleveland_6x6.tfix',
'Columbus_6x6.tfix',
'Dallas_6x6.tfix',
'Denver_6x6.tfix',
'Detroit_6x6.tfix',
'El_Paso_6x6.tfix',
'Fort_Worth_6x6.tfix',
'Fresno_6x6.tfix',
'Houston_6x6.tfix',
'Indianapolis_6x6.tfix',
'Jacksonville_6x6.tfix',
'Kansas_City_2_6x6.tfix',
'Kansas_City_6x6.tfix',
'Las_Vegas_6x6.tfix',
'Long_Beach_6x6.t | fix',
'Los_Angeles_6x6.tfix',
'Memphis_6x6.tfix',
'Mesa_6x6.tfix',
'Milwaukee_6x6.tfix',
'Nashville_6x6.tfix',
'New_Orleans_6x6.tfix',
'New_York_6x6.tfix',
'Oklahoma_City_6x6.tfix',
'Omaha_6x6.tfix',
'Philadelphia_6x6.tfix',
'Phoneix_6x6.tfix',
'Portland_6x6.tfix',
'Sacramento_6x6.tfix',
'San_Antonio_6x6.tfix',
'San_Diego_6x6.tfix',
'San_Francisco_6x6.tfix',
'San_Jose_6x6.tfix',
'San_Juan_6x6.tfix',
'Seattle_6x6.tfix',
'Tucson_6x6.tfix',
#'ucla_3x3.tfix',
#'ucla_4x4.tfix',
#'ucla_5x5.tfix',
#'ucla_small.tfix',
'Virginia_Beach_6x6.tfix',
'Washington_6x6.tfix'
]
startIdx = 0
stopIdx = len(filenames)
if len(sys.argv) >= 2:
startIdx = int(sys.argv[1])
stopIdx = startIdx
if len(sys.argv) >= 3:
stopIdx = int(sys.argv[2])
idx = 0
for x in filenames:
    if startIdx <= idx <= stopIdx:
print('idx=' + str(idx))
cmd = 'python3 makeElevSegMap.py ' + x
print(cmd)
os.system(cmd)
idx += 1
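# Example invocations, given the argv handling above (illustrative):
#   python3 tmpBatch.py        # process every map in the list
#   python3 tmpBatch.py 3      # process only index 3
#   python3 tmpBatch.py 3 10   # process indices 3 through 10 inclusive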
|
studywolf/NDMPS-paper | code/test_ndmps_fs_discrete_spa_frontend.py | Python | gpl-3.0 | 1,619 | 0.004941 | import numpy as np
import importlib
import itertools
import nengo
from models import ndmps_fs_discrete_spa_frontend
importlib.reload(ndmps_fs_discrete_spa_frontend)
from models.ndmps_fs_discrete_spa_frontend import generate
input_signals = ['ZERO']#, 'ONE', 'TWO', 'THREE', 'FOUR',
# 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE']
# input_signals += [" + ".join(x) for x in
# itertools.combinations(input_signals, 2)]
print(input_signals)
for input_signal in input_signals:
print(input_signal)
model = nengo.Network(seed=10)
with model:
ndmps_d = generate(input_signal=input_signal)
probe = nengo.Probe(ndmps_d.output, synapse=.01, sample_every=.005)
ndmps_d.product_x.sq1.add_neuron_output()
ndmps_d.product_y.sq1.add_neuron_output()
probe_product_x = nengo.Probe(
ndmps_d.product_x.sq1.neuron_output, synapse=None)
probe_product_y = nengo.Probe(
ndmps_d.product_y.sq1.neuron_output, synapse=None)
sim = nengo.Simulator(model)
sim.run(4)
# format input string to be appro | priate file name
input_signal = | input_signal.lower().replace(' ', '')
np.savez_compressed('results/data/discrete_fs/time_steps', sim.trange())
np.savez_compressed('results/data/discrete_fs/data_%s' % input_signal, sim.data[probe])
np.savez_compressed('results/data/discrete_fs/data_%s_x_neurons' % input_signal,
sim.data[probe_product_x])
np.savez_compressed('results/data/discrete_fs/data_%s_y_neurons' % input_signal,
sim.data[probe_product_y])
|
StegSchreck/RatS | tests/unit/icheckmovies/test_icheckmovies_ratings_inserter.py | Python | agpl-3.0 | 1,934 | 0.000517 | import os
from unittest import TestCase
from unittest.mock import patch
from RatS.icheckmovies.icheckmovies_ratings_inserter import ICheckMoviesRatingsInserter
TESTDATA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "assets")
)
class ICheckMoviesInserterTest(TestCase):
def setUp(self):
if not os.path.exists(os.path.join(TESTDATA_PATH, "exports")):
os.makedirs(os.path.join(TESTDATA_PATH, "exports"))
self.movie = dict()
self.movie["title"] = "Fight Club"
self.movie["imdb"] = dict()
self.movie["imdb"]["id"] = "tt0137523"
self.movie["imdb"]["url"] = "https://www.imdb.com/title/tt0137523"
self.movie["imdb"]["my_rating"] = 9
@patch("RatS.base.base_ratings_inserter.RatingsInserter.__init__")
@patch("RatS.utils.browser_handler.Firefox")
def test_init(self, browser_mock, base_init_mock):
ICheckMoviesRatingsInserter(None)
self.assertTrue(base_init_mock.called)
@pa | tch("RatS.icheckmovies.icheckmovies_ratings_inserter.Select")
@patch("RatS.base.base_ratings_uploader.save_movies_to_csv")
@patch("RatS.icheckmovies.icheckmovies_ratings_inserter.ICheckMovies")
@patch("RatS.base.base_ratings_inserter.RatingsInserter.__init__")
@patch("RatS.utils.browser_handler.Firefox")
def test_insert(
self, browser_mock, base_init_mock, site_mock, impex_mock, select_mock
): # pyl | int: disable=too-many-arguments
site_mock.browser = browser_mock
inserter = ICheckMoviesRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = "ICheckMovies"
inserter.failed_movies = []
inserter.exports_folder = TESTDATA_PATH
inserter.csv_filename = "converted.csv"
inserter.insert([self.movie], "IMDB")
self.assertTrue(base_init_mock.called)
self.assertTrue(impex_mock.called)
|
AlexSafatli/DataMiningImplementations | naive-bayes/ID3_classifier.py | Python | gpl-2.0 | 13,649 | 0.011869 | #!/bin/python
# ID3_classifier.py
# -------------------------
# Winter 2013; Alex Safatli
# -------------------------
# A program that carries out
# the ID3 algorithm on a
# given data file; extended
# in order to act as a classifier
# a la NB_classifier.
#
# Usage: python ID3_classifier.py
# Imports
import os, sys
from math import log
# Constants
RESULT_FILE = 'id3.txt'
# ID3 Class(es)
class id3:
# Decision tree ID3 algorithm.
def __init__(self,target,dataset):
# Tree object.
self.tree = tree(None,target)
# Target and dataset.
self.target, self.dataset = target, dataset
# The dataset list and entropy for the target.
self.classes_target = dataset[target]
self.entropy_target = self.entropy(target)
def entropy(self,attr,subset=None,\
drange=None):
out, dataset = 0, self.dataset
# Get the lists of values.
tarvals = self.classes_target # target
tarset = list(set(tarvals))
attvals = dataset[attr] # attribute
# Determine what range of indices to look over.
if not drange:
drange = range(len(attvals))
# Set up a count for all unique values in target.
tarcnt, attcnt = [0 for x in tarset], [0 for x in tarset]
# For each value in the attribute.
for i in drange:
attval = attvals[i]
# For each unique value in target.
if (subset and attval == subset) or not subset:
for j in xrange(len(tarset)):
attcnt[j] += 1
# See if matching.
if tarvals[i] == tarset[j]:
tarcnt[j] += 1
# Log computations.
for j in xrange(len(tarset)):
# Add to value.
if attcnt[j] != 0:
p = (float(tarcnt[j])/attcnt[j])
if p != 0: # Avoid finding the log of 0.
out -= p*log(p,2)
return out
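    # Worked example (illustrative): for target values ['yes', 'yes', 'no'],
    # entropy(target) = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ~= 0.918 bits;
    # gain() below subtracts the entropy remaining after a candidate split.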
def gain(self,attr,drange=None):
# The attribute given partitions the
# set into subsets.
raw = self.dataset[attr]
data = []
if drange:
for d in drange:
data.append(raw[d])
else:
data = raw
out = self.entropy_target
subs = set(data)
for subset in subs:
# Get ratio of number of occurences
# of subset in the full set.
s_j = data.count(subset)
ratio = float(s_j)/len(data)
out -= ratio*self.entropy(attr,subset,drange)
return out
def bestDecision(self,parent,drange=None):
# Choose best attribute for next decision
# in the tree.
bestsplit = None
if not parent:
attrs = []
else:
attrs = [x for x in parent.getParents()]
# Check only the given subset of the dataset.
for header in self.dataset:
if (header != self.target and \
header not in attrs):
gain = self.gain(header,drange)
# See if better than current best gain.
if not bestsplit or gain > bestsplit[1]:
bestsplit = (header,gain)
# Split for most gain in information.
if bestsplit:
return bestsplit[0]
else:
return None # stopping condition reached.
def buildTree(self,parent=None,data=None):
def attachBranches(vals,node):
# Attach a set of branches to an
# attrib ute node.
br = None
for s in set(vals):
if br:
b = branch(s,None,br,node)
br = b
else:
br = branch(s,None,None,node)
return br
# Figure out what subset of the data is
# being investigated for this branch.
attr = self.bestDecision(parent,data)
if not data:
data = range(len(self.classes_target))
if not attr:
# If stopping condition reached:
# no attributes are left or no data.
# Use class distribution.
cldist = []
for d in data:
cldist.append(self.classes_target[d])
cla = max(cldist)
node = attribute(cla,None,parent) # leaf node
parent.attr = node
return
vals = self.dataset[attr]
# Create branch nodes for new attribute node.
node = attribute(attr,None,parent)
br = attachBranches(vals,node)
node.branch = br
# Attach new attribute node to parent.
if parent:
parent.attr = node # prev branch
else:
self.tree = tree(node,self.target) # root node
# Recurse.
majorityclass = []
for b in br: # Go through all branches.
# See if all samples for this node have same
# class label.
datasubset = [x for x in xrange(len(vals)) \
if vals[x] = | = b.value and x in data]
classsubset = []
for x in datasubset:
classsubset.append(self.classes_target[x])
if len(set(classsubset)) == 1:
# If stopping condition reached. All same class.
cla = classsubset[0]
node = a | ttribute(cla,None,b) # leaf node
b.attr = node
majorityclass.append(cla)
continue
elif len(set(classsubset)) == 0:
# No class; means not majority class.
cla = None
for cl in self.classes_target:
# Allows for support for non-binary target
# attributes.
if cl not in majorityclass:
cla = cl
break
node = attribute(cla,None,b) # leaf node
b.attr = node
continue
self.buildTree(b,datasubset)
# Tree Data Structure
class tree:
# Encapsulates a tree.
def __init__(self,attr,target):
self.root = attr
self.target = target
def __nextlevel__(self,node,lvl,out):
# For an attribute.
branch = node.branch
if branch:
# not leaf node
for b in branch:
# For every branch off attribute.
out.append('\n%sIf %s is %s, then' \
% ('\t'*lvl,node.name,b.value))
self.__nextlevel__(b.attr,lvl+1,out)
else:
# leaf node
out.append(' %s is %s.' % (self.target,node.name))
def toStringAsLogic(self):
# Gets string of the tree as if-logic.
out = []
self.__nextlevel__(self.root,0,out)
return "".join(out)
def getTargetValue(self,dic,node=None):
# Given a dictionary for a transaction,
# traverse the tree until a single value
# is found for the target.
if not node:
node = self.root
if node.branch:
for b in node.branch:
if b.value == dic[node.name]:
return self.getTargetValue(dic,b.attr)
else:
return node.name # is leaf node
class attribute:
# An attribute node. A leaf node is
# designated as an attribute node
# with a null pointer.
def __init__(self,name,branch,parent):
self.name = name
self.branch = branch
self.parent = parent
def __eq__(self,other):
return self.name == other
def __ne__(self,other):
return not self.__eq__(other)
def __str__(self):
return self.name
class branch:
# A value branch.
def __init__(self,value,attr,next,parent):
self.value = value
self.attr = attr
self.next = next
self.parent = parent
def getParents(self):
n = self
while (n.parent):
n = n.parent
if hasattr(n,'attr'): # if branch
                continue
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/gis/gdal/__init__.py | Python | mit | 2,676 | 0 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
    HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
edofic/ggrc-core | src/ggrc/converters/base.py | Python | apache-2.0 | 5,472 | 0.008589 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base objects for csv file converters."""
from collections import defaultdict
from ggrc import settings
from ggrc.utils import structures
from ggrc.cache.memcache import MemCache
from ggrc.converters import get_exportables
from ggrc.converters.base_block import BlockConverter
from ggrc.converters.import_helper import extract_relevant_data
from ggrc.converters.import_helper import split_array
from ggrc.fulltext import get_indexer
class Converter(object):
"""Base class for csv | converters.
This class contains and handles all block converters and makes sure that
blocks and columns are handled in the correct order. It also holds cache
objects that need to be shared between all blocks and rows for mappings and
similar uses.
"""
CLASS_ORDER = [
"Person",
"Program",
"Risk Assessment",
"Audit",
"Request",
"Policy",
"Regulation",
"Standard",
"Section",
"Control",
"Assessment Template",
"Custom Attribute Definition",
"Assessment",
"Workflow",
"Task Group",
"Task Group Task",
]
priority_columns = [
"email",
"slug",
"delete",
"task_type",
]
def __init__(self, **kwargs):
self.dry_run = kwargs.get("dry_run", True)
self.csv_data = kwargs.get("csv_data", [])
self.ids_by_type = kwargs.get("ids_by_type", [])
self.block_converters = []
self.new_objects = defaultdict(structures.CaseInsensitiveDict)
self.shared_state = {}
self.response_data = []
self.exportable = get_exportables()
self.indexer = get_indexer()
def to_array(self):
self.block_converters_from_ids()
self.handle_row_data()
return self.to_block_array()
def to_block_array(self):
""" exporting each in it's own block separated by empty lines
Generate 2d array where each cell represents a cell in a csv file
"""
csv_data = []
for block_converter in self.block_converters:
csv_header, csv_body = block_converter.to_array()
# multi block csv must have first column empty
two_empty_lines = [[], []]
block_data = csv_header + csv_body + two_empty_lines
for line in block_data:
line.insert(0, "")
block_data[0][0] = "Object type"
block_data[1][0] = block_converter.name
csv_data.extend(block_data)
return csv_data
def import_csv(self):
self.block_converters_from_csv()
self.row_converters_from_csv()
self.handle_priority_columns()
self.import_objects()
self.import_secondary_objects()
self.drop_cache()
def handle_priority_columns(self):
for attr_name in self.priority_columns:
for block_converter in self.block_converters:
block_converter.handle_row_data(attr_name)
def handle_row_data(self):
for converter in self.block_converters:
converter.handle_row_data()
def row_converters_from_csv(self):
for converter in self.block_converters:
converter.row_converters_from_csv()
def block_converters_from_ids(self):
""" fill the block_converters class variable
Generate block converters from a list of tuples with an object name and ids
"""
object_map = {o.__name__: o for o in self.exportable.values()}
for object_data in self.ids_by_type:
class_name = object_data["object_name"]
object_class = object_map[class_name]
object_ids = object_data.get("ids", [])
fields = object_data.get("fields")
block_converter = BlockConverter(self, object_class=object_class,
fields=fields, object_ids=object_ids,
class_name=class_name)
block_converter.row_converters_from_ids()
self.block_converters.append(block_converter)
def block_converters_from_csv(self):
"""Prepare BlockConverters and order them like specified in
self.CLASS_ORDER.
"""
offsets, data_blocks = split_array(self.csv_data)
for offset, data in zip(offsets, data_blocks):
if len(data) < 2:
continue # empty block
class_name = data[1][0].strip().lower()
object_class = self.exportable.get(class_name)
raw_headers, rows = extract_relevant_data(data)
block_converter = BlockConverter(self, object_class=object_class,
rows=rows, raw_headers=raw_headers,
offset=offset, class_name=class_name)
self.block_converters.append(block_converter)
order = defaultdict(int)
order.update({c: i for i, c in enumerate(self.CLASS_ORDER)})
order["Person"] = -1
self.block_converters.sort(key=lambda x: order[x.name])
def import_objects(self):
for converter in self.block_converters:
converter.handle_row_data()
converter.import_objects()
def import_secondary_objects(self):
for converter in self.block_converters:
converter.import_secondary_objects(self.new_objects)
def get_info(self):
for converter in self.block_converters:
self.response_data.append(converter.get_info())
return self.response_data
def get_object_names(self):
return [c.object_class.__name__ for c in self.block_converters]
@classmethod
def drop_cache(cls):
if not getattr(settings, 'MEMCACHE_MECHANISM', False):
return
memcache = MemCache()
memcache.clean()
manchicken/gist_per_day | gist_reminder.py | Python | unlicense | 554 | 0.01444 | #!/usr/bin/env python
# This program is designed to send a single email to me once a day if it doesn't see a commit
# in the last 24 hours to gist_a_day.
from gistapi import Gist, Gists
import gmail
import pprint
config = {
'sender' : 'davidelmets4peace@gmail.com',
'senderpw' : 'yuslohaw',
'recipient' : 'themanchicken@gmail.com',
'subject' : 'Don\'t forget to create a gist!',
'gistuser' : 'manchicken',
'api_token' : '069598e0300ee0445b48'
}
gistfetch = Gists.fetch_by_user(config['gistuser'])
pprint.pprint(gistfetch)
netscaler/neutron | neutron/services/metering/agents/metering_agent.py | Python | apache-2.0 | 11,158 | 0 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import eventlet
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as constants
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import periodic_task
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, host):
super(MeteringPluginRpc,
self).__init__(topic=topics.METERING_AGENT,
default_version=self.BASE_RPC_API_VERSION)
def _get_sync_data_metering(self, context):
try:
return self.call(context,
self.make_msg('get_sync_data_metering',
host=self.host),
topic=topics.METERING_PLUGIN)
except Exception:
LOG.exception(_("Failed synchronizing routers"))
class MeteringAgent(MeteringPluginRpc, manager.Manager):
Opts = [
cfg.StrOpt('driver',
default='neutron.services.metering.drivers.noop.'
'noop_driver.NoopMeteringDriver',
help=_("Metering driver")),
cfg.IntOpt('measure_interval', default=30,
help=_("Interval between two metering measures")),
cfg.IntOpt('report_interval', default=300,
help=_("Interval between two metering reports")),
]
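    # Illustrative metering_agent.ini values for the options above (the driver
    # path is assumed from neutron's in-tree iptables metering driver):
    #   [DEFAULT]
    #   driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
    #   measure_interval = 30
    #   report_interval = 300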
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._load_drivers()
self.root_helper = config.get_root_helper(self.conf)
self.context = context.get_admin_context_without_session()
self.metering_info = {}
self.metering_loop = loopingcall.FixedIntervalLoopingCall(
self._metering_loop
)
measure_interval = self.conf.measure_interval
self.last_report = 0
self.metering_loop.start(interval=measure_interval)
self.host = host
self.label_tenant_id = {}
self.routers = {}
self.metering_infos = {}
        super(MeteringAgent, self).__init__(host=self.conf.host)
def _load_drivers(self):
"""Loads plugin-driver from configuration."""
LOG.info(_("Loading Metering driver %s"), self.conf.driver)
if not self.conf.driver:
raise SystemExit(_('A metering driver must be specified'))
self.metering_driver = importutils.import_object(
self.conf.driver, self, self.conf)
    def _metering_notification(self):
for label_id, info in self.metering_infos.items():
data = {'label_id': label_id,
'tenant_id': self.label_tenant_id.get(label_id),
'pkts': info['pkts'],
'bytes': info['bytes'],
'time': info['time'],
'first_update': info['first_update'],
'last_update': info['last_update'],
'host': self.host}
LOG.debug(_("Send metering report: %s"), data)
notifier_api.notify(self.context,
notifier_api.publisher_id('metering'),
'l3.meter',
notifier_api.CONF.default_notification_level,
data)
info['pkts'] = 0
info['bytes'] = 0
info['time'] = 0
def _purge_metering_info(self):
ts = int(time.time())
report_interval = self.conf.report_interval
for label_id, info in self.metering_info.items():
if info['last_update'] > ts + report_interval:
del self.metering_info[label_id]
def _add_metering_info(self, label_id, pkts, bytes):
ts = int(time.time())
info = self.metering_infos.get(label_id, {'bytes': 0,
'pkts': 0,
'time': 0,
'first_update': ts,
'last_update': ts})
info['bytes'] += bytes
info['pkts'] += pkts
info['time'] += ts - info['last_update']
info['last_update'] = ts
self.metering_infos[label_id] = info
return info
def _add_metering_infos(self):
self.label_tenant_id = {}
for router in self.routers.values():
tenant_id = router['tenant_id']
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
self.label_tenant_id[label_id] = tenant_id
tenant_id = self.label_tenant_id.get
accs = self._get_traffic_counters(self.context, self.routers.values())
if not accs:
return
for label_id, acc in accs.items():
self._add_metering_info(label_id, acc['pkts'], acc['bytes'])
def _metering_loop(self):
self._add_metering_infos()
ts = int(time.time())
delta = ts - self.last_report
report_interval = self.conf.report_interval
if delta > report_interval:
self._metering_notification()
self._purge_metering_info()
self.last_report = ts
@utils.synchronized('metering-agent')
def _invoke_driver(self, context, meterings, func_name):
try:
return getattr(self.metering_driver, func_name)(context, meterings)
        except AttributeError:
LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
{'driver': cfg.CONF.metering_driver,
'func': func_name})
@periodic_task.periodic_task(run_immediately=True)
def _sync_routers_task(self, context):
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def router_deleted(self, context, router_id):
self._add_metering_infos()
if router_id in self.routers:
del self.routers[router_id]
return self._invoke_driver(context, router_id,
'remove_router')
def routers_updated(self, context, routers=None):
if not routers:
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def _update_routers(self, context, routers):
for router in routers:
self.routers[router['id']] = router
return self._invoke_driver(context, routers,
'update_routers')
def _get_traffic_counters(self, context, routers):
LOG.debug(_("Get router traffic counters"))
return self._invoke_driver(context, routers, 'get_traffic_counters')
def update_metering_label_rules(self, context, routers):
LOG.debug(_("Update metering rules from agent"))
        return self._invoke_driver(context, routers,
                                   'update_metering_label_rules')
molpopgen/fwdpy11 | fwdpy11/_functions/simplify_tables.py | Python | gpl-3.0 | 2,314 | 0.002593 | from typing import List, Tuple, Union
import fwdpy11._fwdpy11
import fwdpy11._types
import numpy as np
def simplify(pop, samples):
"""
Simplify a TableCollection stored in a Population.
:param pop: A :class:`fwdpy11.DiploidPopulation`
    :param samples: A list of samples (node indexes).
    :return: The simplified tables and an array mapping input sample IDs to output IDs
:rtype: tuple
Note that the samples argument is agnostic with respect to the time of
the nodes in the input tables. Thus, you may do things like simplify
to a set of "currently-alive" nodes plus some or all ancient samples by
including some node IDs from
    :attr:`fwdpy11.DiploidPopulation.ancient_sample_metadata`.
If the input contains ancient samples, and you wish to include them in the output,
then you need to include their IDs in the samples argument.
.. note::
Due to node ID remapping, the metadata corresponding to nodes becomes a bit more
difficult to look up. You need to use the output ID map, the original IDs, and
the population's metadata containers.
.. deprecated:: 0.3.0
Prefer :func:`fwdpy11.simplify_tables`
.. versionchanged:: 0.3.0
Ancient samples are no longer kept by default
.. versionchanged:: 0.5.0
No longer requires a :class:`MutationVector` argument.
"""
import warnings
warnings.warn(
"This function is deprecated and will be removed soon. Please use fwdpy11.simplify_tables instead",
category=FutureWarning,
)
ll_t, idmap = fwdpy11._fwdpy11._simplify(pop, samples)
return fwdpy11._types.TableCollection(ll_t), idmap
def simplify_tables(
tables: fwdpy11._types.TableCollection, samples: Union[List, np.ndarray]
) -> Tuple[fwdpy11._types.TableCollection, np.ndarray]:
"""
Simplify a TableCollection.
    :param tables: A table collection.
    :type tables: :class:`fwdpy11.TableCollection`
    :param samples: list of samples
    :type samples: list-like or array-like
:returns: A simplified TableCollection and an array containing remapped sample ids.
:rtype: tuple
.. versionadded:: 0.3.0
"""
ll_t, idmap = fwdpy11._fwdpy11._simplify_tables(tables, samples)
return fwdpy11._types.TableCollection(ll_t), idmap
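# Minimal usage sketch (hypothetical objects: assumes `pop` is a simulated
# DiploidPopulation with tree sequences recorded and `samples` is an array of
# node IDs to keep):
#   tables, idmap = simplify_tables(pop.tables, samples)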
CGATOxford/CGATPipelines | CGATPipelines/pipeline_docs/pipeline_motifs/trackers/Context.py | Python | mit | 156 | 0 |
from CGATReport.Tracker import *
from IntervalReport import *
class ContextSummary(IntervalTracker, SingleTableTrackerRows):
table = "con | text_stats"
wakamori/GoForIt | 1/1-2.py | Python | bsd-2-clause | 1,719 | 0.002327 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import random
import sys
class DayLife:
"""Life in a day."""
def __init__(self, date, life):
"""Set birth datetime and life."""
self.birthdate = date
self.life = life
        finalyear = self.birthdate.year + self.life
finaldate = datetime.datetime(finalyear, self.birthdate.month,
self.birthdate.day)
self.finaldate = finaldate - datetime.timedelta(days=1)
def now(self):
"""Calculate current time."""
curdate = datetime.datetime.now()
        maxdays = (self.finaldate - self.birthdate).days
curdays = (curdate - self.birthdate).days
curtime = datetime.timedelta(days=1) / maxdays
curtime = curtime * curdays
return datetime.time(
(curtime.seconds / 60) / 60,
(curtime.seconds / 60) % 60,
curtime.seconds % 60)
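# Example (illustrative): with a 200-year "life" starting 1990-01-01, the
# life-clock advances 24h/200 = 7.2 minutes per calendar year, so 33 years in
# it reads a little before 04:00:
#   print DayLife(datetime.datetime(1990, 1, 1), 200).now()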
if __name__ == '__main__':
# options
startyear = 1900
endyear = 2000
life = 200
print startyear, "<= a <=", endyear
print "n =", life
daycount = (datetime.datetime(endyear, 12, 31) -
datetime.datetime(startyear, 1, 1)).days
birthdate = datetime.datetime(startyear, 1, 1) + \
datetime.timedelta(days=random.randint(0, daycount))
args = sys.argv
if len(args) == 4:
year = int(args[1])
month = int(args[2])
date = int(args[3])
birthdate = datetime.datetime(year, month, date)
print "birthdate:", birthdate.date()
mylife = DayLife(birthdate, life)
print "finaldate:", mylife.finaldate.date()
print "today:", mylife.now()
spahan/unixdmoain | lib/afslib/tests/vos.py | Python | bsd-3-clause | 376 | 0.013298 | #coding: utf-8
import sys
sys.path.append("/opt/UD2/lib/afslib")
import afslib
vos = afslib.VolumeServer()
# for vol in vos.volumes:
# print vol
print vos
print "Fileservers : %s " % str(vos.fileserv)
testvol = ("testvolume", "urz-mars.urz.unibas.ch", "/vicepa")
vos.create(testvol)
import os
print(os.popen("vos exa testvolume").read())
vos.remove(testvol)
dougwig/x-neutron-lbaas | neutron_lbaas/openstack/common/systemd.py | Python | apache-2.0 | 3,066 | 0 | # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from neutron_lbaas.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style n | otification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
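# Example invocations of the test CLI at the bottom of this module:
#   python systemd.py          # sends READY=1 via $NOTIFY_SOCKET (if set)
#   python systemd.py 30.0     # waits up to 30s for READY=1; exits 0/1/2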
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
Distrotech/dia | plug-ins/python/diadissect.py | Python | gpl-2.0 | 6,191 | 0.048134 | #
# Dissect a diagram by rendering it and accumulating invalid
# renderer calls to object selection.
#
# Copyright (c) 2014 Hans Breuer <hans@breuer.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys, string, dia
##
# \brief A dissecting renderer for Dia
#
# Check the diagram by rendering it and report anomalies with their
# call and the causing object.
#
# \extends _DiaPyRenderer
# \ingroup ExportFilters
class DissectRenderer :
def __init__ (self) :
self.f = None
self.current_objects = []
self.warnings = []
self.errors = []
self.font = None
def _open(self, filename) :
self.f = open(filename, "w")
def begin_render (self, data, filename) :
self._open (filename)
self.extents = data.extents
try :
# this can fail for two reason:
# 1) data.diagram is None, e.g. when running from pure bindings
# 2) there is no member data.diagram because Dia is just too old
self.f.write ("# Dissect %s\n" % (data.diagram.filename,))
except :
self.f.write ("# Dissect %s\n" % (filename,))
def end_render (self) :
self.f.write('%d error(s) %d warning(s)\n' % (len(self.errors), len(self.warnings)))
self.f.close ()
def Warning (self, msg) :
        self.warnings.append ((self.current_objects[-1], msg))
if self.f :
self.f.write ("Warni | ng: %s, %s\n" % (self.current_objects[-1], msg))
def Error (self, msg) :
self.errors.append ((self.current_objects[-1], msg))
if self.f :
self.f.write ("Error: %s, %s\n" % (self.current_objects[-1], msg))
def draw_object (self, object, matrix) :
self.current_objects.append (object)
# XXX: check matrix
# don't forget to render the object
object.draw (self)
del self.current_objects[-1]
def set_linewidth (self, width) :
# width==0 is hairline
if width < 0 or width > 10 :
self.Warning ("linewidth out of range")
def set_linecaps (self, mode) :
if mode < 0 or mode > 2 :
self.Error ("linecaps '%d' unknown" % (mode,))
def set_linejoin (self, mode) :
if mode < 0 or mode > 2 :
self.Error ("linejoin '%d' unknown" % (mode,))
def set_linestyle (self, style, dash_length) :
if style < 0 or style > 4 :
self.Error ("linestyle '%d' unknown" % (style,))
if dash_length < 0.001 or dash_length > 1 :
self.Warning ("dashlength '%f' out of range" % (dash_length,))
def set_fillstyle (self, style) :
# currently only 'solid' so not used anywhere else
if style != 0 :
self.Error ("fillstyle '%d' unknown" % (style,))
def set_font (self, font, size) :
self.font = font
self.font_size = size
def draw_line (self, start, end, color) :
pass # can anything go wrong here ?
def draw_polyline (self, points, color) :
if len(points) < 2 :
self.Error ("draw_polyline with too few points")
def _polygon (self, points, fun) :
if len(points) < 3 :
self.Error ("%s with too few points" % (fun,))
def draw_polygon (self, points, fill, stroke) :
self._polygon(points, "draw_polygon")
# obsolete with recent Dia
def fill_polygon (self, points, color) :
self._polygon(points, "draw_polygon")
def _rect (self, rect, fun) :
if rect.top > rect.bottom :
self.Warning ("%s negative height" % (fun,))
if rect.left > rect.right :
self.Warning ("%s negative width" % (fun,))
def draw_rect (self, rect, fill, stroke) :
self._rect (rect, "draw_rect")
def draw_rounded_rect (self, rect, fill, stroke, rounding) :
# XXX: check rounding to be positive (smaller than half width, height?)
self._rect (rect, "draw_rect")
def _arc (self, center, width, height, angle1, angle2, fun) :
if width <= 0 :
self.Warning ("%s width too small" % (fun,))
if height <= 0 :
self.Warning ("%s height too small" % (fun,))
# angles
rot = 0.0
if angle1 < angle2 :
rot = angle2 - angle1
else :
rot = angle1 - angle2
if rot <= 0 or rot >= 360 :
self.Warning ("%s bad rotation %g,%g" % (fun, angle1, angle2))
def draw_arc (self, center, width, height, angle1, angle2, color) :
self._arc(center, width, height, angle1, angle2, "draw_arc")
def fill_arc (self, center, width, height, angle1, angle2, color) :
self._arc(center, width, height, angle1, angle2, "fill_arc")
def draw_ellipse (self, center, width, height, fill, stroke) :
self._arc(center, width, height, 0, 360, "draw_ellipse")
def _bezier (self, bezpoints, fun) :
nMoves = 0
for bp in bezpoints :
if bp.type == 0 : # BEZ_MOVE_TO
nMoves = nMoves + 1
if nMoves > 1 :
self.Warning ("%s move-to within", (fun,))
elif bp.type == 1 : # BEZ_LINE_TO
pass
elif bp.type == 2 : # BEZ_CURVE_TO
pass
else :
self.Error ("%s invalid BezPoint type='%d'" % (fun, bp.type,))
def draw_bezier (self, bezpoints, color) :
if len(bezpoints) < 2 :
self.Error ("draw_bezier too few points");
self._bezier (bezpoints, "draw_bezier")
def fill_bezier (self, bezpoints, color) :
if len(bezpoints) < 3 :
self.Error ("fill_bezier too few points");
self._bezier (bezpoints, "fill_bezier")
def draw_string (self, text, pos, alignment, color) :
if len(text) < 1 :
self.Warning ("draw_string empty text")
if alignment < 0 or alignment > 2 :
self.Error ("draw_string unknown alignmern '%d'" % (alignment,))
def draw_image (self, point, width, height, image) :
if width <= 0 :
self.Warning ("draw_image width too small")
if height <= 0 :
self.Warning ("draw_image height too small")
# XXX: check image, e.g. existing file name
# dia-python keeps a reference to the renderer class and uses it on demand
dia.register_export ("Dissect", "dissect", DissectRenderer())
its-dirg/oidc-fed | src/services/op/op.py | Python | apache-2.0 | 3,497 | 0.002574 | import json
import ssl
import jinja2
import yaml
from flask import jsonify
from flask.app import Flask
from flask.globals import request, current_app
from flask.templating import render_template
from jwkest.jwk import keyrep
from oic.oic.provider import Provider
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authn.user import NoAuthn
from oic.utils.authz import AuthzHandling
from oic.utils.sdb import SessionDB
from werkzeug.utils import redirect
from oidc_fed import OIDCFederationError
from oidc_fed.provider import OP
def init_fed_op(cnf):
with open(cnf["PROVIDER_CONFIG"]) as f:
provider_config = yaml.safe_load(f)
root_key = keyrep(provider_config["root_key_jwk"])
federation_keys = [keyrep(jwk) for jwk in provider_config["federations_jwk"]]
authn_broker = AuthnBroker()
name = "https://" + cnf["SERVER_NAME"]
user = "tester"
authn_broker.add("password", NoAuthn(None, user))
provider = Provider(name, SessionDB(name), {}, authn_broker, None, AuthzHandling(), verify_client, None)
return OP(name, root_key, provider_config["software_statements"], federation_keys, name + "/signed_jwks",
provider, name + "/jwks")
def init_app():
app = Flask(__name__)
template_loader = jinja2.FileSystemLoader(["templates", "../templates"])
app.jinja_loader = template_loader
app.config.from_envvar("OIDCFED_PROVIDER_CONFIG")
app.op = init_fed_op(app.config)
return app
app = init_app()
@app.route("/")
def index():
return render_template("index.html", software_statements=[ss.jwt.headers["kid"] for ss in
current_app.op.software_statements])
@app.route("/signed_jwks")
def signed_jwks():
return current_app.op.signed_jwks
@app.route("/jwks")
def jwks():
return jsonify(current_app.op.jwks.export_jwks())
@app.route("/.well-known/openid-configuration")
def provider_configuration():
response = current_app.op.provider_configuration()
# return response.message, response.status, response.headers
return jsonify(json.loads(response.message))
@app.route("/registration", methods=["post"])
def client_registration():
response = current_app.op.register_client(request.headers.get("Authorization"),
request.get_data().decode("utf-8"))
return response.message, response.status, response.headers
@app.route("/authorization")
def authentication_endpoint():
response = current_app.op.provider.authorization_endpoint(request.query_string.decode("utf-8"))
return redirect(response.message, 303)
@app.route("/token", methods=["get", "post"])
def token_endpoint():
client_authn = request.headers.get("Authorization")
if request.method == "GET":
data = request.query_string
elif request.method == "POST":
        data = request.get_data()
response = current_app.op.provider.token_endpoint(data.decode("utf-8"), authn=client_authn)
return response.message, response.status, response.headers
@app.errorhandler(OIDCFederationError)
def exception_handler(error):
response = app.make_response(str(error))
response.status_code = 400
return response
if __name__ == "__main__":
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(app.config['HTTPS_CERT'], app.config['HTTPS_KEY'])
app.run(debug=True, ssl_context=context)
kevgathuku/django-dotenv | tests.py | Python | bsd-3-clause | 4,694 | 0.000213 | import os
import unittest
import warnings
from dotenv import parse_dotenv, read_dotenv
class ParseDotenvTestCase(unittest.TestCase):
def test_parses_unquoted_values(self):
env = parse_dotenv('FOO=bar')
self.assertEqual(env, {'FOO': 'bar'})
def test_parses_values_with_spaces_around_equal_sign(self):
env = parse_dotenv('FOO =bar')
self.assertEqual(env, {'FOO': 'bar'})
env = parse_dotenv('FOO= bar')
self.assertEqual(env, {'FOO': 'bar'})
def test_parses_double_quoted_values(self):
env = parse_dotenv('FOO="bar"')
self.assertEqual(env, {'FOO': 'bar'})
def test_parses_single_quoted_values(self):
env = parse_dotenv("FOO='bar'")
self.assertEqual(env, {'FOO': 'bar'})
def test_parses_escaped_double_quotes(self):
env = parse_dotenv('FOO="escaped\"bar"')
self.assertEqual(env, {'FOO': 'escaped"bar'})
def test_parses_empty_values(self):
env = parse_dotenv('FOO=')
self.assertEqual(env, {'FOO': ''})
def test_expands_variables_found_in_values(self):
env = parse_dotenv("FOO=test\nBAR=$FOO")
self.assertEqual(env, {'FOO': 'test', 'BAR': 'test'})
def test_expands_variables_wrapped_in_brackets(self):
env = parse_dotenv("FOO=test\nBAR=${FOO}bar")
self.assertEqual(env, {'FOO': 'test', 'BAR': 'testbar'})
def test_expands_variables_from_environ_if_not_found_in_local_env(self):
os.environ.setdefault('FOO', 'test')
env = parse_dotenv('BAR=$FOO')
self.assertEqual(env, {'BAR': 'test'})
def test_expands_undefined_variables_to_an_empty_string(self):
self.assertEqual(parse_dotenv('BAR=$FOO'), {'BAR': ''})
def test_expands_variables_in_double_quoted_values(self):
env = parse_dotenv("FOO=test\nBAR=\"quote $FOO\"")
        self.assertEqual(env, {'FOO': 'test', 'BAR': 'quote test'})
def test_does_not_expand_variables_in_single_quoted_values(self):
env = parse_dotenv("BAR='quote $FOO'")
self.assertEqual(env, {'BAR': 'quote $FOO'})
def test_does_not_expand_escaped_variables(self):
        env = parse_dotenv('FOO="foo\\$BAR"')
self.assertEqual(env, {'FOO': 'foo$BAR'})
env = parse_dotenv('FOO="foo\${BAR}"')
self.assertEqual(env, {'FOO': 'foo${BAR}'})
def test_parses_export_keyword(self):
env = parse_dotenv('export FOO=bar')
self.assertEqual(env, {'FOO': 'bar'})
def test_parses_key_with_dot_in_the_name(self):
env = parse_dotenv('FOO.BAR=foobar')
self.assertEqual(env, {'FOO.BAR': 'foobar'})
def test_strips_unquoted_values(self):
env = parse_dotenv('foo=bar ')
self.assertEqual(env, {'foo': 'bar'}) # not 'bar '
def test_warns_if_line_format_is_incorrect(self):
with warnings.catch_warnings(record=True) as w:
parse_dotenv('lol$wut')
self.assertEqual(len(w), 1)
self.assertTrue(w[0].category is SyntaxWarning)
self.assertEqual(
str(w[0].message),
"Line 'lol$wut' doesn't match format"
)
def test_ignores_empty_lines(self):
env = parse_dotenv("\n \t \nfoo=bar\n \nfizz=buzz")
self.assertEqual(env, {'foo': 'bar', 'fizz': 'buzz'})
def test_ignores_inline_comments(self):
env = parse_dotenv('foo=bar # this is foo')
self.assertEqual(env, {'foo': 'bar'})
def test_allows_hash_in_quoted_values(self):
env = parse_dotenv('foo="bar#baz" # comment ')
self.assertEqual(env, {'foo': 'bar#baz'})
def test_ignores_comment_lines(self):
env = parse_dotenv("\n\n\n # HERE GOES FOO \nfoo=bar")
self.assertEqual(env, {'foo': 'bar'})
def test_parses_hash_in_quoted_values(self):
env = parse_dotenv('foo="ba#r"')
self.assertEqual(env, {'foo': 'ba#r'})
        self.assertEqual(parse_dotenv("foo='ba#r'"), {'foo': 'ba#r'})
class ReadDotenvTestCase(unittest.TestCase):
def test_defaults_to_dotenv(self):
read_dotenv()
self.assertEqual(os.environ.get('DOTENV'), 'true')
def test_reads_the_file(self):
read_dotenv('.env')
self.assertEqual(os.environ.get('DOTENV'), 'true')
def test_warns_if_file_does_not_exist(self):
with warnings.catch_warnings(record=True) as w:
read_dotenv('.does_not_exist')
self.assertEqual(len(w), 1)
self.assertTrue(w[0].category is UserWarning)
self.assertEqual(
str(w[0].message),
"Not reading .does_not_exist - it doesn't exist."
)
rndusr/urwid | urwid/display_common.py | Python | lgpl-2.1 | 31,238 | 0.002401 | #!/usr/bin/python
# Urwid common display code
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from __future__ import division, print_function
import os
import sys
try:
import termios
except ImportError:
pass # windows
from urwid.util import StoppingContext, int_scale
from urwid import signals
from urwid.compat import B, bytes3, xrange, with_metaclass
# for replacing unprintable bytes with '?'
UNPRINTABLE_TRANS_TABLE = B("?") * 32 + bytes3(list(xrange(32,256)))
# signals sent by BaseScreen
UPDATE_PALETTE_ENTRY = "update palette entry"
INPUT_DESCRIPTORS_CHANGED = "input descriptors changed"
# AttrSpec internal values
_BASIC_START = 0 # first index of basic color aliases
_CUBE_START = 16 # first index of color cube
_CUBE_SIZE_256 = 6 # one side of the color cube
_GRAY_SIZE_256 = 24
_GRAY_START_256 = _CUBE_SIZE_256 ** 3 + _CUBE_START
_CUBE_WHITE_256 = _GRAY_START_256 -1
_CUBE_SIZE_88 = 4
_GRAY_SIZE_88 = 8
_GRAY_START_88 = _CUBE_SIZE_88 ** 3 + _CUBE_START
_CUBE_WHITE_88 = _GRAY_START_88 -1
_CUBE_BLACK = _CUBE_START
# values copied from xterm 256colres.h:
_CUBE_STEPS_256 = [0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]
_GRAY_STEPS_256 = [0x08, 0x12, 0x1c, 0x26, 0x30, 0x3a, 0x44, 0x4e, 0x58, 0x62,
0x6c, 0x76, 0x80, 0x84, 0x94, 0x9e, 0xa8, 0xb2, 0xbc, 0xc6, 0xd0,
0xda, 0xe4, 0xee]
# values copied from xterm 88colres.h:
_CUBE_STEPS_88 = [0x00, 0x8b, 0xcd, 0xff]
_GRAY_STEPS_88 = [0x2e, 0x5c, 0x73, 0x8b, 0xa2, 0xb9, 0xd0, 0xe7]
# values copied from X11/rgb.txt and XTerm-col.ad:
_BASIC_COLOR_VALUES = [(0,0,0), (205, 0, 0), (0, 205, 0), (205, 205, 0),
(0, 0, 238), (205, 0, 205), (0, 205, 205), (229, 229, 229),
(127, 127, 127), (255, 0, 0), (0, 255, 0), (255, 255, 0),
(0x5c, 0x5c, 0xff), (255, 0, 255), (0, 255, 255), (255, 255, 255)]
_COLOR_VALUES_256 = (_BASIC_COLOR_VALUES +
[(r, g, b) for r in _CUBE_STEPS_256 for g in _CUBE_STEPS_256
for b in _CUBE_STEPS_256] +
[(gr, gr, gr) for gr in _GRAY_STEPS_256])
_COLOR_VALUES_88 = (_BASIC_COLOR_VALUES +
[(r, g, b) for r in _CUBE_STEPS_88 for g in _CUBE_STEPS_88
for b in _CUBE_STEPS_88] +
[(gr, gr, gr) for gr in _GRAY_STEPS_88])
assert len(_COLOR_VALUES_256) == 256
assert len(_COLOR_VALUES_88) == 88
_FG_COLOR_MASK = 0x000000ff
_BG_COLOR_MASK = 0x0000ff00
_FG_BASIC_COLOR = 0x00010000
_FG_HIGH_COLOR = 0x00020000
_BG_BASIC_COLOR = 0x00040000
_BG_HIGH_COLOR = 0x00080000
_BG_SHIFT = 8
_HIGH_88_COLOR = 0x00100000
_STANDOUT = 0x02000000
_UNDERLINE = 0x04000000
_BOLD = 0x08000000
_BLINK = 0x10000000
_ITALICS = 0x20000000
_STRIKETHROUGH = 0x40000000
_FG_MASK = (_FG_COLOR_MASK | _FG_BASIC_COLOR | _FG_HIGH_COLOR |
_STANDOUT | _UNDERLINE | _BLINK | _BOLD | _ITALICS | _STRIKETHROUGH)
_BG_MASK = _BG_COLOR_MASK | _BG_BASIC_COLOR | _BG_HIGH_COLOR
DEFAULT = 'default'
BLACK = 'black'
DARK_RED = 'dark red'
DARK_GREEN = 'dark green'
BROWN = 'brown'
DARK_BLUE = 'dark blue'
DARK_MAGENTA = 'dark magenta'
DARK_CYAN = 'dark cyan'
LIGHT_GRAY = 'light gray'
DARK_GRAY = 'dark gray'
LIGHT_RED = 'light red'
LIGHT_GREEN = 'light green'
YELLOW = 'yellow'
LIGHT_BLUE = 'light blue'
LIGHT_MAGENTA = 'light magenta'
LIGHT_CYAN = 'light cyan'
WHITE = 'white'
_BASIC_COLORS = [
BLACK,
DARK_RED,
DARK_GREEN,
BROWN,
DARK_BLUE,
DARK_MAGENTA,
DARK_CYAN,
LIGHT_GRAY,
DARK_GRAY,
LIGHT_RED,
LIGHT_GREEN,
YELLOW,
LIGHT_BLUE,
LIGHT_MAGENTA,
LIGHT_CYAN,
WHITE,
]
_ATTRIBUTES = {
'bold': _BOLD,
'italics': _ITALICS,
'underline': _UNDERLINE,
'blink': _BLINK,
'standout': _STANDOUT,
'strikethrough': _STRIKETHROUGH,
}
def _value_lookup_table(values, size):
"""
Generate a lookup table for finding the closest item in values.
Lookup returns (index into values)+1
values -- list of values in ascending order, all < size
size -- size of lookup table and maximum value
>>> _value_lookup_table([0, 7, 9], 10)
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2]
"""
middle_values = [0] + [(values[i] + values[i + 1] + 1) // 2
for i in range(len(values) - 1)] + [size]
lookup_table = []
for i in range(len(middle_values)-1):
count = middle_values[i + 1] - middle_values[i]
lookup_table.extend([i] * count)
return lookup_table
_CUBE_256_LOOKUP = _value_lookup_table(_CUBE_STEPS_256, 256)
_GRAY_256_LOOKUP = _value_lookup_table([0] + _GRAY_STEPS_256 + [0xff], 256)
_CUBE_88_LOOKUP = _value_lookup_table(_CUBE_STEPS_88, 256)
_GRAY_88_LOOKUP = _value_lookup_table([0] + _GRAY_STEPS_88 + [0xff], 256)
# convert steps to values that will be used by string versions of the colors
# 1 hex digit for rgb and 0..100 for grayscale
_CUBE_STEPS_256_16 = [int_scale(n, 0x100, 0x10) for n in _CUBE_STEPS_256]
_GRAY_STEPS_256_101 = [int_scale(n, 0x100, 101) for n in _GRAY_STEPS_256]
_CUBE_STEPS_88_16 = [int_scale(n, 0x100, 0x10) for n in _CUBE_STEPS_88]
_GRAY_STEPS_88_101 = [int_scale(n, 0x100, 101) for n in _GRAY_STEPS_88]
# create lookup tables for 1 hex digit rgb and 0..100 for grayscale values
_CUBE_256_LOOKUP_16 = [_CUBE_256_LOOKUP[int_scale(n, 16, 0x100)]
for n in range(16)]
_GRAY_256_LOOKUP_101 = [_GRAY_256_LOOKUP[int_scale(n, 101, 0x100)]
for n in range(101)]
_CUBE_88_LOOKUP_16 = [_CUBE_88_LOOKUP[int_scale(n, 16, 0x100)]
for n in range(16)]
_GRAY_88_LOOKUP_101 = [_GRAY_88_LOOKUP[int_scale(n, 101, 0x100)]
for n in range(101)]
# The functions _gray_num_256() and _gray_num_88() do not include the gray
# values from the color cube so that the gray steps are an even width.
# The color cube grays are available by using the rgb functions. Pure
# white and black are taken from the color cube, since the gray range does
# not include them, and the basic colors are more likely to have been
# customized by an end-user.
def _gray_num_256(gnum):
"""Return ths colo | r number for gray number gnum.
Color cube black and white are returned for 0 and 25 respectively
since those values aren't included in the gray scale.
"""
# grays start from index 1
gnum -= 1
if gnum < 0:
return _CUBE_BLACK
if gnum >= _GRAY_SIZE_256:
return _CUBE_WHITE_256
return _GRAY_START_256 + gnum
def _gray_num_88(gnum):
"""Return ths color number for gray number gnum.
Color cube black and white are returned for 0 and 9 respectively
since those values aren't included in the gray scale.
"""
# gnums start from index 1
gnum -= 1
if gnum < 0:
return _CUBE_BLACK
if gnum >= _GRAY_SIZE_88:
return _CUBE_WHITE_88
return _GRAY_START_88 + gnum
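# Sanity examples implied by the tables above (16 is color cube black;
# 231 and 79 are color cube white in 256- and 88-color modes):
#   _gray_num_256(0) == 16,  _gray_num_256(25) == 231
#   _gray_num_88(0) == 16,   _gray_num_88(9) == 79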
def _color_desc_256(num):
"""
Return a string description of color number num.
0..15 -> 'h0'..'h15' basic colors (as high-colors)
16..231 -> '#000'..'#fff' color cube colors
232..255 -> 'g3'..'g93' grays
>>> _color_desc_256(15)
'h15'
>>> _color_desc_256(16)
'#000'
>>> _color_desc_256(17)
'#006'
>>> _color_desc_256(230)
'#ffd'
>>> _color_desc_256(233)
'g7'
>>> _color_desc_256(234)
'g11'
"""
assert num >= 0 and num < 256, num
if num < _CUBE_START:
return 'h%d' % num
if num < _GRAY_START_256:
num -= _CUBE_START
b, num = num % _CUBE_SIZE_256, num // _CUBE_SIZE_256
g, num = num % _CUBE_SIZE_256, num // _CUBE_SIZE_256
r = num % _CUBE_SIZE_256
        return '#%x%x%x' % (_CUBE_STEPS_256_16[r], _CUBE_STEPS_256_16[g],
            _CUBE_STEPS_256_16[b])
quantumlib/Cirq | cirq-google/cirq_google/engine/abstract_program.py | Python | apache-2.0 | 6,521 | 0.000767 | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An inte | rface for quantum programs.
The quantum program represents a circuit (or other execution) that,
when combined with a run context, will become a quantum job.
"""
import abc
import datetime
from typing import Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Union
import cirq
from cirq_google.engine.client import quantum
if TYPE_CHECKING:
import cirq_google.engine.abstract_job as abstract_job
import cirq_google.engine.abstract_engine as abstract_engine
class AbstractProgram(abc.ABC):
"""An abstract object representing a quantum program.
This program generally wraps a `Circuit` with additional metadata.
When combined with an appropriate RunContext, this becomes a
Job that can run on either an Engine service or simulator.
Programs can also be a batch (list of circuits) or calibration
requests.
This is an abstract class that inheritors should implement.
"""
@abc.abstractmethod
def engine(self) -> 'abstract_engine.AbstractEngine':
"""Returns the parent Engine object.
Returns:
The program's parent Engine.
"""
@abc.abstractmethod
def get_job(self, job_id: str) -> 'abstract_job.AbstractJob':
"""Returns an AbstractJob for an existing id.
Args:
job_id: Unique ID of the job within the parent program.
Returns:
A AbstractJob for this program.
"""
@abc.abstractmethod
def list_jobs(
self,
created_before: Optional[Union[datetime.datetime, datetime.date]] = None,
created_after: Optional[Union[datetime.datetime, datetime.date]] = None,
has_labels: Optional[Dict[str, str]] = None,
execution_states: Optional[Set[quantum.enums.ExecutionStatus.State]] = None,
) -> Sequence['abstract_job.AbstractJob']:
"""Returns the list of jobs for this program.
Args:
            created_after: retrieve jobs that were created after this date
                or time.
            created_before: retrieve jobs that were created before this date
                or time.
has_labels: retrieve jobs that have labels on them specified by
this dict. If the value is set to `*`, jobs having the label
regardless of the label value will be returned. For example, to
query programs that have the shape label and have the color
label with value red can be queried using
{'color': 'red', 'shape':'*'}
execution_states: retrieve jobs that have an execution state that
is contained in `execution_states`. See
`quantum.enums.ExecutionStatus.State` enum for accepted values.
Returns:
A sequence of `AbstractJob` objects that satisfy the constraints.
"""
@abc.abstractmethod
def create_time(self) -> 'datetime.datetime':
"""Returns when the program was created."""
@abc.abstractmethod
def update_time(self) -> 'datetime.datetime':
"""Returns when the program was last updated."""
@abc.abstractmethod
def description(self) -> str:
"""Returns the description of the program."""
@abc.abstractmethod
def set_description(self, description: str) -> 'AbstractProgram':
"""Sets the description of the program.
Params:
description: The new description for the program.
Returns:
This AbstractProgram.
"""
@abc.abstractmethod
def labels(self) -> Dict[str, str]:
"""Returns the labels of the program."""
@abc.abstractmethod
def set_labels(self, labels: Dict[str, str]) -> 'AbstractProgram':
"""Sets (overwriting) the labels for a previously created quantum program.
Params:
labels: The entire set of new program labels.
Returns:
This AbstractProgram.
"""
@abc.abstractmethod
def add_labels(self, labels: Dict[str, str]) -> 'AbstractProgram':
"""Adds new labels to a previously created quantum program.
Params:
labels: New labels to add to the existing program labels.
Returns:
This AbstractProgram.
"""
@abc.abstractmethod
def remove_labels(self, keys: List[str]) -> 'AbstractProgram':
"""Removes labels with given keys from the labels of a previously
created quantum program.
Params:
label_keys: Label keys to remove from the existing program labels.
Returns:
This AbstractProgram.
"""
@abc.abstractmethod
def get_circuit(self, program_num: Optional[int] = None) -> cirq.Circuit:
"""Returns the cirq Circuit for the program. This is only
supported if the program was created with the V2 protos.
Args:
program_num: if this is a batch program, the index of the circuit in
                the batch. This argument is zero-indexed. Negative values
                index from the end of the list.
Returns:
The program's cirq Circuit.
"""
@abc.abstractmethod
def batch_size(self) -> int:
"""Returns the number of programs in a batch program.
Raises:
ValueError: if the program created was not a batch program.
"""
@abc.abstractmethod
def delete(self, delete_jobs: bool = False) -> None:
"""Deletes a previously created quantum program.
Params:
            delete_jobs: If True will delete all the program's jobs; otherwise
                this will fail if the program contains any jobs.
"""
@abc.abstractmethod
def delete_job(self, job_id: str) -> None:
"""Removes a child job from this program."""
Fenugreek/tamarind | bools.py | Python | gpl-3.0 | 1,641 | 0.005484 | """
Some utilities for bools manipulation.
Copyright 2013 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
def fields_test(dictionary, conditions):
"""
Return +1 if all conditions are satisfied; 0 if at least one (but not all)
    conditions are satisfied; and -1 if no conditions are satisfied.
conditions:
dictionary with keys corresponding to keys in dictionary, and values which are
tuples of the form (+2|+1|0|-1|-2|None, val).
+2 meaning dictionary[key] > val,
+1 meaning dictionary[key] >= val,
    0 meaning dictionary[key] == val,
-1 meaning dictionary[key] <= val,
-2 meaning dictionary[key] < val,
None meaning dictionary[key] != val.
"""
count = 0
net = 0
for key, cond_value in list(conditions.items()):
count += 1
cond, value = cond_value
field_value = dictionary[key]
if cond == 1:
result = field_value >= value
elif cond == -1:
result = field_ | value <= value
elif cond == 0:
result = field_value == value
        elif cond == 2:
result = field_value > value
elif cond == -2:
result = field_value < value
        elif cond is None:
result = field_value != value
else: raise AssertionError('Bad condition ' + str(cond))
net += result
if net == count: return 1
elif net > 0: return 0
else: return -1
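# Examples (illustrative):
#   fields_test({'a': 5, 'b': 3}, {'a': (2, 4), 'b': (0, 3)})      # -> 1, all hold
#   fields_test({'a': 5, 'b': 3}, {'a': (-2, 4), 'b': (0, 3)})     # -> 0, one holds
#   fields_test({'a': 5, 'b': 3}, {'a': (-2, 4), 'b': (None, 3)})  # -> -1, none hold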
gerardroche/sublime-phpunit | tests/test_is_valid_php_version_file_version.py | Python | bsd-3-clause | 3,218 | 0 | from PHPUnitKit.tests import unittest
from PHPUnitKit.plugin import is_valid_php_version_file_version
class TestIsValidPhpVersionFileVersion(unittest.TestCase):
def test_invalid_values(self):
self.assertFalse(is_valid_php_version_file_version(''))
self.assertFalse(is_valid_php_version_file_version(' '))
self.assertFalse(is_valid_php_version_file_version('foobar'))
self.assertFalse(is_valid_php_version_file_version('masterfoo'))
self.assertFalse(is_valid_php_version_file_version('.'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('x.x'))
self.assertFalse(is_valid_php_version_file_version('x.x.x'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('snapshot'))
def test_master_branch_version(self):
self.assertTrue(is_valid_php_version_file_version('master'))
def test_specific_semver_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.0.0'))
self.assertTrue(is_valid_php_version_file_version('5.0.1'))
self.assertTrue(is_valid_php_version_file_version('5.0.7'))
self.assertTrue(is_valid_php_version_file_version('5.0.30'))
self.assertTrue(is_valid_php_version_file_version('5.0.32'))
self.assertTrue(is_valid_php_version_file_version('5.1.0'))
self.assertTrue(is_valid_php_version_file_version('5.1.1'))
self.assertTrue(is_valid_php_version_file_version('5.1.3'))
self.assertTrue(is_valid_php_version_file_version('5.1.27'))
self.assertTrue(is_valid_php_version_file_version('7.0.0'))
self.assertTrue(is_valid_php_version_file_version('7.1.19'))
def test_minor_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.6'))
        self.assertTrue(is_valid_php_version_file_version('7.1'))
self.assertTrue(is_valid_php_version_file_version('7.2'))
def test_major_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.x'))
        self.assertTrue(is_valid_php_version_file_version('6.x'))
self.assertTrue(is_valid_php_version_file_version('7.x'))
self.assertTrue(is_valid_php_version_file_version('8.x'))
def test_major_dot_minor_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('7.0.x'))
self.assertTrue(is_valid_php_version_file_version('7.1.x'))
self.assertTrue(is_valid_php_version_file_version('7.2.x'))
def test_snapshot_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.4snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.5snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.6snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.1snapshot'))
Kiganshee/Flip-Sign | FlipDotSign.py | Python | cc0-1.0 | 9,790 | 0.005414 | """
Since this is basically firmware, it should be able to handle a lot of hard coding. Some of the elements,
especially the events to add, will be difficult to allow the user to send.
1. ???
2. List of message objects
3. Pop random element off list of message objects
4. When list reaches X entries, start the ??? process in a new thread to update the list of message objects
5. Repeat with new list of message objects
"""
from MessageClasses import *
from DisplayClasses import *
import googleapiclient.errors
import copy
import random
import time
import serial
from TransitionFunctions import *
from Generate_Layout import *
from MessageGenerator import *
from WeatherClasses import *
import os
z = SimpleTransition('', 'z')
fontsize = 9
minfontsize = 3
wait_time = 300
base_directory = os.path.dirname(__file__)
weather_API_key = open(os.path.join(base_directory,'WeatherKey.txt')).readline()
default_font_path = os.path.join(base_directory,'PressStart2P.ttf')
google_sheet_id = open(os.path.join(base_directory,'GoogleSheet.txt')).readline()
google_location_key = open(os.path.join(base_directory,'Google_Location_Key.txt')).readline()
home_location = input('Please enter zip code for home location: ')
def GetGoogleSheetData(sheetID, credentials, lstCalendars, lstTemporaryMessages):
# Create google sheets object
http = credentials.authorize(httplib2.Http())
discoveryurl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
result = {}
try:
SHEETS = discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=discoveryurl)
except httplib2.ServerNotFoundError:
raise IOError("No Internet Connection")
try_again = True
num_times = 1
# Service may be unavailable, so try at least 3 times, backing off during
while try_again:
try:
# if successful, then update TryAgain to get out of the loop
result = SHEETS.spreadsheets().values().get(spreadsheetId=sheetID, range="Messages!A:C").execute()
try_again = False
except googleapiclient.errors.HttpError:
num_times += 1
if num_times == 4:
# if we've done this 4 times, raise an ValueError to be caught by the calling function
raise ValueError
            # wait before trying again (randomized exponential backoff; ** not ^, which is XOR)
            time.sleep(int(random.random() * (2 ** num_times - 1)))
except httplib2.ServerNotFoundError:
raise IOError("No Internet Connection")
for processmessage in result['values']:
if processmessage[0] == "GCal":
lstCalendars.append(GoogleCalendar(processmessage[1], credentials))
elif processmessage[0] == "SpecificDateMessage":
lstTemporaryMessages.append(SpecificDateMessage(processmessage[1], parse(processmessage[2])))
elif processmessage[0] == "BasicTextMessage":
lstTemporaryMessages.append(BasicTextMessage(processmessage[1]))
elif processmessage[0] == "MessageGenerator":
lstGeneratedMessages = Message_Generator(processmessage[1],processmessage[2]).create_messages()
for Generated_Message in lstGeneratedMessages:
lstTemporaryMessages.append(Generated_Message)
elif processmessage[0] == "WeatherLocation":
location = WeatherLocation(processmessage[1], processmessage[2], weather_API_key, default_font_path)
lstTemporaryMessages.append(location.ten_day_forecast(rows=21, columns=168, daysfromnow=0))
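# Expected layout of the "Messages" sheet read above (columns A:C, dispatched
# on column A; values below are illustrative):
#   GCal,                <calendar id>
#   BasicTextMessage,    Hello world
#   SpecificDateMessage, Anniversary,  2020-06-01
#   MessageGenerator,    <type>,       <args>
#   WeatherLocation,     <name>,       <zip or city id>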
port = '/dev/ttyS0'
serialinterface = serial.Serial(port=port, baudrate=19600, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS,
timeout=1, stopbits=serial.STOPBITS_ONE)
Display = FlipDotDisplay(columns=168, rows=21, serialinterface=serialinterface, layout=Generate_Layout_2())
transition_functions = [SimpleTransition, dissolve_changes_only]
# set up list of transit messages - since this is static, it is done outside the loop
lstTransitMessages = []
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=235&routecode=10123&direction=0", "Street Car"))
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=145&routecode=10122&direction=0", "Magazine Bus"))
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=58&routecode=10121&direction=0", "Tchoup Bus"))
q = datetime.datetime(1990, 1, 1, 1, 1)
start_time = datetime.time(6,45)
end_time = datetime.time(23,00)
while True:
q = datetime.datetime(1990, 1, 1, 1, 1)
now_time_fix = q.now().time()
if start_time < now_time_fix < end_time:
# Reset list of calendars and messages to display
lstCalendars = []
lstMessagestoDisplay = []
lstTemporaryMessages = []
try:
# attempt to get new temporary messages and calendars from the google spreadsheet
# the "check" list is used so that the temporary messages list is only replaced if | the internet is up
check = []
GetGoogleSheetData(google_sheet_id, get_credentials(), lstCalendars, check)
lstTemporaryMessag | es = check
print("Pulled google sheet data")
except IOError:
# if the internet is down, do nothing
print("Found no internet connection when pulling google sheet data.")
pass
except ValueError:
print("No google service when opening google sheet.")
lstTemporaryMessages.append(BasicTextMessage("No Google Service"))
# for each calendar in the list of google calendars we want to display
# if the internet connection check earlier was unsuccessful, then this will be an empty list and the whole block
# will be skipped
for cal in lstCalendars:
# create a temporary list of messages from the google calendar routine
temp = []
try:
# run the message creation
in_tuple = cal.create_messages(5)
# the first element of the tuple is a list of event messages
temp = in_tuple[0]
# the second element of the tuple is a list of tuples
# first element of each tuple is the location string
# second element is the number of days until that event
for location in in_tuple[1]:
# turn the first element of each tuple into a weather location
weather_location = WeatherLocation(location[0], location[0], weather_API_key,
default_font_path, google_location_key=google_location_key,
home_location=home_location)
# get the forecast - go ahead a max of five days or until the event starts
num_of_days_until = min(5, location[1])
weather_forecast = weather_location.ten_day_forecast(rows=21, columns=168,
daysfromnow=num_of_days_until)
temp.append(weather_forecast)
print("Created messages from google calendar.")
except IOError:
                print("No internet connection when pulling from google calendar.")
                pass
# for each message we got back from GCal, add that to the list of temporary messages
for message in temp:
lstTemporaryMessages.append(message)
        # if it's between 7 and 9 AM, we care a lot more about transit than anything else, so add extra copies
if 6 < datetime.datetime.now().hour < 9:
for i in range(3):
lstMessagestoDisplay += copy.deepcopy(lstTransitMessages)
# build the list of messages to display
lstMessagestoDisplay += copy.deepcopy(lstTransitMessages)
lstMessagestoDisplay += lstTemporaryMessages
random.shuffle(lstMessagestoDisplay)
        # for each message
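        # Illustrative sketch (the original loop body is truncated here): one
        # way the shuffled messages might be rendered, assuming FlipDotDisplay
        # exposes a show(message, transition) method -- that method name is
        # hypothetical.
        #     for message in lstMessagestoDisplay:
        #         Display.show(message, random.choice(transition_functions))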
noemis-fr/old-custom | product_tags/product.py | Python | agpl-3.0 | 1,476 | 0.00542 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
class product_tag(orm.Model):
_name = "product.tag"
_columns = {
'name': fields.char('Tag Name', required=True, size=64),
}
class product_product(orm.Model):
_inherit = "product.product"
_columns = {
        'tag_ids' : fields.many2many('product.tag', 'product_product_tag_rel', 'product_id', 'tag_id', 'Tags'),
}
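# Illustrative usage sketch (not part of the module; the ids below are
# hypothetical): linking tags to a product through the ORM.
#     tag_id = self.pool.get('product.tag').create(cr, uid, {'name': 'promo'})
#     self.pool.get('product.product').write(
#         cr, uid, [product_id], {'tag_ids': [(6, 0, [tag_id])]})
# The (6, 0, ids) triple is the standard OpenERP many2many command that
# replaces the whole relation with the given id list.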
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lichenhui/guahao | guahao_beijing/login.py | Python | apache-2.0 | 2,160 | 0.043961 | #!/usr/bin/env python
#coding=utf8
# responsible for simulating the login
#
import pycurl
import StringIO
from urllib import urlencode
import get_login_code
def GetLoginInfo(post_data,cookie_file,proxy):
login_url = "http://www.bjguahao.gov.cn/comm/logon.php"
#cookie_file = './Cookie/cookie.txt'
ch = pycurl.Curl()
buffer_con = StringIO.StringIO()
header = [
"Content-Type: application/x-www-form-urlencoded; charset=UTF-8",
"Accept: */*",
"Cache-Control:max-age=0",
"Connection:keep-alive",
"Host:www.bjguahao.gov.cn",
"User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"X-Requested-With:XMLHttpRequest",
"Referer:http://www.bjguahao.gov.cn/comm/index.html",
"Origin:http://www.bjguahao.gov.cn",
]
# post_data = {
# "truename":"李晨辉",
# "sfzhm":"142726199305301214",
# "yzm":"4811"
# }
post_data = urlencode(post_data)
ch.setopt(ch.URL, login_url)
    ch.setopt(ch.VERBOSE, 1) # print verbose HTTP info
ch.setopt(ch.FOLLOWLOCATION, 1)
ch.setopt(ch.HTTPHEADER, header)
ch.setopt(ch.WRITEFUNCTION, buffer_con.write)
    ch.setopt(ch.POSTFIELDS, post_data) # data to POST
    ch.setopt(ch.COOKIEFILE, cookie_file)
    ch.setopt(ch.COOKIEJAR, cookie_file) # save cookies
    #ch.setopt(ch.PROXY, 'http://125.46.100.198:9999') # set a proxy server
    if proxy: ch.setopt(ch.PROXY, proxy) # set a proxy server
ch.perform()
html=buffer_con.getvalue()
buffer_con.close()
ch.close()
return html
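# Illustrative variant (not part of the original code): the curl handle above
# is not closed if perform() raises; wrapping the calls in try/finally would
# guarantee cleanup.
#     try:
#         ch.perform()
#         html = buffer_con.getvalue()
#     finally:
#         buffer_con.close()
#         ch.close()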
def Login(truename,id_card_num,cookie_file,code_img,proxy):
post_data = {}
post_data['truename'] = truename
post_data['sfzhm'] = id_card_num
post_data['yzm'] = get_login_code.GetLoginCode(cookie_file,code_img,proxy)
login_info = GetLoginInfo(post_data,cookie_file,proxy)
login_info = unicode(login_info,"gbk").encode('utf8')
return login_info
if __name__ == '__main__':
    #login_info = Login('张佳','640321199001020977','cookie.txt','login_code_img.gif','http://125.46.100.198:9999')
login_info = Login('李晨辉','142726199305301214','cookie.txt','login_code_img.gif',False)
    fo = open('login_response.txt', 'w')
fo.write(login_info)
fo.close()
print login_info
|
wackywendell/parm | setup.py | Python | bsd-3-clause | 1,332 | 0.003754 | #! /usr/bin/env python
# System imports
from setuptools import Extension, setup
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
module_opts = [
("sim2d", "pyparm/sim_wrap2d.cxx", ["-DVEC2D"]),
("sim3d", "pyparm/sim_wrap3d.cxx", ["-DVEC3D"]),
("sim2dlong", "pyparm/sim_wrap2dlong.cxx", ["-DVEC2D", "-DLONGFLOAT"]),
("sim3dlong", "pyparm/sim_wrap3dlong.cxx", ["-DVEC3D", "-DLONGFLOAT"])
]
hpp_files = ["src/collection.hpp", "src/constraints.hpp",
"src/interaction.hpp", "src/trackers.hpp", "src/box.hpp",
"src/vecrand.hpp"]
cpp_files = ["src/collection.cpp", "src/constraints.cpp",
"src/interaction.cpp", "src/trackers.cpp", "src/box.cpp",
"src/vecrand.cpp"]
swigged_modules = [
    Extension(
"_" + name,
[swig_file],
include_dirs=[numpy_include, "src"],
extra_compile_args=compile_opts + ["-DSWIG_TYPE_TABLE=" + name, '-std=c++98'],
) for name, swig_file, compile_opts in module_opts
]
setup(
name="pyparm",
description="None",
author="Wendell Smith",
version="0.2",
    packages=['pyparm'],
ext_modules=swigged_modules
)
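# Typical build invocation for the SWIG extensions above (illustrative):
#     python setup.py build_ext --inplace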
|
labsquare/CuteVariant | cutevariant/gui/plugins/word_set/__init__.py | Python | gpl-3.0 | 1,098 | 0.001821 | from PySide2.QtWidgets import QApplication
__title__ = "Wordsets editor"
__description__ = "A plugin to manage word sets"
__long_description__ = """
<p>This plugin allows to create sets of words that can be matched with the
attributes of the project's variants.</p>
<p>
Once the addition of a word set is started, a manual addition one by one of the
words is possible; for practical reasons it is however advisable to directly
import a text file containing merely 1 word per line.</p>
The set can be reworked at any time via an editor.<br>
<br>
<i>Example of use:</i><br>
<br>
<i>A user wishes to quickly filter all variants of a project related to a set of
relevant genes for him.
He therefore creates a word set and then makes a selection via:</i>
<ul>
<li>the <em>Filters Editor</em> plugin with a filter of the type:
<pre>gene IN ('WORDSET', 'my_word_set')</pre></li>
<li>the <em>VQL Editor</em> plugin with a VQL request of the type:
<pre>SELECT chr,pos,ref,alt,gene FROM variants WHERE gene IN WORDSET['my_word_set']</pre></li>
</ul>
"""
__author__ = "Sacha schutz"
__version__ = "1.0.0"
|
opengovt/ckan-agency-management-tool | mandrill_email.py | Python | agpl-3.0 | 2,565 | 0 | import os
import json
import jinja2
import logging
import datetime
from request import global_vars
from settings import MANDRILL_API_KEY, MANDRILL_SENDER
from settings import CURRENT_URL, MANDRILL_API_BASE_ENDPOINT
from google.appengine.api import urlfetch
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), autoescape=True)
def send_email(receiver_name=False, receiver_email=False, subject=False,
content={}, email_type=False, many_receiver=False):
if not subject or not email_type:
return False
data = {}
data['type'] = email_type
data['current_url'] = CURRENT_URL
data['email_content'] = content
data['receiver_name'] = receiver_name
data['receiver_email'] = receiver_email
data["footer_year"] = global_vars.datetime_now_adjusted.strftime("%Y")
template = jinja_environment.get_template('frontend/email-template.html')
content['date'] = datetime.datetime.utcnow().strftime('%B %d, %Y %H:%M:%S')
if receiver_name and receiver_email:
receiver = [{"email": receiver_email, "name": receiver_name}]
else:
receiver = []
if many_receiver:
for r in many_receiver:
data['receiver_name'] = r['name']
send_via_mandrill(
receiver=[{"email": r['email'], "name": r['name']}],
subject=subject, html=template.render(data),
plain_text=None, email_type=email_type)
return True
send_via_mandrill(
receiver=receiver, subject=subject,
html=template.render(data), plain_text=None, email_type=email_type)
return True
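# Illustrative call (all values are hypothetical; the keys expected in
# `content` depend on frontend/email-template.html):
#     send_email(receiver_name='Jane Doe', receiver_email='jane@example.com',
#                subject='Welcome', content={'body': 'Hello!'},
#                email_type='notification')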
def send_via_mandrill(receiver, subject, html=None,
plain_text=None, email_type=None):
data = {
"key": MANDRILL_API_KEY,
"message": {
"html": html,
"subject": subject,
"from_email": MANDRILL_SENDER,
"from_name": "Open Data Network",
"to": receiver,
"headers": {
"Reply-To": MANDRILL_SENDER
},
"tags": [
"notifications",
email_type
],
"important": True,
"track_opens": True,
"track_clicks": True,
"auto_text": True
},
"async": False
}
response = urlfetch.fetch(
url=MANDRILL_API_BASE_ENDPOINT + "messages/send.json",
method=urlfetch.POST, payload=json.dumps(data), deadline=30)
logging.debug(response.content)
|
lamby/pkg-fabric | tests/test_operations.py | Python | bsd-2-clause | 31,113 | 0.000418 | from __future__ import with_statement
import os
import shutil
import sys
import types
from contextlib import nested
from StringIO import StringIO
import unittest
import random
from nose.tools import raises, eq_, ok_
from fudge import with_patched_object
from fabric.state import env, output
from fabric.operations import require, prompt, _sudo_prefix, _shell_wrap, \
_shell_escape
from fabric.api import get, put, hide, show, cd, lcd, local, run, sudo, quiet
from fabric.sftp import SFTP
from fabric.exceptions import CommandTimeout
from fabric.decorators import with_settings
from utils import *
from server import (server, PORT, RESPONSES, FILES, PASSWORDS, CLIENT_PRIVKEY,
USER, CLIENT_PRIVKEY_PASSPHRASE)
#
# require()
#
def test_require_single_existing_key():
"""
When given a single existing key, require() throws no exceptions
"""
# 'version' is one of the default values, so we know it'll be there
require('version')
def test_require_multiple_existing_keys():
"""
When given multiple existing keys, require() throws no exceptions
"""
require('version', 'sudo_prompt')
@aborts
def test_require_single_missing_key():
"""
When given a single non-existent key, require() aborts
"""
require('blah')
@aborts
def test_require_multiple_missing_keys():
"""
When given multiple non-existent keys, require() aborts
"""
require('foo', 'bar')
@aborts
def test_require_mixed_state_keys():
"""
When given mixed-state keys, require() aborts
"""
require('foo', 'version')
@mock_streams('stderr')
def test_require_mixed_state_keys_prints_missing_only():
"""
When given mixed-state keys, require() prints missing keys only
"""
try:
require('foo', 'version')
except SystemExit:
err = sys.stderr.getvalue()
assert 'version' not in err
assert 'foo' in err
@aborts
def test_require_iterable_provided_by_key():
"""
When given a provided_by iterable value, require() aborts
"""
# 'version' is one of the default values, so we know it'll be there
def fake_providing_function():
pass
require('foo', provided_by=[fake_providing_function])
@aborts
def test_require_noniterable_provided_by_key():
"""
When given a provided_by noniterable value, require() aborts
"""
# 'version' is one of the default values, so we know it'll be there
def fake_providing_function():
pass
require('foo', provided_by=fake_providing_function)
@aborts
def test_require_key_exists_empty_list():
"""
When given a single existing key but the value is an empty list, require()
aborts
"""
# 'hosts' is one of the default values, so we know it'll be there
require('hosts')
@aborts
@with_settings(foo={})
def test_require_key_exists_empty_dict():
"""
When given a single existing key but the value is an empty dict, require()
aborts
"""
require('foo')
@aborts
@with_settings(foo=())
def test_require_key_exists_empty_tuple():
"""
When given a single existing key but the value is an empty tuple, require()
aborts
"""
require('foo')
@aborts
@with_settings(foo=set())
def test_require_key_exists_empty_set():
"""
When given a single existing key but the value is an empty set, require()
aborts
"""
require('foo')
@with_settings(foo=0, bar=False)
def test_require_key_exists_false_primitive_values():
"""
When given keys that exist with primitive values that evaluate to False,
require() throws no exception
"""
require('foo', 'bar')
@with_settings(foo=['foo'], bar={'bar': 'bar'}, baz=('baz',), qux=set('qux'))
def test_require_complex_non_empty_values():
"""
When given keys that exist with non-primitive values that are not empty,
require() throws no exception
"""
require('foo', 'bar', 'baz', 'qux')
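# The suite is typically driven by nose; an illustrative invocation would be:
#     nosetests tests/test_operations.py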
#
# prompt()
#
def p(x):
sys.stdout.write(x)
@mock_streams('stdout')
@with_patched_input(p)
def test_prompt_appends_space():
"""
prompt() appends a single space when no default is given
"""
s = "This is my prompt"
prompt(s)
eq_(sys.stdout.getvalue(), s + ' ')
@mock_streams('stdout')
@with_patched_input(p)
def test_prompt_with_default():
"""
    prompt() appends given default value plus one space on either side
"""
s = "This is my prompt"
d = "default!"
prompt(s, default=d)
eq_(sys.stdout.getvalue(), "%s [%s] " % (s, d))
#
# run()/sudo()
#
def test_sudo_prefix_with_user():
"""
_sudo_prefix() returns prefix plus -u flag for nonempty user
"""
eq_(
_sudo_prefix(user="foo", group=None),
"%s -u \"foo\" " % (env.sudo_prefix % env)
)
def test_sudo_prefix_without_user():
"""
_sudo_prefix() returns standard prefix when user is empty
"""
eq_(_sudo_prefix(user=None, group=None), env.sudo_prefix % env)
def test_sudo_prefix_with_group():
"""
_sudo_prefix() returns prefix plus -g flag for nonempty group
"""
eq_(
_sudo_prefix(user=None, group="foo"),
"%s -g \"foo\" " % (env.sudo_prefix % env)
)
def test_sudo_prefix_with_user_and_group():
"""
_sudo_prefix() returns prefix plus -u and -g for nonempty user and group
"""
eq_(
_sudo_prefix(user="foo", group="bar"),
"%s -u \"foo\" -g \"bar\" " % (env.sudo_prefix % env)
)
@with_settings(use_shell=True)
def test_shell_wrap():
prefix = "prefix"
command = "command"
for description, shell, sudo_prefix, result in (
("shell=True, sudo_prefix=None",
True, None, '%s "%s"' % (env.shell, command)),
("shell=True, sudo_prefix=string",
True, prefix, prefix + ' %s "%s"' % (env.shell, command)),
("shell=False, sudo_prefix=None",
False, None, command),
("shell=False, sudo_prefix=string",
False, prefix, prefix + " " + command),
):
eq_.description = "_shell_wrap: %s" % description
yield eq_, _shell_wrap(command, shell_escape=True, shell=shell, sudo_prefix=sudo_prefix), result
del eq_.description
@with_settings(use_shell=True)
def test_shell_wrap_escapes_command_if_shell_is_true():
"""
_shell_wrap() escapes given command if shell=True
"""
cmd = "cd \"Application Support\""
eq_(
_shell_wrap(cmd, shell_escape=True, shell=True),
'%s "%s"' % (env.shell, _shell_escape(cmd))
)
@with_settings(use_shell=True)
def test_shell_wrap_does_not_escape_command_if_shell_is_true_and_shell_escape_is_false():
"""
_shell_wrap() does no escaping if shell=True and shell_escape=False
"""
cmd = "cd \"Application Support\""
eq_(
_shell_wrap(cmd, shell_escape=False, shell=True),
'%s "%s"' % (env.shell, cmd)
)
def test_shell_wrap_does_not_escape_command_if_shell_is_false():
"""
_shell_wrap() does no escaping if shell=False
"""
cmd = "cd \"Application Support\""
eq_(_shell_wrap(cmd, shell_escape=True, shell=False), cmd)
def test_shell_escape_escapes_doublequotes():
"""
_shell_escape() escapes double-quotes
"""
cmd = "cd \"Application Support\""
eq_(_shell_escape(cmd), 'cd \\"Application Support\\"')
def test_shell_escape_escapes_dollar_signs():
"""
_shell_escape() escapes dollar signs
"""
cmd = "cd $HOME"
eq_(_shell_escape(cmd), 'cd \$HOME')
def test_shell_escape_escapes_backticks():
"""
_shell_escape() escapes backticks
"""
cmd = "touch test.pid && kill `cat test.pid`"
eq_(_shell_escape(cmd), "touch test.pid && kill \`cat test.pid\`")
class TestCombineStderr(FabricTest):
@server()
def test_local_none_global_true(self):
"""
combine_stderr: no kwarg => uses global value (True)
"""
output.everything = False
r = run("both_streams")
# Note: the exact way the streams are jumbled here is an implementation
# detail of our fake SSH server and may change in the future.
eq_("ssttddoeurtr", r.stdout)
eq_(r.stderr, "")
@server()
def test_ |
MicroWorldwide/tweeria | web/functions.py | Python | mit | 10,763 | 0.00223 | # -*- coding: utf-8 -*-
# General functions for web part and parser
import datetime
import time
import re
RE_MESSAGE_VARS = re.compile('(\{([^\}]*)\})')
RE_TAGS = re.compile('<[^<]+?>', re.U + re.I + re.M)
RE_PARAGRAPH = re.compile('\n', re.U + re.I + re.MULTILINE)
RE_OUT_FORMAT = re.compile('(\'|`)', re.U + re.I + re.M)
# others
def plural(n, text):
countString = ""
if (n > 0 and len(text) == 3):
if (n % 10 == 1 and n % 100 != 11):
countString = text[0]
elif (n % 10 >= 2 and n % 10 <= 4 and (n % 100 < 10 or n % 100 >= 20)):
countString = text[1]
else:
countString = text[2]
return countString
def pluralEnd(n, text):
countString = ""
if len(text) == 3:
n = int(n)
if n % 10 in (0, 1, 4, 5, 6, 9) or 9 < n < 21:
countString = text[0]
elif n % 10 in (2, 6, 7, 8):
countString = text[1]
elif n % 10 == 3:
countString = text[2]
return countString
def format_datetime(value, format='medium'):
if format == 'full':
format = "EEEE, d. MMMM y 'at' HH:mm"
elif format == 'medium':
format = "EE dd.MM.y HH:mm"
return datetime.datetime.fromtimestamp(int(value)).strftime('%d-%m-%Y %H:%M:%S')
def summAr(array, attribute=False):
total = 0
for val in array:
if attribute:
total += int(array[val][attribute])
else:
total += array[val]
return total
def getReadbleTime(incoming_time):
if incoming_time == 0:
return "At the Beginning"
cur_time = time.time()
event_time = cur_time - int(incoming_time)
time_array = [
{'second': event_time},
{'minute': event_time / 60},
{'hour': event_time / 3600},
{'day': event_time / 86400},
{'week': event_time / 604800},
{'month': event_time / 2592000},
{'year': event_time / 31536000},
]
timestring = ' '
for i in time_array:
if i.values()[0] >= 1:
timestring = str(int(i.values()[0])) + ' ' + i.keys()[0]
if i.values()[0] >= 2:
timestring += 's'
if timestring == ' ':
timestring = 'Right now'
else:
timestring = str(timestring) + ' ago'
return timestring
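# Illustrative behaviour of getReadbleTime (assuming the calls happen "now"):
#     getReadbleTime(0)                 -> "At the Beginning"
#     getReadbleTime(time.time() - 2)   -> "2 seconds ago"
#     getReadbleTime(time.time() - 90)  -> "1 minute ago"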
def getMessages(messages, with_time=True, host='', tags=None):
# format tags array to dict for fast search
tags_dict = {}
if tags:
for tag in tags:
tags_dict.update({tag['variable']: tag['name']})
count = 0
messages = messages[::-1]
for item in messages:
if 'data' in item:
count += 1
text = item['message']
clear_text = text
data = item['data']
res = re.findall(RE_MESSAGE_VARS, text)
for m in res:
pattern = ''
clear_pattern = ''
if m[1][0:3] == 'tag':
params = m[1].split('=', 2)
if len(params) > 1:
variable = params[1].strip()
if variable in tags_dict:
tag = '#' + tags_dict[variable]
clear_pattern = tag
if len(params) > 2 and params[2].strip() == 'show':
pattern = tag
if m[1][0:3] == 'url':
params = m[1].split('=', 2)
if len(params) > 1:
pattern = '<a href="{0}">{1}</a>'
if len(params) > 2:
url = params[2].strip()
name = params[1].strip()
else:
url = params[1].strip()
name = url
pattern = pattern.format(url, name)
clear_pattern = url
elif m[0] == '{playerpage}':
clear_pattern = host + data['player']
elif m[1] in data:
if m[0] == '{monster}':
pattern = '<span class="monster" rel="obj/2/' + str(int(data['monster_UID'])) + '">' + data['monster'] + '</span>'
clear_pattern = data['monster']
elif m[0] == '{player}':
pattern = '<a href="/' + data['player'] + '" class="player">' + data['player'] + '</a>'
clear_pattern = '@' + data['player']
elif m[0] == '{party}':
is_player = 'is_player' in data and data['is_player']
pattern = '<a href="/' + data['party'] + '" class="player is-player-' + str(is_player) + '">' + data['party'] + '</a>'
clear_pattern = '@' + data['party']
elif m[0] == '{lvl}':
pattern = str(int(data['lvl']))
elif m[0] == '{dungeon}':
pattern = '<span rel="obj/3/' + str(int(data['dungeon_UID'])) + '" class="dungeon">' + data['dungeon'] + '</span>'
clear_pattern = pattern
elif m[0] == '{item}':
if 'item_UID' in data:
pattern = '<a rel="obj/1/' + str(int(data['item_UID'])) + '" class="looted-item-normal">[' + data['item'] + ']</a>'
else:
pattern = '<a rel="obj/1/' + str(data['item_id']) + '" class="looted-item">[' + data['item'] + ']</a>'
clear_pattern = data['item']
elif m[0] == '{achv}':
pattern = '<a class="achv" rel="/obj/5/' + str(data['achv_UID']) + '">[' + data['achv'] + ']</a>'
clear_pattern = pattern
elif m[0] == '{spell}':
pattern = '<a class="achv" rel="/obj/10/' + str(data['spell_UID']) + '">[' + data['spell'] + ']</a>'
elif m[0] == '{poi}':
pattern = '<span class="poi" rel="/obj/9/' + str(data['poi_UID']) + '">[' + data['poi'] + ']</span>'
elif m[0] == '{quest}':
pattern = '<span class="q | uest" rel="/obj/13/' + str(data['quest_UID']) + '">[' + data['quest'] + ']</span>'
elif m[0] == '{winner_guild}':
pattern = '<span class="guild" rel="/obj/11/' + str(data['winner_guild']) + '">[' + data['winner_guild_name'] + ']</span>'
                    elif m[0] == '{looser_guild}':
pattern = '<span class="guild" rel="/obj/11/' + str(data['looser_guild']) + '">[' + data['looser_guild_name'] + ']</span>'
elif m[0] == '{winner_faction}':
pattern = '<span class="guild" rel="/obj/12/' + str(data['winner_faction']) + '">[' + data['winner_faction_name'] + ']</span>'
elif m[0] == '{looser_faction}':
pattern = '<span class="guild" rel="/obj/12/' + str(data['looser_faction']) + '">[' + data['looser_faction_name'] + ']</span>'
if not clear_pattern:
clear_pattern = pattern
clear_text = re.sub(m[0], clear_pattern, clear_text)
text = re.sub(m[0], pattern, text)
item['message'] = text
item['clear_message'] = clear_text
item['id'] = count
if with_time:
item['time'] = getReadbleTime(item['data']['time'])
return messages
def prettyItemBonus(item, stats_names):
primary = ''
if item['type'] in [1, 2, 3]:
primary_stat = 'DMG'
else:
primary_stat = 'DEF'
if primary_stat in item['bonus']:
if primary_stat == 'DMG':
primary = str(item['bonus'][primary_stat] - 1) + ' — ' + str(
item |
tapomayukh/projects_in_python | sandbox_tapo/src/refs/Python Examples_Pygame/Python Examples/pygame_base_template.py | Python | mit | 1,067 | 0.022493 | # Sample Python/Pygame Programs
# Simpson College Computer Science
# http://cs.simpson.edu
import pygame
# Define some colors
black = ( 0, 0, 0)
white = ( 255, 255, 255)
green = ( 0, 255, 0)
red = ( 255, 0, 0)
pygame.init()
# Set the height and width of the screen
size=[700,500]
screen=pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
#Loop until the user clicks the close button.
done=False
# Used to manage how fast the screen updates
clock=pygame.time.Clock()
# -------- Main Program Loop -----------
while done==False:
for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Set the screen background
screen.fill(black)
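    # All drawing code goes here; an illustrative example:
    # pygame.draw.circle(screen, green, (350, 250), 40)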
# Limit to 20 frames per second
clock.tick(20)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
|
nrc/dxr | tests/test_empty_tree/test_empty_tree.py | Python | mit | 241 | 0.004149 | from dxr.testing import DxrInstanceTestCase
class TestEmptyTree(DxrInstanceTestCase):
"""Tests for empty source tree"""
def test_empty(self):
"""Test empty"""
        self.found_nothing('path:*.*', is_case_sensitive=False)
|
AMOboxTV/AMOBox.LegoBuild | plugin.video.youtube/resources/lib/youtube/client/login_client.py | Python | gpl-2.0 | 12,698 | 0.00378 | __author__ = 'bromix'
import time
import urlparse
from resources.lib.kodion import simple_requests as requests
from resources.lib.youtube.youtube_exceptions import LoginException
class LoginClient(object):
CONFIGS = {
'youtube-tv': {
'system': 'All',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
# API KEY for search and channel infos. These should work most of the time without login to safe some quota
'youtube-for-kodi-quota': {
'token-allowed': False,
'system': 'All',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
'youtube-for-kodi-fallback': {
'token-allowed': False,
'system': 'Fallback!',
'key': 'AIzaSyAUjiaAUOcm6wmA8BHMloDby6U4RMtKLvs',
'id': '970514987436-b1rlhh1sf3fufqcvlm2a2taa2tq4t5uc.apps.googleusercontent.com',
'secret': 'zFaJYGEbvx329c8G_GPO5RJ3'
},
'youtube-for-kodi-12': {
'system': 'Frodo',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
'youtube-for-kodi-13': {
'system': 'Gotham',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
'youtube-for-kodi-14': {
'system': 'Helix',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
'youtube-for-kodi-15': {
'system': 'Isengard',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
},
'youtube-for-kodi-16': {
'system': 'Jarvis',
'key': 'AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA',
'id': '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com',
'secret': 'SboVhoG9s0rNafixCSGGKXAT'
}
}
def __init__(self, config={}, language='en-US', access_token='', access_token_tv=''):
if not config:
config = self.CONFIGS['youtube-for-kodi-fallback']
pass
self._config = config
self._config_tv = self.CONFIGS['youtube-tv']
# the default language is always en_US (like YouTube on the WEB)
if not language:
language = 'en_US'
pass
language = language.replace('-', '_')
language_components = language.split('_')
if len(language_components) != 2:
language = 'en_US'
pass
self._language = language
self._country = language.split('_')[1]
self._access_token = access_token
self._access_token_tv = access_token_tv
self._log_error_callback = None
pass
def set_log_error(self, callback):
self._log_error_callback = callback
pass
def log_error(self, text):
if self._log_error_callback:
self._log_error_callback(text)
pass
else:
print text
pass
pass
def revoke(self, refresh_token):
headers = {'Host': 'www.youtube.com',
'Connection': 'keep-alive',
'Origin': 'https://www.youtube.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.28 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Accept': '*/*',
'DNT': '1',
'Referer': 'https://www.youtube.com/tv',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
post_data = {'token': refresh_token}
# url
url = 'https://www.youtube.com/o/oauth2/revoke'
result = requests.post(url, data=post_data, headers=headers, verify=False)
if result.status_code != requests.codes.ok:
raise LoginException('Logout Failed')
pass
def refresh_token_tv(self, refresh_token, grant_type=''):
client_id = self.CONFIGS['youtube-tv']['id']
client_secret = self.CONFIGS['youtube-tv']['secret']
return self.refresh_token(refresh_token, client_id=client_id, client_secret=client_secret,
grant_type=grant_type)
def refresh_token(self, refresh_token, client_id='', client_secret='', grant_type=''):
headers = {'Host': 'www.youtube.com',
'Connection': 'keep-alive',
'Origin': 'https://www.youtube.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.28 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Accept': '*/*',
'DNT': '1',
'Referer': 'https://www.youtube.com/tv',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
_client_id = client_id
if not client_id:
_client_id = self._config['id']
pass
_client_secret = client_secret
if not _client_secret:
_client_secret = self._config['secret']
pass
post_data = {'client_id': _client_id,
'client_secret': _client_secret,
'refresh_token': refresh_token,
'grant_type': 'refresh_token'}
# url
url = 'https://www.youtube.com/o/oauth2/token'
result = requests.post(url, data=post_data, headers=headers, verify=False)
if result.status_code != requests.codes.ok:
raise LoginException('Login Failed')
if result.headers.get('content-type', '').startswith('application/json'):
json_data = result.json()
access_token = json_data['access_token']
expires_in = time.time() + int(json_data.get('expires_in', 3600))
return access_token, expires_in
return '', ''
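    # Illustrative usage (the token value is hypothetical):
    #     client = LoginClient()
    #     token, expires_at = client.refresh_token('1//example-refresh-token')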
def get_device_token_tv(self, code, client_id='', client_secret='', grant_type=''):
client_id = self.CONFIGS['youtube-tv']['id']
client_secret = self.CONFIGS['youtube-tv']['secret']
return self.get_device_token(code, client_id=client_id, client_secret=client_secret, grant_type=grant_type)
    def get_device_token(self, code, client_id='', client_secret='', grant_type=''):
headers = {'Host': 'www.youtube.com',
'Connection': 'keep-alive',
'Origin': 'https://www.youtube.com',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.28 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Accept': '*/*',
'DNT': '1',
'Referer': 'https://www.youtube.com/tv',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
_client_id = client_id
if not client_id:
_client_id = self._config['id']
pass
_client_secret = client_secret
if not _client_secret:
_client_secret = self._config['secret']
pass
post_data = |
ajdawson/eofs | lib/eofs/iris.py | Python | gpl-3.0 | 25,623 | 0 | """Meta-data preserving EOF analysis for `iris`."""
# (c) Copyright 2013-2016 Andrew Dawson. All Rights Reserved.
#
# This file is part of eofs.
#
# eofs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# eofs is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with eofs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function) # noqa
import collections
from copy import copy
from iris.cube import Cube
from iris.coords import DimCoord
from . import standard
from .tools.iris import get_time_coord, weights_array, classified_aux_coords
class Eof(object):
"""EOF analysis (meta-data enabled `iris` interface)"""
def __init__(self, cube, weights=None, center=True, ddof=1):
"""Create an Eof object.
The EOF solution is computed at initialization time. Method
calls are used to retrieve computed quantities.
**Argument:**
*dataset*
A `~iris.cube.Cube` instance containing the data to be
analysed. Time must be the first dimension. Missing values
are allowed provided that they are constant with time (e.g.,
values of an oceanographic field over land).
**Optional arguments:**
*weights*
Sets the weighting method. The following pre-defined
weighting methods are available:
* *'area'* : Square-root of grid cell area normalized by
total grid area. Requires a latitude-longitude grid to be
present in the `~iris.cube.Cube` *dataset*. This is a
fairly standard weighting strategy. If you are unsure
which method to use and you have gridded data then this
should be your first choice.
* *'coslat'* : Square-root of cosine of latitude. Requires a
latitude dimension to be present in the `~iris.cube.Cube`
*dataset*.
* *None* : Equal weights for all grid points (*'none'* is
also accepted).
Alternatively an array of weights whose shape is compatible
with the `~iris.cube.Cube` *dataset* may be supplied instead
of specifying a weighting method.
*center*
If *True*, the mean along the first axis of *dataset* (the
time-mean) will be removed prior to analysis. If *False*,
the mean along the first axis will not be removed. Defaults
to *True* (mean is removed).
The covariance interpretation relies on the input data being
anomalies with a time-mean of 0. Therefore this option
should usually be set to *True*. Setting this option to
*True* has the useful side effect of propagating missing
values along the time dimension, ensuring that a solution
can be found even if missing values occur in different
locations at different times.
*ddof*
'Delta degrees of freedom'. The divisor used to normalize
the covariance matrix is *N - ddof* where *N* is the
number of samples. Defaults to *1*.
**Returns:**
*solver*
An `Eof` instance.
**Examples:**
EOF analysis with grid-cell-area weighting for the input field::
from eofs.iris import Eof
solver = Eof(cube, weights='area')
"""
# Check that the input is an Iris cube.
if not isinstance(cube, Cube):
raise TypeError('the input must be an iris cube')
# Check for a time coordinate, raise an error if there isn't one.
# The get_time_coord function will raise a ValuerError with a
# useful message so no need to handle it explicitly here.
_time, self._time_dim = get_time_coord(cube)
self._time = copy(_time)
if self._time_dim != 0:
raise ValueError('time must be the first dimension, '
'consider using the transpose() method')
# Get the cube coordinates and remove time, leaving just the other
# dimensions.
self._coords = [copy(coord) for coord in cube.dim_coords]
self._coords.remove(self._time)
if not self._coords:
raise ValueError('one or more non-time dimensions are required')
# Store the auxiliary coordinates from the cube, categorising them into
# coordinates spanning time only, coordinates spanning space only, and
# coordinates spanning both time and space. This is helpful due to the
# natural separation of space and time in EOF analysis. The time and
# space spanning coordinates are only useful for reconstruction, as all
# other methods return either a temporal field or a spatial field.
(self._time_aux_coords,
self._space_aux_coords,
self._time_space_aux_coords) = classified_aux_coords(cube)
# Define the weights array for the cube.
if weights is None:
wtarray = None
else:
try:
scheme = weights.lower()
wtarray = weights_array(cube, scheme=scheme)
except AttributeError:
wtarray = weights
try:
# Ensure weights are the same type as the cube data.
wtarray = wtarray.astype(cube.data.dtype)
except AttributeError:
pass
# Initialize a solver.
self._solver = standard.Eof(cube.data,
weights=wtarray,
center=center,
ddof=ddof)
#: Number of EOFs in the solution.
self.neofs = self._solver.neofs
# Get the name of the cube to refer to later.
self._cube_name = cube.name(default='dataset').replace(' ', '_')
self._cube_var_name = cube.var_name
def pcs(self, pcscaling=0, npcs=None):
"""Principal component time series (PCs).
**Optional arguments:**
*pcscaling*
Set the scaling of the retrieved PCs. The following
values are accepted:
* *0* : Un-scaled principal components (default).
* *1* : Principal components are scaled to unit variance
(divided by the square-root of their eigenvalue).
* *2* : Principal components are multiplied by the
square-root of their eigenvalue.
*npcs*
Number of PCs to retrieve. Defaults to all the PCs. If the
number of requested PCs is more than the number that are
available, then all available PCs will be returned.
**Returns:**
*pcs*
A `~iris.cube.Cube` containing the ordered PCs. The PCs are
numbered from 0 to *npcs* - 1.
**Examples:**
All un-scaled PCs::
pcs = solver.pcs()
First 3 PCs scaled to unit variance::
pcs = solver.pcs(npcs=3, pcscaling=1)
"""
pcs = self._solver.pcs(pcscaling, npcs)
pcdim = DimCoord(list(range(pcs.shape[1])),
var_name='pc',
long_name='pc_number')
coords = [copy(self._time), pcdim]
pcs = Cube(
pcs,
dim_coords_and_dims=list(zip(coords, list(range(pcs.ndim)))),
var_name='pcs',
long_name='principal_components')
# Add any auxiliary coords spanning time back to the returned cube.
for coord, dims in self._time_aux_coords:
pcs.add_aux_coord(copy(coord), dims)
return pcs
def eofs(self, eofscaling=0, neofs=None):
"""Emipirical orthogonal functions (E |
whichwit/scm-stv | mlms/_get-mlms.py | Python | gpl-2.0 | 725 | 0.009655 | # -*- coding: utf-8 -*-
import pyodbc
cs = {
'server':'ahwsqlinind019.ind1.stvincent.org',
'database':'st2cpr1.tst_153',
#'database':'st1bprvb.st1',
'user':'scmis',
'pw':'year04',
}
conn = pyodbc.connect(
    driver='{SQL Server}',
server=cs['server'],
uid=cs['user'],
pwd=cs['pw'],
)
if not conn:
raise IOError
cursor = conn.cursor()
sql = """
select Name
, replace(convert(varchar(max),Logic),'{{{SINGLE-QUOTE}}}',char(39)) as Logic"""+"""
from {db}.dbo.CV3MLM
where Active = 1
and Status = 4
order by 1""".format(db=cs['database'] or 'st2cpr1.tst_153')
for row in cursor.execute(sql):
with open(row.Name, 'w', encoding='utf-8') as f:
        f.write(row.Logic)
|
sergei-maertens/discord-bot | bot/plugins/stats/resources.py | Python | mit | 619 | 0.001616 | from import_export import fields, resources
class GamesPlayedResource(resources.Resource):
game = fields.Field(attribute='game__name', column_name='game')
time = fields.Field(attribute='time', column_name='time (hours)')
num_players = fields.Field(attribute='num_players', column_name='num_players')
class Meta:
export_order = ['game', 'time', 'num_players']
    def dehydrate_game(self, obj):
return obj['game__name']
def dehydrate_time(self, obj):
return obj['time'].total_seconds() / 3600
def dehydrate_num_players(self, obj):
        return obj['num_players']
|
AustinRoy7/Pomodoro-timer | venv/Lib/os.py | Python | mit | 36,970 | 0.002218 | r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
    __all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
    __all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
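# Example: makedirs('a/b/c', exist_ok=True) creates the whole chain and does
# not raise if the leaf directory already exists.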
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs.  Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.
deepmind/xmanager | xmanager/xm/core.py | Python | apache-2.0 | 23,876 | 0.006451 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract API specification for XManager implementations.
Each implementation of the XManager API should override the abstract methods.
Users are normally expected to have the following pair of imports:
```
from xmanager import xm
from xmanager import xm_foo
```
"""
import abc
import asyncio
from concurrent import futures
import enum
import getpass
import inspect
import queue
import threading
from typing import Any, Awaitable, Callable, Collection, Dict, List, Mapping, Optional, Sequence, overload
import attr
from xmanager.xm import async_packager
from xmanager.xm import id_predictor
from xmanager.xm import job_blocks
from xmanager.xm import job_operators
from xmanager.xm import metadata_context
from xmanager.xm import pattern_matching
def _check_if_unsupported_args_are_present(args: Mapping[str, Any],
supported_args: Collection[str],
job_type: str) -> None:
supported_args = set(supported_args)
unsupported_args = set(args.keys()) - supported_args
if unsupported_args:
raise ValueError(
f'Arguments {unsupported_args!r} are not supported by {job_type}. Only '
f'{supported_args!r} are allowed.')
def _apply_args_to_job(job: job_blocks.Job, args: Mapping[str, Any]) -> None:
"""Overrides job properties."""
_check_if_unsupported_args_are_present(args, ('args', 'env_vars'), 'xm.Job')
if 'args' in args:
job.args = job_blocks.merge_args(job.args, args['args'])
job.env_vars.update(args.get('env_vars', {}))
def _apply_args_to_job_group(job_group: job_blocks.JobGroup,
args: Mapping[str, Any]) -> None:
"""Recursively overrides job group properties."""
if args:
_check_if_unsupported_args_are_present(args, job_group.jobs.keys(),
'xm.JobGroup')
for key, job in job_group.jobs.items():
_apply_args(job, args.get(key, {}))
_apply_args = pattern_matching.match(
_apply_args_to_job, _apply_args_to_job_group,
pattern_matching.Case([job_blocks.JobGeneratorType, Any],
lambda other, args: None))
class ExperimentUnitStatus(abc.ABC):
"""The status of an experiment unit."""
@property
@abc.abstractmethod
def is_active(self) -> bool:
"""Returns whether the unit is not in terminal state.
It may be actively running or queued. The unit may produce more results.
If the unit is stopped by a user it will be neither active, completed
nor failed.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_completed(self) -> bool:
"""Returns whether the unit has completed without failures.
This is a terminal state. The unit has produced all the intended results.
But it still may be restarted by an explicit request.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_failed(self) -> bool:
"""Returns whether the unit has failed.
This is a terminal state. Experiment unit will enter this state on any
fatal failure, such as process exiting with non-zero code, cloud rejecting
to schedule/queue the job or exceptions in JobGenerator. The unit will stay
in this state unless explicitly restarted.
Intermediate failures do not result in this state.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def message(self) -> str:
"""An optional human-readable message providing context for the status.
This may take the form of explaining why the work unit is in this state,
or any potentially transient errors the work unit may be experiencing.
"""
raise NotImplementedError
class ExperimentUnitError(RuntimeError):
"""Experiment unit could not be completed."""
class ExperimentUnitFailedError(ExperimentUnitError):
"""A job running in an experiment unit has failed."""
class ExperimentUnitNotCompletedError(ExperimentUnitError):
"""Experiment unit is neither running nor completed.
For example it may be stopped by a user.
"""
class NotFoundError(KeyError):
"""Experiment/Work Unit/etc. has not been found."""
def _work_unit_arguments(
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]],
) -> Mapping[str, Any]:
"""Constructs work unit arguments to display them in various UIs.
If users pass `args` to the `.add` method explicitly, we assume `args` to be
the sought work unit arguments. If `args` are not passed to `.add`, we deduce
work unit arguments implicitly from the `job`s' `args` and `env_vars`.
Args:
job: A job to run inside a work unit.
args: Explicitly specified arguments (could be empty).
Returns:
Depending on the type of the `job` given, can be one of the following:
- if it's an instance of `Job`, we return `{'args': job.args, 'env_vars':
job.env_vars}` with empty values omitted;
- if it's an instance of `JobGroup`, we recursively unwind the group while
populating corresponding nested dictionaries until we reach standalone
      `Job`s;
- if it's a job generator, we return `{}`.
"""
if args is not None:
# In order to give users control on what is shown as work unit arguments we
# don't alter them if a value is given.
return args
def deduce_args_for_job(job: job_blocks.Job) -> Dict[str, Any]:
args = {
'args': job.args.to_dict(kwargs_only=True),
'env_vars': job.env_vars
}
    return {key: value for key, value in args.items() if value}
def deduce_args_for_job_group(group: job_blocks.JobGroup) -> Dict[str, Any]:
args = {}
for job_name, job in group.jobs.items():
job_args = deduce_args(job)
if job_args:
args[job_name] = job_args
return args
deduce_args = pattern_matching.match(
deduce_args_for_job, deduce_args_for_job_group,
pattern_matching.Case([job_blocks.JobGeneratorType],
lambda generator: {}))
return deduce_args(job)
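# Illustrative example (values are hypothetical): for a plain Job with
# args={'batch_size': 64} and env_vars={'SEED': '1'}, the deduced work unit
# arguments are {'args': {'batch_size': 64}, 'env_vars': {'SEED': '1'}}.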
class Importance(enum.Enum):
"""How important it is to schedule particular Experiment or ExperimentUnit.
This is a hint to the scheduler. Not all schedulers take it into account
(xm_local doesn't). And even with smart scheduler a less important work unit
may run before a more important one e.g. if it uses a less contended resource.
Unlike ServiceTier, importance only controls preference within a team i.e. how
team's resources are divided between team's experiments. It has no effect on
resource allocation between teams.
"""
# High impact experiments. Try scheduling them even at the cost of significant
# reduction of the overall throughput that your experiments get.
HIGH = 'high'
# The default importance.
NORMAL = 'normal'
# Prefer to schedule other experiments with higher importance, but in overall
# try to maximize throughput.
LOW = 'low'
@attr.s(auto_attribs=True, kw_only=True)
class ExperimentUnitRole(abc.ABC):
"""The role of an experiment unit within the experiment structure.
Attributes:
importance: how important it is to schedule this executable unit comparing
to all your executable units (from all your experiments).
"""
importance: Importance = Importance.NORMAL
class ExperimentUnit(abc.ABC):
"""ExperimentUnit is a collection of semantically associated `Job`s."""
experiment: 'Experiment'
def __init__(self, experiment: 'Experiment',
create_task: Callable[[Awaitable[Any]], futures.Future],
args: Optional[Mapping[str,
|
google/layered-scene-inference | lsi/visualization/html_utils.py | Python | apache-2.0 | 1,782 | 0.00954 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for dumping tensorflow events to a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def html_page(content):
init_text = '<!DOCTYPE html>\n<html>\n<head>\n{}\n</head>\n<body>\n'.format(
page_style()
)
end_text = '\n</html>\n</body>\n'
return init_text + content + end_text
def page_style():
"""Html style string.
  Args:
Returns:
style_str: HTML sytle string
"""
style_str = '''<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 10px;
}
</style>'''
return style_str
def image(rel_path, caption, height, width):
img_str = '<img src="{}" alt="{}" style="width:{}px;height:{}px;">'.format(
rel_path, caption, width, height)
return img_str
def table(table_rows):
init_text = '<table style="width:100%">\n'
end_text = '</table>\n'
table_str = init_text
for tr in table_rows:
table_str += '<tr>\n{}</tr>\n'.format(tr)
table_str += end_text
return table_str
def table_row(table_cols):
row_str = ''
for tc in table_cols:
row_str += '<td>\n{}\n</td>\n'.format(tc)
return row_str
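# Illustrative usage sketch (file name and caption are hypothetical): compose
# a minimal page with a single image cell using the helpers above.
def example_page():
  img = image('imgs/render.png', 'rendered view', height=128, width=128)
  row = table_row([img, 'depth map'])
  return html_page(table([row]))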
|
comic/comic-django | app/grandchallenge/workstation_configs/forms.py | Python | apache-2.0 | 2,610 | 0.0023 | from django.forms import ModelForm
from grandchallenge.core.forms import SaveFormInitMixin
from grandchallenge.core.widgets import JSONEditorWidget
from grandchallenge.workstation_configs.models import (
KEY_BINDINGS_SCHEMA,
OVERLAY_SEGMENTS_SCHEMA,
WorkstationConfig,
)
class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
class Meta:
model = WorkstationConfig
fields = (
"title",
"description",
"image_context",
"window_presets",
"default_window_preset",
"default_slab_thickness_mm",
"default_slab_render_method",
"default_orientation",
"default_overlay_alpha",
"overlay_luts",
"default_overlay_lut",
"default_overlay_interpolation",
"overlay_segments",
"key_bindings",
"default_zoom_scale",
"show_image_info_plugin",
| "show_display_plugin",
"show_image_switcher_plugin",
"show_algorithm_output_plugin",
"show_overlay_plugin",
"show_invert_tool",
"show_flip_tool",
"show_window_level_tool",
"show_reset_tool",
"show_overlay_selection_tool",
"show_lut_selection_tool",
"enable_contrast_enhancement",
| "auto_jump_center_of_gravity",
)
widgets = {
"overlay_segments": JSONEditorWidget(
schema=OVERLAY_SEGMENTS_SCHEMA
),
"key_bindings": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),
}
help_texts = {
"overlay_segments": (
"If an categorical overlay is shown, it is possible to show toggles "
"to change the visibility of the different overlay categories. To do "
"so, configure the categories that should be displayed. Data from the"
" algorithm's output.json can be added as an extra label to each "
"toggle using jinja templating. "
'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
),
"image_context": "This tells the viewer how to show the images "
"defined in the hanging list",
"window_presets": "These are the window LUT presets the viewer can choose between. "
"By default, none are selected. "
"Select multiple presets by holding CTRL or dragging your mouse",
}
|
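The overlay_segments help text above describes a JSON list of category toggles. A hedged example of such a payload follows; the voxel value and labels are invented, and the authoritative shape is OVERLAY_SEGMENTS_SCHEMA, which is not included in this dump:
# Illustrative only: field names follow the example embedded in the help text.
example_overlay_segments = [
    {"voxel_value": 1, "name": "Lesion", "visible": True,
     "metric_template": "{{metrics.volumes[0]}} mm³"},
]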
afolmert/mentor | src/models.py | Python | gpl-2.0 | 12,011 | 0.003663 | #!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
#
# Copyright (C) 2007 Adam Folmert <afolmert@gmail.com>
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
#
"""This is the module for models used in Mentor GUI"""
import release
__author__ = '%s <%s>' % \
( release.authors['afolmert'][0], release.authors['afolmert'][1])
__license__ = release.license
__version__ = release.version
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from cards import Card, Cards
from utils import isstring, log
from utils_qt import tr
class CardModel(QAbstractItemModel):
"""Model to be used for list and tree view."""
class InvalidIndexError(Exception): pass
class ModelNotActiveError(Exception): pass
def __init__(self, parent=None):
        QAbstractItemModel.__init__(self, parent)
self.cards = Cards()
def _checkIndex(self, index):
if index is None or not index.isValid() or index == QModelIndex():
raise CardModel.InvalidIndexError, "Invalid index given"
def _checkActive(self):
if not self.isActive():
raise CardModel.ModelNotActiveError, "Model is not active. Use open first."
def open(self, dbpath):
self.cards.open(str(dbpath))
# FIXME why these do not work??
self.reset()
# ^ self.emit(SIGNAL('modelReset()'))
def close(self):
self.emit(SIGNAL('modelAboutToBeReset()'))
self.cards.close()
self.reset()
def filepath(self):
"""Returns path to currently open database"""
if self.cards.isOpen():
return self.cards.db_path
else:
return None
def isActive(self):
return self.cards.isOpen()
def parent(self, index):
return QModelIndex()
def rowCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
else:
if self.cards.isOpen():
return self.cards.getCardsCount()
else:
return 0
def columnCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
else:
if self.cards.isOpen():
return 5
else:
return 0
def index(self, row, column, parent=QModelIndex()):
if row < 0 or column < 0 or not self.cards.isOpen():
return QModelIndex()
else:
# returns index with given card id
header = self.cards.getCardHeaders('', row, row + 1)
if len(header) == 1:
return self.createIndex(row, column, int(header[0][0]))
else:
return QModelIndex()
# for display role only id+question in following columns will be
# for specific data , in the following columns
def data(self, index, role=Qt.DisplayRole):
self._checkIndex(index)
if role not in (Qt.DisplayRole, Qt.UserRole):
return QVariant()
card = self.cards.getCard(index.internalId())
if role == Qt.UserRole:
return card
else:
if index.column() == 0:
return QVariant('#%d %s' % (card.id, str(card.question).strip()))
elif index.column() == 1:
return QVariant('%s' % str(card.answer).strip())
elif index.column() == 2:
return QVariant('%s' % str(card.question_hint).strip())
elif index.column() == 3:
return QVariant('%s' % str(card.answer_hint).strip())
elif index.column() == 4:
return QVariant('%s' % str(card.score))
else:
return QVariant()
def flags(self, index):
        return QAbstractItemModel.flags(self, index) | Qt.ItemIsEnabled | Qt.ItemIsSelectable
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if section == 0:
return QVariant("Question")
elif section == 1:
return QVariant("Answer")
elif section == 2:
return QVariant(tr("Question hint"))
elif section == 3:
return QVariant(tr("Answer hint"))
elif section == 4:
return QVariant(tr("Score"))
else:
return QVariant()
else:
return QVariant(str(section))
return QVariant()
def getPreviousIndex(self, index):
"""Returns previous index before given or given if it's first."""
self._checkIndex(index)
if index.row() == 0:
return index
else:
return self.index(index.row() - 1, 0)
# pointer , get row before
def getNextIndex(self, index):
"""Returns next index after given or given if it's last."""
self._checkIndex(index)
if index.row() == self.rowCount() - 1:
return index
else:
return self.index(index.row() + 1, 0)
# get row after ?
# TODO
# what about inserting rows
# and moving rows up and down ??
# must have parameter position or display position ??
# TODO
# add special handlers like rowsAboutToBeInserted etc .
# right now only model to be reset
def addNewCard(self):
"""Adds a new empty card."""
self.emit(SIGNAL('modelAboutToBeReset()'))
rowid = self.cards.addCard(Card())
# TODO is it ok to return it here?
        result = self.createIndex(self.cards.getCardsCount() - 1, 0, rowid)  # new card is the last row
# cards.addCard(Card())
# TODO
# why these do not work ?
self.reset()
# self.emit(SIGNAL('modelReset()'))
#
return result
def deleteCard(self, index):
self._checkIndex(index)
self.emit(SIGNAL('modelAboutToBeReset()'))
self.cards.deleteCard(index.internalId())
# why these do not work??
self.reset()
# self.emit(SIGNAL('modelReset()'))
# cards - deleteCard card_id
# TODO question
# how to update card if peg is somewhere else ?
# maybe keep blob as well ?
# the items are then splitted
def updateCard(self, index, question, answer):
self._checkIndex(index)
card = Card(index.internalId(), question, answer)
self.cards.updateCard(card)
# update data in the model
        self.emit(SIGNAL('dataChanged(QModelIndex,QModelIndex)'), index, index)  # dataChanged takes a (topLeft, bottomRight) range
# TODO model should not have any | algorithms - it should be just as a proxy
# between database and any more | advanced algorithm
# e.g. database importer
# btw. they should use the same classes with the probe program
    # TODO progress bar for importing and possibility to cancel if it is a
    # long operation
def importQAFile(self, file, clean=True):
"""Import cards from given question&answer file.
@param file can be file name or file like object
"""
self.emit(SIGNAL('modelAboutToBeReset()'))
self._checkActive()
if isstring(file):
file = open(file, 'rt')
if clean:
self.cards.deleteAllCards()
prefix = ''
last_prefix = ''
card = Card()
for line in file.readlines():
if line.upper().startswith('Q:') or line.upper().startswith('A:'):
last_prefix = prefix
prefix = line[:2].upper()
line = |
AndreMiras/order_map_symbols | order_map_symbols.py | Python | mit | 2,082 | 0.00048 | #!/usr/bin/env python
import argparse
from operator import itemgetter
def extract_symbols_text(map_file_text):
start_pattern = "Common symbol size file"
stop_pattern = "Discarded input sections"
start_index = map_file_text.index(start_pattern) + len(start_pattern)
stop_index = map_file_text.index(stop_pattern)
map_file_text = map_file_text[start_index:stop_index]
return map_file_text
def extract_symbols_dicts(symbols_text):
symbols_dicts = []
line = {}
words = symbols_text.split()
stop = len(words)
step = 3
start = step
    for i in range(start, stop + 1, step):  # stop + 1 so the final triple is included
variable = words[i-3]
size_str = words[i-2]
size = int(size_str, 16)
filename = words[i-1]
| line = {
"variable": variable,
"size": size,
"filename": filename,
}
symbols_dicts.append(line)
return symbols_dicts
|
def sort_symbols(symbols_dicts):
sorted_symbols_dicts = sorted(symbols_dicts, key=itemgetter('size'))
return sorted_symbols_dicts
def print_symbols(symbols_dicts):
string_format = "%20s\t%20s\t%20s"
print(string_format % ("variable", "size", "filename"))
for symbols in symbols_dicts:
variable = symbols['variable']
size = symbols['size']
filename = symbols['filename']
print(string_format % (variable, size, filename))
def process(map_file):
map_file_text = map_file.read()
map_file.close()
symbols_text = extract_symbols_text(map_file_text)
symbols_dicts = extract_symbols_dicts(symbols_text)
sorted_symbols_dicts = sort_symbols(symbols_dicts)
print_symbols(sorted_symbols_dicts)
def parse_args():
parser = argparse.ArgumentParser(
description="Order map file symbols from command line.")
parser.add_argument('--map', type=argparse.FileType('r'), required=True)
args = parser.parse_args()
map_file = args.map
return map_file
def main():
map_file = parse_args()
process(map_file)
if __name__ == "__main__":
main()
|
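A self-contained sketch of the parsing pipeline above, fed a synthetic map-file fragment; the symbol names and sizes are invented:
# The two sentinel phrases must bracket the symbol table, and each entry is
# a (name, hex size, filename) triple, matching the 3-word stride above.
fake_map = (
    "... linker preamble ...\n"
    "Common symbol size file\n"
    "foo_buf 0x400 foo.o\n"
    "bar_tbl 0x20 bar.o\n"
    "Discarded input sections\n"
)
symbols = extract_symbols_dicts(extract_symbols_text(fake_map))
print_symbols(sort_symbols(symbols))  # bar_tbl (32) sorts before foo_buf (1024)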
sio2project/oioioi | oioioi/maintenancemode/apps.py | Python | gpl-3.0 | 115 | 0 | from django.apps import AppConfig
cl | ass MaintenancemodeAppConfig(AppConfig):
| name = "oioioi.maintenancemode"
|
geosolutions-it/cread-workflows | publishVector.py | Python | apache-2.0 | 1,125 | 0.010667 | from cread.geonodemanager import GeonodeManager
import os
import zipfile
# The data entry form
##DomainName=string host[:port]
##StoreName=string police_station_wgs84
##Layer=vector
##UserName= | string admin
##Password=string geoserver
# Get the path on FileSystem of the layer loaded on QGIS and selected by the user
myfilepath = processing.getObject(Layer).dataProvider().dataS | ourceUri()
# Access to the shapefile folder and create a zip to upload it using geoserver REST interface
(myDirectory,nameFile) = os.path.split(myfilepath)
zipf = zipfile.ZipFile(os.path.join(myDirectory,'shpUpload.zip'), 'w')
for file in os.listdir(myDirectory):
print file
if(str(file) != "shpUpload.zip"):
print myDirectory
print os.path.join(myDirectory, str(file))
print os.path.relpath(os.path.join(myDirectory, str(file)), myDirectory)
zipf.write(os.path.join(myDirectory, str(file)), str(file))
zipf.close()
zip = str(os.path.join(myDirectory, 'shpUpload.zip'))
# Publish a datastore (e.g. a vector layer)
GeonodeManager(UserName, Password, str(DomainName)).publish_datastore(zip, str(StoreName)) |
JointBox/jointbox | src/common/validators.py | Python | gpl-3.0 | 913 | 0.003286 | # JointBox - Your DIY smart home. Simplified.
# Copyright (C) 2017 Dmitry Berezov | sky
#
# JointBox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# JointBox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License fo | r more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def boolean(value):
return isinstance(value, bool)
def integer(value):
return isinstance(value, int)
def module_id(value):
    # Placeholder: any value is currently accepted as a module id.
    return True
|
rexdf/wp2octopress | wp2oct/authoring.py | Python | gpl-3.0 | 244 | 0 | # coding: utf-8
__author__ = 'Rexdf'
| __email__ = 'rexdf@rexdf.org'
__copyright__ = "Copyright 2014, %s <http://blog.rexdf.org>" % __author__
__license__ = 'GNU GPL 3'
__status__ = 'Development'
__url__ = 'http: | //github.com/rexdf/wp2octopress'
|
gramps-project/addons-source | DataEntryGramplet/DataEntryGramplet.gpr.py | Python | gpl-2.0 | 761 | 0.021025 | #------------------------------------------------------------------------
#
# Register Gramplet
#
#------------------------------------------------------------------------
register(GRAMPLET,
id="Data Entry Gramplet",
name=_("Data Entry Gramplet"),
description = _("Gramplet for quick data entry"),
height=375,
expand=False,
gramplet = 'DataEntryGramplet',
gramplet_title=_("Data Entry"),
| detached_width = 510,
detached_height = 480,
version = '1.0.46',
gramps_target_version = "5.1",
status=STABLE, # not yet tested with python 3
fname="DataEntryGramplet.py",
help_url="Data Entry Gramplet",
navtypes=[ | "Person"],
)
|
switch-model/switch-hawaii-studies | database/DB_Code/study_dates/study_hour.py | Python | apache-2.0 | 2,084 | 0.008637 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import os
import psycopg2
import sys
con = None
f = None
try:
################ CHANGE THESE PARAMETERS ONLY ################
con = psycopg2.connect(database='switch', host='localhost', port='5432', user='deepakc_super', password='myPassword')
################ CHANGE THESE PARAMETERS ONLY ################
cur = con.cursor()
# set the timezone that we will use to define sample dates (i.e., if we say a sample date corresponds to 7/31/2008, which time zone do we mean?)
# this should probably be read from some configuration file, since it varies from project to project
cur.execute("""SET TIME ZONE 'HST'""")
# make a master table of all historical dates and hours (this should really be moved to the scripts that import system loads or hourly weather data)
query = """
DROP TABLE IF EXISTS "date_time";
CREATE TABLE "date_time" AS (
SELECT DISTINCT CAST(date_trunc('day', date_time) AS DATE) as date, date_time
FROM system_load
ORDER by date_time
);
"""
    cur.execute(query)
    con.commit()
# note: eventually this may need to allow for shorter intervals instead of rounding hour_of_day to an integer
query = """
DROP TABLE IF EXISTS "study_hour";
CREATE TABLE "study_hour" (study_date int, study_hour int, hour_of_day smallint, date_time timestamp with time zone);
INSERT INTO "study_hour"
SELECT s.study_date,
s.study_date*100+CAST(EXTRACT(HOUR FROM d.date_time) AS INTEGER) AS study_hour,
CAST(EXTRACT(HOUR FROM d.date_time) AS SMALLINT) AS hour_of_day,
d.date_time
FROM study_date s INNER JOIN date_time d USING (date)
ORDER BY study_date, study_ho | ur;
"""
cur.execute(query)
con.commit()
except psycopg2.DatabaseError, e | :
if con:
con.rollback()
print 'Error %s' % e
sys.exit(1)
except IOError, e:
if con:
con.rollback()
print 'Error %s' % e
sys.exit(1)
finally:
if con:
con.close()
|
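The study_hour id packs the date id and the hour into one integer (study_date * 100 + hour_of_day). A quick sanity check of that scheme, with invented ids:
# Two decimal digits are reserved for hour_of_day (0-23), so the packing
# round-trips as long as study_date ids stay integral.
study_date, hour_of_day = 417, 13  # hypothetical values
study_hour = study_date * 100 + hour_of_day
assert study_hour == 41713
assert (study_hour // 100, study_hour % 100) == (study_date, hour_of_day)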
Starofall/RTX | rtxlib/preprocessors/__init__.py | Python | mit | 779 | 0.001284 | from colorama import Fore
from rtxlib import info, error, debug
from rtxlib.preprocessors.SparkPreProcessor import SparkPreProcessor
def init_pre_processors(wf):
""" we look into the workflows definition and run the required preprocessors """
if hasattr(wf, "pre_processors"):
pp = wf.pre_processors
for p in pp:
if p["type"] == "spark":
| p["instance"] = SparkPreProce | ssor(wf, p)
else:
info("> Preprocessor | None", Fore.CYAN)
def kill_pre_processors(wf):
""" after the experiment, we stop all preprocessors """
try:
for p in wf.pre_processors:
p["instance"].shutdown()
info("> Shutting down Spark preprocessor")
except AttributeError:
pass
|
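init_pre_processors and kill_pre_processors rely only on a duck-typed contract: a constructor taking (wf, definition) and a shutdown() method. A minimal stand-in that satisfies it, for illustration only (not part of RTX):
# Hypothetical preprocessor showing the interface assumed above.
class NoOpPreProcessor(object):
    def __init__(self, wf, definition):
        self.wf = wf
        self.definition = definition  # e.g. {"type": "noop"}
    def shutdown(self):
        pass  # nothing to tear down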
google-research/google-research | yoto/utils/__init__.py | Python | apache-2.0 | 608 | 0.001645 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache | License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of | the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
tomaaron/raiden | tools/genesis_builder.py | Python | mit | 1,166 | 0.000858 | # -*- coding: utf-8 -*-
from ethereum.utils import sha3, encode_hex, denoms
from raiden.utils import privatekey_to_address
from raiden.tests.utils.blockchain import GENESIS_STUB
CLUSTER_NAME = 'raiden'
def generate_accounts(seeds):
"""Create private keys and addresses for all seeds.
"""
return {
seed: d | ict(
privatekey=encode_hex(sha3(seed)),
address=encode_hex(privatekey_to_address(sha3(seed)))
) for seed in seeds}
def mk_genesis(accounts, initial_alloc=denoms.ether * 100000000):
"""
Create a genesis-block dict with allocation for all `accounts`.
:param accounts: list of account addresses (hex)
:param initial_alloc: the amount to allocate for the `accounts`
:return: genes | is dict
"""
genesis = GENESIS_STUB.copy()
genesis['extraData'] = CLUSTER_NAME
genesis['alloc'] = {
account: {
'balance': str(initial_alloc)
}
for account in accounts
}
# add the one-privatekey account ("1" * 64) for convenience
genesis['alloc']['19e7e376e7c213b7e7e7e46cc70a5dd086daff2a'] = dict(balance=str(initial_alloc))
return genesis
|
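A hedged usage sketch of the two helpers above; the seed strings are invented, and the snippet requires the imports at the top of this module:
# Derive two dev accounts and allocate the initial balance to both.
accounts = generate_accounts(['alice', 'bob'])
genesis = mk_genesis([acc['address'] for acc in accounts.values()])
assert genesis['extraData'] == CLUSTER_NAME
assert all(alloc['balance'] for alloc in genesis['alloc'].values())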
hdunderscore/hd-io_scene_fbx | __init__.py | Python | gpl-2.0 | 15,423 | 0.001426 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Autodesk FBX format",
"author": "Campbell Barton, Bastien Montagne",
"version": (3, 1, 0),
"blender": (2, 70, 0),
"location": "File > Import-Export",
"description": "Export FBX meshes, UV's, vertex colors, materials, "
"textures, cameras, lamps and actions",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Autodesk_FBX",
"support": 'OFFICIAL',
"category": "Import-Export"}
if "bpy" in locals():
import imp
if "import_fbx" in locals():
imp.reload(import_fbx)
if "export_fbx_bin" in locals():
imp.reload(export_fbx_bin)
if "export_fbx" in locals():
imp.reload(export_fbx)
import bpy
from bpy.props import (StringProperty,
BoolProperty,
FloatProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
path_reference_mode,
axis_conversion,
)
class ImportFBX(bpy.types.Operator, ImportHelper):
"""Load a FBX geometry file"""
bl_idname = "import_scene.fbx"
bl_label = "Import FBX"
bl_options = {'UNDO', 'PRESET'}
directory = StringProperty()
filename_ext = ".fbx"
filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
use_manual_orientation = BoolProperty(
name="Manual Orientation",
description="Specify orientation and scale, instead of using embedded data in FBX file",
default=False,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='-Z',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Y',
)
global_scale = FloatProperty(
name="Scale",
min=0.001, max=1000.0,
default=1.0,
)
use_image_search = BoolProperty(
name="Image Search",
description="Search subdirs for any associated images (Warning, may be slow)",
default=True,
)
use_alpha_decals = BoolProperty(
name="Alpha Decals",
description="Treat materials with alpha as decals (no shadow casting)",
default=False,
options={'HIDDEN'}
)
decal_offset = FloatProperty(
name="Decal Offset",
description="Displace geometry of alpha meshes",
min=0.0, max=1.0,
default=0.0,
options={'HIDDEN'}
)
def draw(self, context):
layout = self.layout
layout.prop(self, "use_manual_orientation"),
sub = layout.column()
sub.enabled = self.use_manual_orientation
sub.prop(self, "axis_forward")
sub.prop(self, "axis_up")
sub.prop(self, "global_scale")
layout.prop(self, "use_image_search")
#layout.prop(self, "use_alpha_decals")
layout.prop(self, "decal_offset")
def execute(self, context):
keywords = self.as_keywords(ignore=("filter_glob", "directory"))
keywords["use_cycles"] = (context.scene.render.engine == 'CYCLES')
from . import import_fbx
return import_fbx.load(self, context, **keywords)
class ExportFBX(bpy.types.Operator, ExportHelper):
"""Selection to an ASCII Autodesk FBX"""
bl_idname = "export_scene.fbx"
bl_label = "Export FBX"
bl_options = {'UNDO', 'PRESET'}
filename_ext = ".fbx"
filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
version = EnumProperty(
items=(('BIN7400', "FBX 7.4 binary", "Newer 7.4 binary version, still in development (no animation yet)"),
('ASCII6100', "FBX 6.1 ASCII", "Legacy 6.1 ascii version"),
),
name="Exporter Version",
description="Choose which version of the exporter to use",
default='BIN7400',
)
use_selection = BoolProperty(
name="Selected Objects",
description="Export selected objects on visible layers",
default=False,
)
global_scale = FloatProperty(
name="Scale",
description="Scale all data (Some importers do not support scaled armatures!)",
min=0.001, max=1000.0,
soft_min=0.01, soft_max=1000.0,
default=1.0,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='-Z',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up | ", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
defau | lt='Y',
)
# 7.4 only
bake_space_transform = BoolProperty(
name="Bake Space Transform",
description=("Bake space transform into object data, avoids getting unwanted rotations to objects when "
"target space is not aligned with Blender's space "
"(WARNING! experimental option, might give odd/wrong results)"),
default=False,
)
object_types = EnumProperty(
name="Object Types",
options={'ENUM_FLAG'},
items=(('EMPTY', "Empty", ""),
('CAMERA', "Camera", ""),
('LAMP', "Lamp", ""),
('ARMATURE', "Armature", ""),
('MESH', "Mesh", ""),
('OTHER', "Other", "Other geometry types, like curve, metaball, etc. (converted to meshes)"),
),
description="Which kind of object to export",
default={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH'},
)
use_mesh_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply modifiers to mesh objects (except Armature ones!)",
default=True,
)
recalc_split_normals = BoolProperty(
name="Recalc. Split Normals",
description="Recalc. normals? Otherwise export normals.",
default=True,
)
mesh_smooth_type = EnumProperty(
name="Smoothing",
items=(('OFF', "Off", "Don't write smoothing"),
('FACE', "Face", "Write face smoothing"),
('EDGE', "Edge", "Write edge smoothing"),
),
description="Export smoothing information (not mandatory if your target importer understand split normals)",
default='FACE',
)
use_mesh_edges = BoolProperty(
name="Loose |
nwokeo/supysonic | venv/lib/python2.7/site-packages/mutagen/_senf/_print.py | Python | agpl-3.0 | 10,191 | 0 | # -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import sys
import os
import ctypes
from ._fsnative import _encoding, is_win, is_unix, _surrogatepass
from ._compat import text_type, PY2, PY3
from ._winansi import AnsiState, ansi_split
from . import _winapi as winapi
def print_(*objects, **kwargs):
"""print_(*objects, sep=None, end=None, file=None, flush=False)
Args:
objects (object): zero or more objects to print
sep (str): Object separator to use, defaults to ``" "``
end (str): Trailing string to use, defaults to ``"\\n"``.
If end is ``"\\n"`` then `os.linesep` is used.
file (object): A file-like object, defaults to `sys.stdout`
flush (bool): If the file stream should be flushed
Raises:
EnvironmentError
Like print(), but:
* Supports printing filenames under Unix + Python 3 and Windows + Python 2
* Emulates ANSI escape sequence support under Windows
* Never fails due to encoding/dec | oding errors. Tries hard to get everything
on screen as is, but will fall back to "?" if all fails.
This does not conflict with ``colorama``, but will not use it on Windows.
"""
sep = kwargs.get("sep")
sep = sep if sep is not None else " "
end = kwargs.get("end")
end = end if end is not None else "\n"
file = kwargs.get("file")
file = file if file is not None else sys.stdout
flush = bool(kwargs.get("flush", False))
if is_win:
_print_windows(objects, | sep, end, file, flush)
else:
_print_unix(objects, sep, end, file, flush)
def _print_unix(objects, sep, end, file, flush):
"""A print_() implementation which writes bytes"""
encoding = _encoding
if isinstance(sep, text_type):
sep = sep.encode(encoding, "replace")
if not isinstance(sep, bytes):
raise TypeError
if isinstance(end, text_type):
end = end.encode(encoding, "replace")
if not isinstance(end, bytes):
raise TypeError
if end == b"\n":
end = os.linesep
if PY3:
end = end.encode("ascii")
parts = []
for obj in objects:
if not isinstance(obj, text_type) and not isinstance(obj, bytes):
obj = text_type(obj)
if isinstance(obj, text_type):
if PY2:
obj = obj.encode(encoding, "replace")
else:
try:
obj = obj.encode(encoding, "surrogateescape")
except UnicodeEncodeError:
obj = obj.encode(encoding, "replace")
assert isinstance(obj, bytes)
parts.append(obj)
data = sep.join(parts) + end
assert isinstance(data, bytes)
file = getattr(file, "buffer", file)
try:
file.write(data)
except TypeError:
if PY3:
# For StringIO, first try with surrogates
surr_data = data.decode(encoding, "surrogateescape")
try:
file.write(surr_data)
except (TypeError, ValueError):
file.write(data.decode(encoding, "replace"))
else:
# for file like objects with don't support bytes
file.write(data.decode(encoding, "replace"))
if flush:
file.flush()
ansi_state = AnsiState()
def _print_windows(objects, sep, end, file, flush):
"""The windows implementation of print_()"""
h = winapi.INVALID_HANDLE_VALUE
try:
fileno = file.fileno()
except (EnvironmentError, AttributeError):
pass
else:
if fileno == 1:
h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
elif fileno == 2:
h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)
encoding = _encoding
parts = []
for obj in objects:
if isinstance(obj, bytes):
obj = obj.decode(encoding, "replace")
if not isinstance(obj, text_type):
obj = text_type(obj)
parts.append(obj)
if isinstance(sep, bytes):
sep = sep.decode(encoding, "replace")
if not isinstance(sep, text_type):
raise TypeError
if isinstance(end, bytes):
end = end.decode(encoding, "replace")
if not isinstance(end, text_type):
raise TypeError
if end == u"\n":
end = os.linesep
text = sep.join(parts) + end
assert isinstance(text, text_type)
is_console = True
if h == winapi.INVALID_HANDLE_VALUE:
is_console = False
else:
# get the default value
info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
is_console = False
if is_console:
# make sure we flush before we apply any console attributes
file.flush()
# try to force a utf-8 code page, use the output CP if that fails
cp = winapi.GetConsoleOutputCP()
try:
encoding = "utf-8"
if winapi.SetConsoleOutputCP(65001) == 0:
encoding = None
for is_ansi, part in ansi_split(text):
if is_ansi:
ansi_state.apply(h, part)
else:
if encoding is not None:
data = part.encode(encoding, _surrogatepass)
else:
data = _encode_codepage(cp, part)
os.write(fileno, data)
finally:
# reset the code page to what we had before
winapi.SetConsoleOutputCP(cp)
else:
# try writing bytes first, so in case of Python 2 StringIO we get
# the same type on all platforms
try:
file.write(text.encode("utf-8", _surrogatepass))
except (TypeError, ValueError):
file.write(text)
if flush:
file.flush()
def _readline_windows():
"""Raises OSError"""
try:
fileno = sys.stdin.fileno()
except (EnvironmentError, AttributeError):
fileno = -1
# In case stdin is replaced, read from that
if fileno != 0:
return _readline_windows_fallback()
h = winapi.GetStdHandle(winapi.STD_INPUT_HANDLE)
if h == winapi.INVALID_HANDLE_VALUE:
return _readline_windows_fallback()
buf_size = 1024
buf = ctypes.create_string_buffer(buf_size * ctypes.sizeof(winapi.WCHAR))
read = winapi.DWORD()
text = u""
while True:
if winapi.ReadConsoleW(
h, buf, buf_size, ctypes.byref(read), None) == 0:
if not text:
return _readline_windows_fallback()
raise ctypes.WinError()
data = buf[:read.value * ctypes.sizeof(winapi.WCHAR)]
text += data.decode("utf-16-le", _surrogatepass)
if text.endswith(u"\r\n"):
return text[:-2]
def _decode_codepage(codepage, data):
"""
Args:
codepage (int)
data (bytes)
Returns:
`text`
Decodes data using the given codepage. If some data can't be decoded
using the codepage it will not fail.
"""
assert isinstance(data, bytes)
if not data:
return u""
# get the required buffer length first
length = winapi.MultiByteToWideChar(codepage, 0, data, len(data), None, 0)
if length == 0:
raise ctypes.WinError()
# now decode
buf = ctypes.create_unicode_buffer(length)
length = winapi.MultiByteToWideChar(
codepage, 0, data, len(data), buf, length)
if length == 0:
raise ctypes.WinError()
return buf[:]
def _encode_codepage(codepage, text):
"""
Args:
|
eckardm/archivematica | src/dashboard/src/components/api/urls.py | Python | agpl-3.0 | 1,766 | 0.002831 | # This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General P | ublic License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for | more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, patterns
from django.conf import settings
from components.api import views
urlpatterns = patterns('',
url(r'transfer/approve', views.approve_transfer),
url(r'transfer/unapproved', views.unapproved_transfers),
url(r'transfer/status/(?P<unit_uuid>' + settings.UUID_REGEX + ')', views.status, {'unit_type': 'unitTransfer'}),
url(r'transfer/start_transfer/', views.start_transfer_api),
url(r'ingest/status/(?P<unit_uuid>' + settings.UUID_REGEX + ')', views.status, {'unit_type': 'unitSIP'}),
url(r'ingest/waiting', views.waiting_for_user_input),
url(r'^(?P<unit_type>transfer|ingest)/(?P<unit_uuid>' + settings.UUID_REGEX + ')/delete/', views.mark_hidden),
url(r'^ingest/reingest', views.start_reingest),
url(r'administration/dips/atom/levels/$', views.get_levels_of_description),
url(r'administration/dips/atom/fetch_levels/$', views.fetch_levels_of_description_from_atom),
url(r'filesystem/metadata/$', views.path_metadata),
)
|
wearpants/osf.io | tests/test_prereg.py | Python | apache-2.0 | 3,366 | 0.000297 | from nose.tools import * # noqa
from modularodm import Q
from website.prereg import prereg_landing_page as landing_page
from website.prereg.utils import drafts_for_user, get_prereg_schema
from website.project.model import ensure_schemas, MetaSchema
from tests.base import OsfTestCase
from tests import factories
class TestPreregLandingPage(OsfTestCase):
def setUp(self):
super(TestPreregLandingPage, self).setUp()
ensure_schemas()
self.user = factories.UserFactory()
def test_no_projects(self):
assert_equal(
landing_page(user=self.user),
{
'has_projects': False,
'has_draft_registrations': False,
'campaign_long': 'Prereg Challenge',
'campaign_short': 'prereg',
}
)
def test_has_project(self):
factories.ProjectFactory(creator=self.user)
assert_equal(
landing_page(user=self.user),
{
'has_projects': True,
'has_draft_registrations': False,
'campaign_long': 'Prereg Challenge',
'campaign_short': 'prereg',
}
)
def test_has_project_and_draft_registration(self):
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge')
)
factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema
)
assert_equal(
landing_page(user=self.user),
{
'has_projects': True,
'has_draft_registrations': True,
'campaign_long': 'Prereg Challenge',
'campaign_short': 'prereg',
}
)
def test_drafts_for_user_omits_registered(self):
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
d1 = factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema
)
d2 = factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema
)
d3 = factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema
)
d1.registered_node = factories.ProjectFactory()
d1.save()
drafts = drafts_for_user(self.user, 'prereg')
for d in drafts:
assert_in(d._id, (d2._id, d3._id))
assert_not_equal(d._id, d1._id)
class TestPreregUtils(OsfTestCase):
def setUp(self):
super(TestPreregUtils, self).setUp()
ensure_schemas()
def test_get_prereg_schema_returns_prereg_metaschema(self):
schema = get_prereg_schema()
assert_is_ | instance(schema, MetaSchema)
assert_equal(schema.name, 'Prereg Challenge')
def test_get_prereg_schema_can_return_erpc_metaschema(self):
schema = get_prereg_schema('erpc')
assert_is_instance(schema, MetaSchema)
assert_equal(schema.name, 'Election Research Preacceptance Competition')
def test_get_prereg_schema_raises_error_for_invalid_campaign(self):
with | assert_raises(ValueError):
get_prereg_schema(campaign='invalid')
|
louisq/staticguru | utility/jdk_override.py | Python | mit | 1,445 | 0.00692 | """
The MIT License (MIT)
Copyright (c) 2016 Louis-Philippe Querel l_querel@encs.concordia.ca
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FI | TNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from u | tility.abstract_override import AbstractOverride
class JdkOverride(AbstractOverride):
def _get_override_format(self):
return 'JAVA_HOME="%s"'
def _get_default_format(self):
return ''
def __init__(self, *args):
AbstractOverride.__init__(self, *args)
self.name = "JDK"
|
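AbstractOverride itself is not included in this dump. Judging from the two format strings, a plausible reading is that it interpolates a supplied path into an environment prefix for build commands; a speculative sketch of such a parent, under that assumption:
# Speculative: the real class lives in utility.abstract_override and may
# differ. JdkOverride supplies the two _get_*_format hooks used here.
class AbstractOverrideSketch(object):
    def __init__(self, path=None):
        self.path = path
    def render(self):
        # e.g. JdkOverride('/usr/lib/jvm/java-8').render()
        #   -> 'JAVA_HOME="/usr/lib/jvm/java-8"'
        if self.path:
            return self._get_override_format() % self.path
        return self._get_default_format()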
rkrp/2timepad-breaker | crack.py | Python | gpl-3.0 | 1,264 | 0.006329 | #!/usr/bin/python
import re
from sys import argv
from orig import strxor
#Load dictionary for validation
with open('cracklib-small', 'r') as fp:
wordlist = set(fp.read().split('\n'))
def isprintable(inp):
words = inp.split(' ')
if len(words) == 1:
return False
for word in words:
word = word.strip()
if len(word) >= 4 and word in wordlist:
return True
return False
def main():
#Ciphertext to be cracked
ct = "32510ba9babebbbefd001547a810e67149caee11d945cd7 | fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904".decode('hex')
#Loading the other ciphertexts encrypted with the same key
with open('inputs', 'r') as fp:
cts = fp.read().split('\n')
text = argv[1] #Guessed part of the text
num = 0
for case_t in cts:
if not case_t:
continue
num += 1
print 'Case', num, ':'
ca | se_t = case_t.decode('hex')
c1c2 = strxor(ct, case_t)
for i in range(len(c1c2)):
res = strxor(text, c1c2[i:i+len(text)])
if isprintable(res):
print i, res
print
if __name__ == '__main__':
main()
|
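strxor is imported from an orig module that is not part of this dump; the attack only needs it to XOR two strings, because (p1 XOR k) XOR (p2 XOR k) == p1 XOR p2, so XORing two same-pad ciphertexts cancels the key and leaves plaintext against plaintext for crib dragging. A minimal Python 2 sketch under that assumption:
# Hypothetical strxor: XOR byte strings, truncating to the shorter input
# (crack.py slices c1c2 to len(text) before XORing, so truncation is safe).
def strxor_sketch(a, b):
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
key = 'secretpadsecretpad'
c1 = strxor_sketch('attack at dawn', key)
c2 = strxor_sketch('defend at dusk', key)
assert strxor_sketch(c1, c2) == strxor_sketch('attack at dawn', 'defend at dusk')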