| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
GadgetSteve/metrics
|
metrics/processargs.py
|
Python
|
mit
| 8,553
| 0.012043
|
"""Process command line arguments."""
import sys
import os
from optparse import OptionParser, BadOptionError
usage_str = """python metrics [ options ] pgm1.ex1 [ pgm2.ex2 ... ]
Metrics are computed for the source code files
pgm1.ex1, pgm2.ex2, etc. At least one file name is required;
otherwise this message appears.
Three types of output can be produced:
* Standard output for a quick summary of the main metrics.
Capitalized options negate the default option.
"""
class MyOptionParser(OptionParser):
"""Subclass OptionParser so I can override default error handler."""
def __init__( self, *args, **kwds ):
"""Just call super class's __init__ since we aren't making changes here."""
OptionParser.__init__( self, *args, **kwds )
def error( self, msg ):
"""Explicitly raise BadOptionError so calling program can handle it."""
raise BadOptionError( msg )
class ProcessArgsError( Exception ): pass
class ProcessArgs( object ):
"""Process command line arguments."""
def __init__( self,
*pArgs,
**pKwds
):
"""Initial processing of arguments."""
# default values for possible parameters
lib_name = ''
in_file_list = None
recurse_dir_list = None
self.include_metrics_str = 'sloc:SLOCMetric,mccabe:McCabeMetric'
exclude_metrics_str = None
quiet = False
verbose = 0
output_format = None
self.__dict__.update( locals() )
del( self.__dict__['self'] ) # remove recursive self from self.__dict__
self.__dict__.update( pKwds )
del( self.__dict__['pKwds'] ) # remove redunda
|
nt pKwds in self.__dict__
# set up option parser
parser = MyOptionParser( '', version="%prog 0.8.1" )
parser.add_option("-f", "--files",
dest="in_file_list",
default=self.in_file_list,
help="File containing list of path names to modules for analysis." )
parser.add_option("-r", "--recurse-dir",
dest="recurse_dir",
|
default= None,
help="Name of a directory to recurse into. (Default is '.')" )
parser.add_option("-i", "--include",
dest="include_metrics_str",
default=self.include_metrics_str,
help="list of metrics to include in run. This is a comma separated list of metric module names with no whitespace. Optionally, you can specify the class name of the metric by following the module name with a colon (:) and the metric class name. (Default metrics are 'mccabe:McCabeMetric,sloc:SLOCMetric'. Default metric class name for metric module 'wxYz' is 'WxYzMetric' when only module name given -- note capitalized metric class name.)" )
parser.add_option("-l", "--library",
dest="lib_name",
default=self.lib_name,
help="user-defined name applied to collection of modules (Default is '')" )
parser.add_option("-q", "--quiet",
action="store_true",
dest="quiet",
default=self.quiet,
help="suppress normal summary output to stdout. (Default is %s)" % (self.quiet) )
parser.add_option("-v", "--verbose",
action="count",
dest="verbose",
default=self.verbose,
help="Produce verbose output - more -v's produce more output. (Default is no verbose output to stdout)")
parser.add_option("--format",
dest="output_format_str",
default = self.output_format,
choices = ["xml", "csv"],
help="Choose an output format for a parser to read. Valid choices: xml, csv")
# parse the command line/arguments for this instance
try:
(options, args) = parser.parse_args()
except BadOptionError, e:
sys.stderr.writelines( "\nBadOptionError: %s\n" % str( e ) )
sys.stderr.writelines( "\nThe valid options are:\n\n" )
sys.stderr.writelines(parser.format_help())
sys.exit( 1 )
print 'options: %s' % options
print 'args: %s' % args
# augment parameter values from instantiation with
# command line values.
# the command line parameter values take precedence
# over values in program.
args.extend( pArgs )
# convert command line arguments into instance values
self.__dict__.update( options.__dict__ )
if self.in_file_list:
try:
inf = open( self.in_file_list )
files = [line.strip() for line in inf]
inf.close()
args.extend( files )
except IOError, e:
raise ProcessArgsError( e )
exclude = ['.svn', '.hg', '.CVS', '.git']
if self.recurse_dir:
start = self.recurse_dir
print "Recurse %s" % (start)
for (root, dirs, files) in os.walk(start):
newfiles = []
for excl in exclude:
if excl in dirs:
dirs.remove(excl)
newfiles.extend([os.path.join(root, fn) for fn in files])
#print root, len(newfiles), 'Files found!'
args.extend(newfiles)
self.in_file_names = args
self.include_metrics = self.process_include_metrics(self.include_metrics_str)
# standardize
if self.output_format_str is not None:
self.output_format_str = self.output_format_str.upper()
if len( args ) < 1:
print usage_str
print parser.format_help()
e = "No souce filenames given.\n"
# because of what I believe to be a bug in the doctest module,
# which makes it mishandle exceptions, I have 'faked' the handling
# of raising an exception and just return
# if doctestSw:
# print e
# return
# else:
raise ProcessArgsError( e )
def conflict_handler(self, *args, **kwds):
print "args=%s" % args
print "kwds=%s" % kwds
def process_include_metrics(self, include_metrics_str):
include_metrics = []
try:
metric_list = include_metrics_str.split( ',' )
for a in metric_list:
s = a.split( ':' )
if len( s ) == 2: # both metric class and module name given
include_metrics.append( s )
elif len( s ) == 1:
# only the module name given. Generate default metric
# class name by capitalizing first letter of module
# name and appending "Metric" so the default metric
# class name for module wxYz is WxYzMetric.
if s[0]:
defName = s[0][0].upper() + s[0][1:] + 'Metric'
include_metrics.append( (s[0], defName) )
else:
raise ProcessArgsError("Missing metric module name")
else:
raise ProcessArgsError("Malformed items in includeMetric string")
except AttributeError, e:
e = ( "Invalid list of metric names: %s" %
include_metrics_str )
raise ProcessArgsError( e )
return include_metrics
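# Illustration (hypothetical argument, not part of the original module): an
# include string such as 'sloc,mccabe:McCabeMetric' resolves to the pairs
# ('sloc', 'SlocMetric') and ('mccabe', 'McCabeMetric'), since a bare module
# name gets a default class name built by capitalizing the module name and
# appending 'Metric'.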
def testpa( pa ):
"""Test of ProcessArgs.
Usage:
>>> pa=ProcessArgs('inFile.py')
>>> testpa(pa) #doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Arguments processed:
Include Metric Modules=sloc:SLOCMetric,mccabe:McCabeMetric
quiet=False
verbose=0
Metrics to be used are:
Module sloc contains metric class SLOCMetric
Module mccabe contains metric class McCabeMetric
Input files:
inFile.py
>>>
"""
print """Arguments processed:
\tIncl
|
nischalsheth/contrail-controller
|
src/config/utils/provision_bgp.py
|
Python
|
apache-2.0
| 7,230
| 0.002905
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import json
import copy
from netaddr import IPNetwork
from pprint import pformat
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
def get_ip(ip_w_pfx):
return str(IPNetwork(ip_w_pfx).ip)
# end get_ip
class BgpProvisioner(object):
def __init__(self, user, password, tenant, api_server_ip, api_server_port,
api_server_use_ssl=False, use_admin_api=False):
self._admin_user = user
self._admin_password = password
self._admin_tenant_name = tenant
self._api_server_ip = api_server_ip
self._api_server_port = api_server_port
self._api_server_use_ssl = api_server_use_ssl
self._vnc_lib = VncApiAdmin(
use_admin_api, self._admin_user, self._admin_password,
self._admin_tenant_name,
self._api_server_ip,
self._api_server_port, '/',
api_server_use_ssl=self._api_server_use_ssl)
# end __init__
def _get_rt_inst_obj(self):
vnc_lib = self._vnc_lib
# TODO pick fqname hardcode from common
rt_inst_obj = vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project',
'ip-fabric', '__default__'])
return rt_inst_obj
# end _get_rt_inst_obj
def add_bgp_router(self, router_type, router_name, router_ip,
router_asn, address_families=[], md5=None):
if not address_families:
address_families = ['route-target', 'inet-vpn', 'e-vpn', 'erm-vpn',
'inet6-vpn']
if router_type != 'control-node':
address_families.remove('erm-vpn')
if router_type != 'control-node':
if 'erm-vpn' in address_families:
raise RuntimeError("Only contrail bgp routers can support "
"family 'erm-vpn'")
bgp_addr_fams = AddressFamilies(address_families)
bgp_sess_attrs = [
BgpSessionAttributes(address_families=bgp_addr_fams)]
bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)]
bgp_peering_attrs = BgpPeeringAttributes(session=bgp_sessions)
rt_inst_obj = self._get_rt_inst_obj()
vnc_lib = self._vnc_lib
if router_type == 'control-node':
vendor = 'contrail'
elif router_type == 'router':
vendor = 'mx'
else:
vendor = 'unknown'
router_params = BgpRouterParams(router_type=router_type,
vendor=vendor, autonomous_system=int(router_asn),
identifier=get_ip(router_ip),
address=get_ip(router_ip),
port=179, address_families=bgp_addr_fams)
bgp_router_obj = BgpRouter(router_name, rt_inst_obj,
bgp_router_parameters=router_params)
# Return early with a log if it already exists
try:
fq_name = bgp_router_obj.get_fq_name()
existing_obj = vnc_lib.bgp_router_read(fq_name=fq_name)
if md5:
bgp_params = existing_obj.get_bgp_router_parameters()
# set md5
print "Setting md5 on the existing uuid"
md5 = {'key_items': [ { 'key': md5 ,"key_id":0 } ], "key_type":"md5"}
bgp_params.set_auth_data(md5)
existing_obj.set_bgp_router_parameters(bgp_params)
vnc_lib.bgp_router_update(existing_obj)
print
|
("BGP Router " + pformat(fq_name) +
" already exists with uuid " + existing_obj.uuid)
return
except NoIdError:
pass
cur_id = vnc_lib.bgp_router_create(bgp_router_obj)
cur_obj = vnc_lib.bgp_router_read(id=cur_id)
# full-mesh with existing bgp routers
fq_name = rt_inst_obj.get_fq_name()
bgp_router_list = vnc_lib.bgp_routers_list(parent
|
_fq_name=fq_name)
bgp_router_ids = [bgp_dict['uuid']
for bgp_dict in bgp_router_list['bgp-routers']]
bgp_router_objs = []
for id in bgp_router_ids:
bgp_router_objs.append(vnc_lib.bgp_router_read(id=id))
for other_obj in bgp_router_objs:
if other_obj.uuid == cur_id:
continue
cur_obj.add_bgp_router(other_obj, bgp_peering_attrs)
if md5:
md5 = {'key_items': [ { 'key': md5 ,"key_id":0 } ], "key_type":"md5"}
rparams = cur_obj.bgp_router_parameters
rparams.set_auth_data(md5)
cur_obj.set_bgp_router_parameters(rparams)
vnc_lib.bgp_router_update(cur_obj)
# end add_bgp_router
def del_bgp_router(self, router_name):
vnc_lib = self._vnc_lib
rt_inst_obj = self._get_rt_inst_obj()
fq_name = rt_inst_obj.get_fq_name() + [router_name]
cur_obj = vnc_lib.bgp_router_read(fq_name=fq_name)
# remove full-mesh with existing bgp routers
fq_name = rt_inst_obj.get_fq_name()
bgp_router_list = vnc_lib.bgp_routers_list(parent_fq_name=fq_name)
bgp_router_ids = [bgp_dict['uuid']
for bgp_dict in bgp_router_list['bgp-routers']]
bgp_router_objs = []
for id in bgp_router_ids:
bgp_router_objs.append(vnc_lib.bgp_router_read(id=id))
for other_obj in bgp_router_objs:
if other_obj.uuid == cur_obj.uuid:
# our refs will be dropped on delete further down
continue
other_obj.del_bgp_router(cur_obj)
vnc_lib.bgp_router_delete(id=cur_obj.uuid)
# end del_bgp_router
def add_route_target(self, rt_inst_fq_name, router_asn,
route_target_number):
vnc_lib = self._vnc_lib
rtgt_val = "target:%s:%s" % (router_asn, route_target_number)
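# For illustration (hypothetical values): router_asn=64512 and
# route_target_number=10000 yield rtgt_val = 'target:64512:10000'.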
net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1])
route_targets = net_obj.get_route_target_list()
if route_targets:
route_targets.add_route_target(rtgt_val)
else:
route_targets = RouteTargetList([rtgt_val])
net_obj.set_route_target_list(route_targets)
vnc_lib.virtual_network_update(net_obj)
# end add_route_target
def del_route_target(self, rt_inst_fq_name, router_asn,
route_target_number):
vnc_lib = self._vnc_lib
rtgt_val = "target:%s:%s" % (router_asn, route_target_number)
net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1])
if rtgt_val not in net_obj.get_route_target_list().get_route_target():
print "%s not configured for VN %s" % (rtgt_val,
rt_inst_fq_name[:-1])
return
route_targets = net_obj.get_route_target_list()
route_targets.delete_route_target(rtgt_val)
if route_targets.get_route_target():
net_obj.set_route_target_list(route_targets)
else:
net_obj.set_route_target_list(None)
vnc_lib.virtual_network_update(net_obj)
# end del_route_target
# end class BgpProvisioner
|
dials/dials
|
tests/test_plot_reflections.py
|
Python
|
bsd-3-clause
| 445
| 0
|
from __future__ import annotations
|
import procrunner
def test_run(dials_data, tmp_path):
procrunner.run(
(
|
"dials.plot_reflections",
dials_data("centroid_test_data") / "experiments.json",
dials_data("centroid_test_data") / "integrated.refl",
"scan_range=0,5",
),
working_directory=tmp_path,
).check_returncode()
assert (tmp_path / "centroids.png").is_file()
|
healthchecks/healthchecks
|
hc/front/tests/test_add_discord.py
|
Python
|
bsd-3-clause
| 1,235
| 0
|
from django.test.utils import override_settings
from hc.test import BaseTestCase
@override_settings(DISCORD_CLIENT_ID="t1", DISCORD_CLIENT_SECRET="s1")
class AddDiscordTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_discord/" % self.project.code
def test_instructions_work(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.asser
|
tContains(r, "Connect Discord", status_code=200)
self.assertContains(r, "discordapp.com/api/oauth2/authorize")
# There should now be a key in session
|
self.assertTrue("add_discord" in self.client.session)
@override_settings(DISCORD_CLIENT_ID=None)
def test_it_requires_client_id(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
def test_it_requires_rw_access(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
self.client.login(username="bob@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
|
yugangzhang/GitTest
|
CMS_Profile/94-sample.py
|
Python
|
bsd-3-clause
| 105,895
| 0.016292
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4
################################################################################
# Code for defining a 'Sample' object, which keeps track of its state, and
# simplifies the task of aligning, measuring, etc.
################################################################################
# Known Bugs:
# N/A
################################################################################
# TODO:
# - Search for "TODO" below.
# - Ability to have a collection of simultaneous motions? (E.g. build up a set
# of deferred motions?)
# - Use internal naming scheme to control whether 'saxs'/'waxs' is put in the
# filename
################################################################################
import time
import re
import os
import shutil
class CoordinateSystem(object):
"""
A generic class defining a coordinate system. Several coordinate systems
can be layered on top of one another (with a reference to the underlying
coordinate system given by the 'base_stage' pointer). When motion of a given
CoordinateSystem is requested, the motion is passed (with coordinate
conversion) to the underlying stage.
"""
hint_replacements = { 'positive': 'negative',
'up': 'down',
'left': 'right',
'towards': 'away from',
'downstream': 'upstream',
'inboard': 'outboard',
'clockwise': 'counterclockwise',
'CW': 'CCW',
}
# Core methods
########################################
def __init__(self, name='<unnamed>', base=None, **kwargs):
'''Create a new CoordinateSystem (e.g. a stage or a sample).
Parameters
----------
name : str
Name for this stage/sample.
base : Stage
The stage on which this stage/sample sits.
'''
self.name = name
self.base_stage = base
self.enabled = True
self.md = {}
self._marks = {}
self._set_axes_definitions()
self._init_axes(self._axes_definitions)
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
# The _axes_definitions array holds a list of dicts, each defining an axis
self._axes_definitions = []
def _init_axes(self, axes):
'''Internal method that generates method names to control the various axes.'''
# Note: Instead of defining CoordinateSystem() having methods '.x', '.xr',
# '.y', '.yr', etc., we programmatically generate these methods when the
# class (and subclasses) are instantiated.
# Thus, the Axis() class has generic versions of these methods, which are
# appropriately renamed (bound, actually) when a class is instantiated.
self._axes = {}
for axis in axes:
axis_object = Axis(axis['name'], axis['motor'], axis['enabled'], axis['scaling'], axis['units'], axis['hint'], self.base_stage, stage=self)
self._axes[axis['name']] = axis_object
# Bind the methods of axis_object to appropriately-named methods of
# the CoordinateSystem() class.
setattr(self, axis['name'], axis_object.get_position )
setattr(self, axis['name']+'abs', axis_object.move_absolute )
setattr(self, axis['name']+'r', axis_object.move_relative )
setattr(self, axis['name']+'pos', axis_object.get_position )
setattr(self, axis['name']+'posMotor', axis_object.get_motor_position )
setattr(self, axis['name']+'units', axis_object.get_units )
setattr(self, axis['name']+'hint', axis_object.get_hint )
setattr(self, axis['name']+'info', axis_object.get_info )
setattr(self, axis['name']+'set', axis_object.set_current_position )
setattr(self, axis['name']+'o', axis_object.goto_origin )
setattr(self, axis['name']+'setOrigin', axis_object.set_origin )
setattr(self, axis['name']+'mark', axis_object.mark )
setattr(self, axis['name']+'search', axis_object.search )
setattr(self, axis['name']+'scan', axis_object.scan )
setattr(self, axis['name']+'c', axis_object.center )
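# For illustration (hypothetical axis name): an axis defined with name 'x'
# ends up exposing self.x / self.xpos (read position), self.xabs and self.xr
# (absolute and relative moves), self.xo / self.xsetOrigin (origin handling),
# and self.xsearch / self.xscan / self.xc (search, scan, center), all bound
# from the same Axis object by the loop above.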
def comment(self, text, logbooks=None, tags=None, append_md=True, **md):
'''Add a comment related to this CoordinateSystem.'''
text += '\n\n[comment for CoordinateSystem: {}
|
({})].'.format(self.name, self.__class__.__name__)
if append_md:
md_current = { k : v for k, v in RE.md.items() } # Global md
md_current.update(get_beamline().get_md()) # Beamline md
# Self md
#md_current.update(self.get_md())
# Specified md
md_current.update(md)
text += '\n\n\nMetadata\n----------------------------------------
|
'
for key, value in sorted(md_current.items()):
text += '\n{}: {}'.format(key, value)
logbook.log(text, logbooks=logbooks, tags=tags)
def set_base_stage(self, base):
self.base_stage = base
self._init_axes(self._axes_definitions)
# Convenience/helper methods
########################################
def multiple_string_replacements(self, text, replacements, word_boundaries=False):
'''Perform multiple string replacements simultaneously. Matching is case-insensitive.
Parameters
----------
text : str
Text to return modified
replacements : dictionary
Replacement pairs
word_boundaries : bool, optional
Decides whether replacements only occur for words.
'''
# Code inspired from:
# http://stackoverflow.com/questions/6116978/python-replace-multiple-strings
# Note inclusion of r'\b' sequences forces the regex-match to occur at word-boundaries.
if word_boundaries:
replacements = dict((r'\b'+re.escape(k.lower())+r'\b', v) for k, v in replacements.items())
pattern = re.compile("|".join(replacements.keys()), re.IGNORECASE)
text = pattern.sub(lambda m: replacements[r'\b'+re.escape(m.group(0).lower())+r'\b'], text)
else:
replacements = dict((re.escape(k.lower()), v) for k, v in replacements.items())
pattern = re.compile("|".join(replacements.keys()), re.IGNORECASE)
# look up the lowercased, escaped match to mirror the keys built above
text = pattern.sub(lambda m: replacements[re.escape(m.group(0).lower())], text)
return text
def _hint_replacements(self, text):
'''Convert a motor-hint into its logical inverse.'''
# Generates all the inverse replacements
replacements = dict((v, k) for k, v in self.hint_replacements.items())
replacements.update(self.hint_replacements)
return self.multiple_string_replacements(text, replacements, word_boundaries=True)
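# For illustration (hypothetical hint text): a hint like
# 'positive moves the sample up and downstream' would be inverted to
# 'negative moves the sample down and upstream', because the replacement
# table above is applied in both directions.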
# Control methods
########################################
def setTemperature(self, temperature, verbosity=3):
if verbosity>=1:
print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
def temperature(self, verbosity=3):
if verbosity>=1:
print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
return 0.0
# Motion methods
########################################
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
|
conradstorz/raffle
|
run_raffle.py
|
Python
|
apache-2.0
| 364
| 0.013736
|
#!/usr/bin/python
# Python 2/3 compatibility boilerplate
from __future__ import (absolute_import, division,
print_function, unicode_literals)
|
from builtins import *
# begin our implementation
from raffle import *
print(Fore.RED + 'Starting raffle.py.....')
run(['john', 'mary', 'rodrigo', 'jane', 'julie', 'm
|
ichelle', 'goose', 'dan'])
|
mswart/pyopenmensa
|
tests/feed/test_lazy_canteen.py
|
Python
|
lgpl-3.0
| 1,685
| 0.000593
|
# -*- coding: UTF-8 -*-
from datetime import date
import re
import pytest
from pyopenmensa.feed import LazyBuilder
@pytest.fixture
def canteen():
return LazyBuilder()
def test_date_converting(canteen):
day = date(2013, 3, 7)
assert canteen.dayCount() == 0
canteen.setDayClosed('2013-03-07')
assert canteen.dayCount() == 1
canteen.setDayClosed(day)
assert canteen.dayCount() == 1
canteen.setDayClosed('07.03.2013')
assert canteen.dayCount() == 1
def test_has_meals_for(canteen):
day = date(2013, 3, 7)
assert canteen.hasMealsFor(day) is False
canteen
|
._days[day] = {'Hausgericht': ('Gulash', [], {})}
assert canteen.hasMealsFor(day) is True
canteen.setDayClosed(day)
assert canteen.hasMealsFor(day) is False
def test_add_meal(canteen):
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Gul
|
asch')
assert canteen.hasMealsFor(day)
def test_to_long_meal_name(canteen):
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Y'*251)
canteen.hasMealsFor(day)
def test_caseinsensitive_notes(canteen):
day = date(2013, 3, 7)
canteen.legendKeyFunc = lambda v: v.lower()
canteen.setLegendData(legend={'f': 'Note'})
canteen.addMeal(day, 'Test', 'Essen(F)')
assert canteen._days[day]['Test'][0] == ('Essen', ['Note'], {})
def test_notes_regex(canteen):
day = date(2013, 3, 7)
canteen.extra_regex = re.compile('_([0-9]{1,3})_(?:: +)?', re.UNICODE)
canteen.setLegendData(legend={'2': 'Found Note'})
canteen.addMeal(day, 'Test', '_2_: Essen _a_, _2,2_, (2)')
assert canteen._days[day]['Test'][0] == ('Essen _a_, _2,2_, (2)', ['Found Note'], {})
|
bitesofcode/projexui
|
projexui/resources/rc/__plugins__.py
|
Python
|
lgpl-3.0
| 343
| 0.017493
|
__recurse__ = False
__toc__ = [r'projexui.resources.rc.pyqt4_projexui_apps_rc',
r'projexu
|
i.resources.rc.pyqt4_projexui_default_rc',
r'projexui.resources.rc.pyqt4_projexui_styles_rc',
r'projexui.resources.rc.pyside_pro
|
jexui_apps_rc',
r'projexui.resources.rc.pyside_projexui_default_rc',
r'projexui.resources.rc.pyside_projexui_styles_rc']
|
deniscostadsc/playground
|
solutions/beecrowd/1005/1005.py
|
Python
|
mit
| 98
| 0
|
a = float(input())
b = float(input())
print('MEDIA = {:.
|
5f}'.format((a * 3.5 + b * 7.5
|
) / 11.0))
|
cbecker/LightGBM
|
examples/python-guide/simple_example.py
|
Python
|
mit
| 1,762
| 0
|
# coding: utf-8
# pylint: disable = invalid-name, C0111
import json
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test
|
= df_test.dro
|
p(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
early_stopping_rounds=5)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Dump model to JSON...')
# dump model to json (and save to file)
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
# print('Feature importances:', list(gbm.feature_importance("gain")))
|
MatthewCox/PyMoronBot
|
pymoronbot/config.py
|
Python
|
mit
| 4,668
| 0.000643
|
# -*- coding: utf-8 -*-
import copy
from ruamel.yaml import YAML
from six import iteritems
_required = ['server']
class Config(object):
def __init__(self, configFile):
self.configFile = configFile
self._configData = {}
self.yaml = YAML()
self._inBaseConfig = []
def loadConfig(self):
configData = self._readConfig(self.configFile)
self._validate(configData)
self._configData = configData
def _readConfig(self, fileName):
try:
with open(fileName, mode='r') as config:
configData = self.yaml.load(config)
if not configData:
configData = {}
# if this is the base server config, store what keys we loaded
if fileName == self.configFile:
self._inBaseConfig = list(configData.keys())
except Exception as e:
raise ConfigError(fileName, e)
if 'import' not in configData:
return configData
for fname in configData['import']:
includeConfig = self._readConfig('configs/{}.yaml'.format(fname))
for key, val in iteritems(includeConfig):
# not present in base config, just assign it
if key not in configData:
configData[key] = val
continue
# skip non-collection types that are already set
if isinstance(configData[key], (str, int)):
continue
if isinstance(val, str):
raise ConfigError(fname, 'The included config file tried '
'to merge a non-string with a '
'string')
try:
iter(configData[key])
iter(val)
except TypeError:
|
# not a collection, so just don't merge them
pass
else:
try:
# merge with + operator
configData[key] += val
except TypeError:
# dicts can't merge with +
|
try:
for subKey, subVal in iteritems(val):
if subKey not in configData[key]:
configData[key][subKey] = subVal
except (AttributeError, TypeError):
# if either of these, they weren't both dicts.
raise ConfigError(fname, 'The variable {!r} could '
'not be successfully '
'merged'.format(key))
return configData
def writeConfig(self):
# filter the configData to only those keys
# that were present in the base server config,
# or have been modified at runtime
configData = copy.deepcopy(self._configData)
to_delete = set(configData.keys()).difference(self._inBaseConfig)
for key in to_delete:
del configData[key]
# write the filtered configData
try:
with open(self.configFile, mode='w') as config:
self.yaml.dump(configData, config)
except Exception as e:
raise ConfigError(self.configFile, e)
def getWithDefault(self, key, default=None):
if key in self._configData:
return self._configData[key]
return default
def _validate(self, configData):
for key in _required:
if key not in configData:
raise ConfigError(self.configFile, 'Required item {!r} was not found in the config.'.format(key))
def __len__(self):
return len(self._configData)
def __iter__(self):
return iter(self._configData)
def __getitem__(self, key):
return self._configData[key]
def __setitem__(self, key, value):
# mark this key to be saved in the server config
if key not in self._inBaseConfig:
self._inBaseConfig.append(key)
self._configData[key] = value
def __contains__(self, key):
return key in self._configData
class ConfigError(Exception):
def __init__(self, configFile, message):
self.configFile = configFile
self.message = message
def __str__(self):
return 'An error occurred while reading config file {}: {}'.format(self.configFile,
self.message)
|
PaddlePaddle/Paddle
|
python/paddle/fluid/contrib/mixed_precision/decorator.py
|
Python
|
apache-2.0
| 28,628
| 0.002655
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import core
from ... import default_main_program
from ... import default_startup_program
from ... import framework
from ... import layers
from ... import program_guard
from ... import unique_name
from . import fp16_utils
from .fp16_utils import rewrite_program
from .fp16_utils import cast_model_to_fp16
from .fp16_utils import cast_parameters_to_fp16
from .fp16_utils import update_role_var_grad
from .fp16_lists import AutoMixedPrecisionLists
from .amp_nn import check_finite_and_unscale
from .amp_nn import update_loss_scaling
import types
import warnings
import paddle
__all__ = ["decorate"]
class OptimizerWithMixedPrecision(object):
"""
Optimizer with mixed-precision (MP) training. This is a wrapper of a common
optimizer, plus the support of mixed-precision pre-training. The object
of this class almost has the same behavior as the common optimizer, with the
methods `minimize()`, `backward()`, `apply_gradients()` implemented.
Additionally, it enables the MP training automatically, i.e, the creation
and maintenance of master parameters, scaling of loss, etc.
Args:
optimizer (Optimizer): A common Optimizer object.
amp_lists (CustomOpLists): A CustomOpLists object.
init_loss_scaling (float): The initial loss scaling factor.
use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
incr_every_n_steps(int): Increases loss scaling every n consecutive
steps with finite gradients.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n
accumulated steps with nan or
inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
the loss scaling.
use_pure_fp16(bool): Whether to use the pure fp16 training. Default False.
use_fp16_guard(bool): Whether to use `fp16_guard` when constructing the program.
Default None, which means that its value is equal to `use_pure_fp16`.
"""
def __init__(self, optimizer, amp_lists, init_loss_scaling,
use_dynamic_loss_scaling, incr_every_n_steps,
decr_every_n_nan_or_inf, incr_ratio, decr_ratio, use_pure_fp16,
use_fp16_guard):
self._optimizer = optimizer
self._amp_lists = amp_lists
self._param_grads = None
self._train_program = None
self._is_distributed = False
self._scaled_loss = None
self._loss_scaling = None
self._init_loss_scaling = init_loss_scaling
self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
self._learning_rate = optimizer._learning_rate
self._learning_rate_map = optimizer._learning_rate_map
self._use_pure_fp16 = use_pure_fp16
self._use_fp16_guard = use_fp16_guard
self._to_fp16_var_names = None
if self._use_dynamic_loss_scaling:
self._incr_every_n_steps = incr_every_n_steps
self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._num_good_steps = None
self._num_bad_steps = None
def _set_distributed(self, flag):
# if distributed, all cards will communicate with each other;
# overlap communication and computation by splitting the
# check_finite_and_unscale op.
self._is_distributed = flag
def get_loss_scaling(self):
"""Return the real-time loss scaling factor.
"""
assert self._loss_scaling is not None, 'Please call minimize() before calling get_loss_scaling().'
return self._loss_scaling
def get_scaled_loss(self):
"""Return the scaled loss.
It's useful when you feed a customized loss into the executor.
"""
return self._scaled_loss
def _supports_check_nan_inf(self):
return getattr(self._optimizer, "_supports_check_nan_inf", False)
def _init_amp_var(self):
self._loss_scaling = layers.create_global_var(
name=unique_name.generate("loss_scaling"),
shape=[1],
value=self._init_loss_scaling,
dtype='float32',
persistable=True)
if self._use_dynamic_loss_scaling:
self._num_good_steps = layers.create_global_var(
name=unique_name.generate("num_good_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
self._num_bad_steps = layers.create_global_var(
name=unique_name.generate("num_bad_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
# Ensure the data type of learning rate vars is float32 (same as the
# master parameter dtype)
if isinstance(self._optimizer._learning_rate, float):
self._optimizer._learning_rate_map[default_main_program()] = \
layers.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1]
|
,
value=float(self._optimizer._learning_rate),
dtype='float32',
persistable=True)
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
"""
Backward propagation or auto differentiation for gradients' computation.
Args:
loss (Variable): The loss Variable to minimi
|
ze.
startup_program (Program|None): The startup Program for initializing
parameters in `parameter_list`.
parameter_list (list|None): A list of Variables to update.
no_grad_set (set|None): A set of Variables should be ignored.
callbacks (list|None): A list of callable objects to run when appending
backward operator for one parameter.
Returns:
A list of (param, grad), which is a tuple of a parameter and its
gradient respectively, and the scaled loss.
"""
train_program = loss.block.program
self._train_program = train_program
# NOTE(zhiqiu): _float_status is only used for NPU.
if core.is_compiled_with_npu():
float_status = paddle.static.data(
name="float_status", shape=[8], dtype='float32')
self._train_program.global_block().append_op(
type="alloc_float_status",
outputs={"FloatStatus": float_status}, )
self._train_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status}, )
self._float_status = float_status
else:
self._float_status = None
with program_guard(self._train_program, startup_program):
self._init_amp_var()
if self._use_pure_fp16:
self._to_fp16_var_names = cast_model_to_fp16(
self._train_program, self._amp_lists, self._use_fp16_guard)
else:
|
jaffyadhav/django-resume-parser
|
manage.py
|
Python
|
unlicense
| 810
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "resumeparser.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may
|
fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
|
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
JuliaLang/JuliaBox
|
engine/src/juliabox/plugins/dns_gcd/impl_gcd.py
|
Python
|
mit
| 2,592
| 0.002315
|
__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRec
|
ordSet',
'type': 'A',
'name': name,
|
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/core/tests/test_evalf.py
|
Python
|
agpl-3.0
| 11,270
| 0.011713
|
from sympy.core.evalf import PrecisionExhausted, complex_accuracy
from sympy import pi, I, Symbol, Add, Rational, exp, sqrt, sin, cos, \
fibonacci, Integral, oo, E, atan, log, integrate, floor, ceiling, \
factorial, binomial, Sum, zeta, Catalan, Pow, GoldenRatio, sympify, \
sstr, Function, Eq, Mul, Pow, Derivative
from sympy.mpmath.libmp.libmpf import from_float
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n')
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_evalf_helpers():
assert complex_accuracy((from_float(2.0),None,35,None)) == 35
assert complex_accuracy((from_float(2.0),from_float(10.0),35,100)) == 37
assert complex_accuracy((from_float(2.0),from_float(1000.0),35,100)) == 43
assert complex_accuracy((from_float(2.0),from_float(10.0),100,35)) == 35
assert complex_accuracy((from_float(2.0),from_float(1000.0),100,35)) == 35
def test_evalf_basic():
assert NS('pi',15) == '3.14159265358979'
assert NS('2/3',10) == '0.6666666667'
assert NS('355/113-pi',6) == '2.66764e-7'
assert NS('16*atan(1/5)-4*atan(1/239)', 15) == '3.14159265358979'
def test_cancellation():
assert NS(Add(pi,Rational(1,10**1000),-pi,evaluate=False),15,maxn=1200) == '1.00000000000000e-1000'
def test_evalf_powers():
assert NS('pi**(10**20)',10) == '1.339148777e+49714987269413385435'
assert NS(pi**(10**100),10) == ('4.946362032e+4971498726941338543512682882'
'9089887365167832438044244613405349992494711208'
'95526746555473864642912223')
assert NS('2**(1/10**50)',15) == '1.00000000000000'
assert NS('2**(1/10**50)-1',15) == '6.93147180559945e-51'
# Evaluation of Rump's ill-conditioned polynomial
def test_evalf_rump():
a = 1335*y**6/4+x**2*(11*x**2*y**2-y**6-121*y**4-2)+11*y**8/2+x/(2*y)
assert NS(a, 15, subs={x:77617, y:33096}) == '-0.827396059946821'
def test_evalf_complex():
assert NS('2*sqrt(pi)*I',10) == '3.544907702*I'
assert NS('3+3*I',15) == '3.00000000000000 + 3.00000000000000*I'
assert NS('E+pi*I',15) == '2.71828182845905 + 3.14159265358979*I'
assert NS('pi * (3+4*I)',15) == '9.42477796076938 + 12.5663706143592*I'
assert NS('I*(2+I)',15) == '-1.00000000000000 + 2.00000000000000*I'
#assert NS('(pi+E*I)*(E+pi*I)',15) in ('.0e-15 + 17.25866050002*I', '.0e-17 + 17.25866050002*I', '-.0e-17 + 17.25866050002*I')
assert NS('(pi+E*I)*(E+pi*I)',15,chop=True) == '17.2586605000200*I'
def test_evalf_complex_powers():
assert NS('(E+pi*I)**100000000000000000') == \
'-3.58896782867793e+61850354284995199 + 4.58581754997159e+61850354284995199*I'
# XXX: rewrite if a+a*I simplification introduced in sympy
#assert NS('(pi + pi*I)**2') in ('.0e-15 + 19.7392088021787*I', '.0e-16 + 19.7392088021787*I')
assert NS('(pi + pi*I)**2', chop=True) == '19.7392088021787*I'
assert NS('(pi + 1/10**8 + pi*I)**2') == '6.2831853e-8 + 19.7392088650106*I'
assert NS('(pi + 1/10**12 + pi*I)**2') == '6.283e-12 + 19.7392088021850*I'
#assert NS('(pi + pi*I)**4') == '-389.63636413601 + .0e-14*I'
assert NS('(pi + pi*I)**4', chop=True) == '-389.636364136010'
assert NS('(pi + 1/10**8 + pi*I)**4') == '-389.636366616512 + 2.4805021e-6*I'
assert NS('(pi + 1/10**12 + pi*I)**4') == '-389.636364136258 + 2.481e-10*I'
assert NS('(10000*pi + 10000*pi*I)**4', chop=True) == '-3.89636364136010e+18'
def test_evalf_exponentiation():
assert NS(sqrt(-pi)) == '1.77245385090552*I'
assert NS(Pow(pi*I, Rational(1,2), evaluate=False)) == '1.25331413731550 + 1.25331413731550*I'
assert NS(pi**I) == '0.413292116101594 + 0.910598499212615*I'
assert NS(pi**(E+I/3)) == '20.8438653991931 + 8.36343473930031*I'
assert NS((pi+I/3)**(E+I/3)) == '17.2442906093590 + 13.6839376767037*I'
assert NS(exp(pi)) == '23.1406926327793'
assert NS(exp(pi+E*I)) == '-21.0981542849657 + 9.50576358282422*I'
assert NS(pi**pi) == '36.4621596072079'
assert NS((-pi)**pi) == '-32.9138577418939 - 15.6897116534332*I'
assert NS((-pi)**(-pi)) == '-0.0247567717232697 + 0.0118013091280262*I'
# An example from Smith, "Multiple Precision Complex Arithmetic and Functions"
def test_evalf_complex_cancellation():
A = Rational('63287/100000')
B = Rational('52498/100000')
C = Rational('69301/100000')
D = Rational('83542/100000')
F = Rational('2231321613/2500000000')
# XXX: the number of returned mantissa digits in the real part could
# change with the implementation. What matters is that the returned digits are
# correct.
assert NS((A+B*I)*(C+D*I),6) == '6.44862e-6 + 0.892529*I'
assert NS((A+B*I)*(C+D*I),10) == '6.447099821e-6 + 0.8925286452*I'
assert NS((A+B*I)*(C+D*I) - F*I, 5) in ('6.4471e-6 - .0e-15*I', '6.4471e-6 + .0e-15*I')
def test_evalf_logs():
assert NS("log(3+pi*I)", 15) == '1.46877619736226 + 0.808448792630022*I'
assert NS("log(pi*I)", 15) == '1.14472988584940 + 1.57079632679490*I'
def test_evalf_trig():
assert NS('sin(1)',15) == '0.841470984807897'
assert NS('cos(1)',15) == '0.540302305868140'
assert NS('sin(10**-6)',15) == '9.99999999999833e-7'
assert NS('cos(10**-6)',15) == '0.999999999999500'
assert NS('sin(E*10**100)',15) == '0.409160531722613'
# Some input near roots
assert NS
|
(sin(exp(pi*sqrt(163))*pi), 15) == '-2.35596641936785e-12'
assert NS(sin(pi*10**100 + Rational(7,10**5), evaluate=False), 15, maxn=120) == \
'6.99999999428333e-5'
assert NS(sin(Rational(7,10**5), evaluate=False), 15) == \
'6.99999999428333e-5'
# Check detection of various false identities
def test_evalf_near_integers():
# Binet's fo
|
rmula
f = lambda n: ((1+sqrt(5))**n)/(2**n * sqrt(5))
assert NS(f(5000) - fibonacci(5000), 10, maxn=1500) == '5.156009964e-1046'
# Some near-integer identities from
# http://mathworld.wolfram.com/AlmostInteger.html
assert NS('sin(2017*2**(1/5))',15) == '-1.00000000000000'
assert NS('sin(2017*2**(1/5))',20) == '-0.99999999999999997857'
assert NS('1+sin(2017*2**(1/5))',15) == '2.14322287389390e-17'
assert NS('45 - 613*E/37 + 35/991', 15) == '6.03764498766326e-11'
def test_evalf_ramanujan():
assert NS(exp(pi*sqrt(163)) - 640320**3 - 744, 10) == '-7.499274028e-13'
# A related identity
A = 262537412640768744*exp(-pi*sqrt(163))
B = 196884*exp(-2*pi*sqrt(163))
C = 103378831900730205293632*exp(-3*pi*sqrt(163))
assert NS(1-A-B+C,10) == '1.613679005e-59'
# Input that for various reasons have failed at some point
def test_evalf_bugs():
assert NS(sin(1)+exp(-10**10),10) == NS(sin(1),10)
assert NS(exp(10**10)+sin(1),10) == NS(exp(10**10),10)
assert NS('log(1+1/10**50)',20) == '1.0000000000000000000e-50'
assert NS('log(10**100,10)',10) == '100.0000000'
assert NS('log(2)',10) == '0.6931471806'
assert NS('(sin(x)-x)/x**3', 15, subs={x:'1/10**50'}) == '-0.166666666666667'
assert NS(sin(1)+Rational(1,10**100)*I,15) == '0.841470984807897 + 1.00000000000000e-100*I'
assert x.evalf() == x
assert NS((1+I)**2*I,6) == '-2.00000 + 2.32831e-10*I'
d={n: (-1)**Rational(6,7), y: (-1)**Rational(4,7), x: (-1)**Rational(2,7)}
assert NS((x*(1+y*(1 + n))).subs(d).evalf(),6) == '0.346011 + 0.433884*I'
assert NS(((-I-sqrt(2)*I)**2).evalf()) == '-5.82842712474619'
assert NS((1+I)**2*I,15) == '-2.00000000000000 + 2.16840434497101e-19*I'
#1659 (1/2):
assert NS(pi.evalf(69) - pi) == '-4.43863937855894e-71'
#1659 (2/2): With the bug present, this still only fails if the
# terms are in the order given here. This is not generally the case,
# because the order depends on the hashes of the terms.
assert NS(20 - 5008329267844*n**25 - 477638700*n**37 - 19*n,
subs={n:.01}) == '19.8100000000000'
def test_evalf_integer_parts():
a = floor(log(8)/log(2) - exp(-1000), evaluate=False)
b = floor(log(8)/log(2), evaluate=False)
raises(PrecisionExhausted, "a.evalf()")
assert a.evalf(chop=True) == 3
assert a.evalf(maxn=500) == 2
raises(PrecisionExhausted, "b.evalf()")
raises(PrecisionExhausted, "b.evalf(maxn=500)")
|
wikimedia/pywikibot-core
|
scripts/checkimages.py
|
Python
|
mit
| 76,106
| 0
|
#!/usr/bin/python3
"""
Script to check recently uploaded files.
This script checks if a file description is present and if there are other
problems in the image's description.
This script will have to be configured for each language. Please submit
translations as addition to the Pywikibot framework.
Everything that needs customisation is indicated by comments.
This script understands the following command-line arguments:
-limit The number of images to check (default: 80)
-commons The bot will check if an image on Commons has the same name
and if true it reports the image.
-duplicates[:#] Checking if the image has duplicates (if arg, set how many
 rollbacks to wait before reporting the image in the report
 instead of tagging the image) default: 1 rollback.
-duplicatesreport Report the duplicates in a log *AND* put the template in
the images.
-maxusernotify Maximum notifications added to a user talk page in a single
check, to avoid email spamming.
-sendemail Send an email after tagging.
-break To break the bot after the first check (default: recursive)
-sleep[:#] Time in seconds between repeat runs (default: 30)
-wait[:#] Wait x seconds before checking the images (default: 0)
-skip[:#] The bot skips the first [:#] images (default: 0)
-start[:#] Use allimages() as generator
(it starts already from File:[:#])
-cat[:#] Use a category as generator
-regex[:#] Use regex, must be used with -url or -page
-page[:#] Define the name of the wikipage where the images are
-url[:#] Define the url where the images are
-nologerror If given, this option will disable the error that is raised
 when the log is full.
Instructions for the real-time settings.
For every new block you have to add:
<------- ------->
In this way the bot can understand where the block starts in order to take the
right parameter.
* Name= Set the name of the block
* Find= search this text in the image's description
* Findonly= search for exactly this text in the image's description
* Summary= That's the summary that the bot will use when it will notify the
problem.
* Head= That's the incipit that the bot will use for the message.
* Text= This is the template that the bot will use when it will report the
image's problem.
Todo
----
* Clean the code, some passages are pretty difficult to understand.
* Add the "catch the language" function for commons.
* Fix and reorganise the new documentation
* Add a report for the image tagged.
"""
#
# (C) Pywikibot team, 2006-2022
#
# Distributed under the terms of the MIT license.
#
import collections
import re
import time
from typing import Generator
import pywikibot
from pywikibot import config, i18n
from pywikibot import pagegenerators as pg
from pywikibot.backports import List, Tuple
from pywikibot.bot import suggest_help
from pywikibot.exceptions import (
EditConflictError,
Error,
IsRedirectPageError,
LockedPageError,
NoPageError,
NotEmailableError,
PageRelatedError,
PageSaveRelatedError,
ServerError,
TranslationError,
)
from pywikibot.family import Family
from pywikibot.site import Namespace
###############################################################################
# <--------------------------- Change only below! --------------------------->#
###############################################################################
# NOTE: in the messages used by the bot if you put __botnick__ in the text, it
# will automatically replaced with the bot's nickname.
# That's what you want that will be added. (i.e. the {{no source}} with the
# right day/month/year )
N_TXT = {
'commons': '{{subst:nld}}',
'meta': '{{No license}}',
'test': '{{No license}}',
'ar': '{{subst:ملم}}',
'arz': '{{subst:ملم}}',
'de': '{{Dateiüberprüfung}}',
'en': '{{subst:nld}}',
'fa': '{{subst:حق تکثیر تصویر نامعلوم}}',
'fr': '{{subst:lid}}',
'ga': '{{subst:Ceadúnas de dhíth}}',
'hr': '{{Bez licence}}',
'hu': '{{nincslicenc|~~~~~}}',
'it': '{{subst:unverdata}}',
'ja': '{{subst:Nld}}',
'ko': '{{subst:nld}}',
'ru': '{{subst:nld}}',
'sd': '{{subst:اجازت نامعلوم}}',
'sr': '{{subst:датотека без лиценце}}',
'ta': '{{subst:nld}}',
'ur': '{{subst:حقوق نسخہ تصویر نامعلوم}}',
'zh': '{{subst:No license/auto}}',
}
# Text that the bot will check for, to see whether it is already present. If there's a
# {{ I'll use a regex to make a better check.
# This will work so:
# '{{no license' --> '\{\{(?:template:)?no[ _]license ?(?:\||\n|\}|/) ?' (case
# insensitive).
# If there's not a {{ it will work as usual (if x in Text)
TXT_FIND = {
'commons': ['{{no license', '{{no license/en',
'{{nld', '{{no permission', '{{no permission since'],
'meta': ['{{no license', '{{nolicense', '{{nld'],
'test': ['{{no license'],
'ar': ['{{لت', '{{لا ترخيص'],
'arz': ['{{nld', '{{no license'],
'de': ['{{DÜP', '{{Düp', '{{Dateiüberprüfung'],
'en': ['{{nld', '{{no license'],
'fa': ['{{حق تکثیر تصویر نامعلوم۲'],
'ga': ['{{Ceadúnas de dhíth', '{{Ceadúnas de dhíth'],
'hr': ['{{bez licence'],
'hu': ['{{nincsforrás', '{{nincslicenc'],
'it': ['{{unverdata', '{{unverified'],
'ja': ['{{no source', '{{unknown',
'{{non free', '<!--削除についての議論が終了するまで'],
'ko': ['{{출처 없음', '{{라이선스 없음', '{{Unknown'],
'ru': ['{{no license'],
'sd': ['{{ناحوالا', '{{ااجازت نامعلوم', '{{Di-no'],
'sr': ['{{датотека без лиценце', '{{датотека без извора'],
'ta': ['{{no source', '{{nld', '{{no license'],
'ur': ['{{ناحوالہ', '{{اجازہ نامعلوم', '{{Di-no'],
'zh': ['{{no source', '{{unknown', '{{No license'],
}
# When the bot finds that the user talk page is empty, it is not pretty to put only the
# no-source notice without the welcome, is it?
EMPTY = {
'commons': '{{subst:welcome}}\n~~~~\n',
'meta': '{{subst:Welcome}}\n~~~~\n',
'ar': '{{subst:أهلا ومرحبا}}\n~~~~\n',
'arz': '{{subst:
|
اهلا و سهلا}}\n~~~~\n',
'de': '{{subst:willkommen}} ~~~~',
'en': '{{subst:welcome}}\n~~~~\n',
'fa': '{{subst:خوشامدید|%s}}',
'fr': '{{Bienvenue nouveau\n~~~~\n',
'ga': '{{subst:Fáilte}} - ~~~~\n',
'hr': '{{subst:dd}}--
|
~~~~\n',
'hu': '{{subst:Üdvözlet|~~~~}}\n',
'it': '<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n~~~~\n'
'<!-- fine template di benvenuto -->',
'ja': '{{subst:Welcome/intro}}\n{{subst:welcome|--~~~~}}\n',
'ko': '{{환영}}--~~~~\n',
'ru': '{{subst:Приветствие}}\n~~~~\n',
'sd': '{{ڀليڪار}}\n~~~~\n',
'sr': '{{dd}}--~~~~\n',
'ta': '{{welcome}}\n~~~~\n',
'ur': '{{خوش آمدید}}\n~~~~\n',
'zh': '{{subst:welcome|sign=~~~~}}',
}
# if the file has an unknown extension it will be tagged with this template.
# In reality, there aren't unknown extensions; they are just not allowed...
DELETE_IMMEDIATELY = {
'commons': '{{speedy|The file has .%s as extension. '
'Is it ok? Please check.}}',
'meta': '{{Delete|The file has .%s as extension.}}',
'ar': '{{شطب|الملف له .%s كامتداد.}}',
'arz': '{{مسح|الملف له .%s كامتداد.}}',
'en': '{{db-meta|The file has .%s as extension.}}',
'fa': '{{حذف سریع|تصویر %s اضافی است.}}',
'ga': '{{scrios|Tá iarmhír .%s ar an comhad seo.}}',
'hu': '{{azonnali|A fájlnak .%s a kiterjesztése}}',
'it': '{{cancella subito|motivo=Il file ha come estensione ".%s"}}',
'ja': '{{db|知らないファイルフォーマット %s}}',
'ko': '{{delete|잘못된 파일 형식 (.%s)}}',
'ru': '{{db-badimage}}',
'sr': '{{speedy|Ова датотека садржи екстензију %s. '
'Молим вас да проверите да ли је у складу са правилима.}}',
'ta': '{{delete|'
'இந்தக் கோப்பு .%s என்றக் கோப்பு நீட்சியைக் கொண்டுள்ளது.}}',
'ur': '{{سریع حذف شدگی|اس ملف میں .%s بطور توسیع موجود ہے۔ }}',
'zh': '{{delete|未知檔案格式%s}}',
}
# That's the text that the bot will add if it doesn't find the license.
# Note: every __
|
redhat-openstack/trove
|
trove/tests/unittests/instance/test_instance_models.py
|
Python
|
apache-2.0
| 11,729
| 0
|
# Copyright 2014 Rackspace Hosting
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock, patch
from trove.backup import models as backup_models
from trove.common import cfg
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.datastore import models as datastore_models
from trove.instance import models
from trove.instance.models import DBInstance
from trove.instance.models import filter_ips
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
CONF = cfg.CONF
class SimpleInstanceTest(trove_testtools.TestCase):
def setUp(self):
super(SimpleInstanceTest, self).setUp()
db_info = DBInstance(
InstanceTasks.BUILDING, name="TestInstance")
self.instance = SimpleInstance(
None, db_info, InstanceServiceStatus(
ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock())
db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
"internal": [{"addr": "10.123.123.123"}],
"public": [{"addr": "15.123.123.123"}]}
self.orig_conf = CONF.network_label_regex
self.orig_ip_regex = CONF.ip_regex
self.orig_black_list_regex = CONF.black_list_regex
def tearDown(self):
super(SimpleInstanceTest, self).tearDown()
CONF.network_label_regex = self.orig_conf
CONF.ip_start = None
def test_get_root_on_create(self):
root_on_create_val = Instance.get_root_on_create(
'redis')
self.assertFalse(root_on_create_val)
def test_filter_ips_white_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '^(15.|123.)'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_filter_ips_black_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '.*'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('10.123.123.123' not in ip)
def test_one_network_label(self):
CONF.network_label_regex = 'public'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(['15.123.123.123'], ip)
def test_two_network_labels(self):
CONF.network_label_regex = '^(private|public)$'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_all_network_labels(self):
CONF.network_label_regex = '.*'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(3, len(ip))
self.assertTrue('10.123.123.123' in ip)
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
class CreateInstanceTest(trove_testtools.TestCase):
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def setUp(self):
util.init_db()
self.context = trove_testtools.TroveTestContext(self, is_admin=True)
self.name = "name"
self.flavor_id = 5
self.image_id = "UUID"
self.databases = []
self.users = []
self.datastore = datastore_models.DBDatastore.create(
id=str(uuid.uuid4()),
name='mysql' + str(uuid.uuid4()),
)
self.datastore_version = (
datastore_models.DBDatastoreVersion.create(
id=str(uuid.uuid4()),
datastore_id=self.datastore.id,
name="5.5" + str(uuid.uuid4()),
manager="mysql",
image_id="image_id",
packages="",
active=True))
self.volume_size = 1
self.az = "az"
self.nics = None
self.configuration = None
self.tenant_id = "UUID"
self.datastore_version_id = str(uuid.uuid4())
self.db_info = DBInstance.create(
name=self.name, flavor_id=self.flavor_id,
tenant_id=self.tenant_id,
volume_size=self.volume_size,
datastore_version_id=self.datastore_version.id,
task_status=InstanceTasks.BUILDING,
configuration_id=self.configuration
)
self.backup_name = "name"
self.descr = None
self.backup_state = backup_models.BackupState.COMPLETED
self.instance_id = self.db_info.id
self.parent_id = None
self.deleted = False
self.backup = backup_models.DBBackup.create(
name=self.backup_name,
description=self.descr,
tenant_id=self.tenant_id,
state=self.backup_state,
instance_id=self.instance_id,
parent_id=self.parent_id,
datastore_version_id=self.datastore_version.id,
deleted=False
)
self.backup.size = 1.1
self.backup.save()
self.backup_id = self.backup.id
self.orig_client = models.create_nova_client
models.create_nova_client = nova.fake_create_nova_client
self.orig_api = task_api.API(self.context).create_instance
task_api.API(self.context).create_instance = Mock()
self.run_with_quotas = models.run_with_quotas
models.run_with_quotas = Mock()
self.check = backup_models.DBBackup.check_swift_object_exist
backup_models.DBBackup.check_swift_object_exist = Mock(
return_value=True)
super(CreateInstanceTest, self).setUp()
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def tearDown(self):
self.db_info.delete()
self.backup.delete()
self.datastore.delete()
self.datastore_version.delete()
models.create_nova_client = self.orig_client
task_api.API(self.context).create_instance = self.orig_api
models.run_with_quotas = self.run_with_quotas
        backup_models.DBBackup.check_swift_object_exist = self.check
self.backup.delete()
self.db_info.delete()
super(CreateInstanceTest, self).tearDown()
def test_exception_on_invalid_backup_size(self):
        self.assertEqual(self.backup.id, self.backup_id)
exc = self.assertRaises(
exception.BackupTooLarge, models.Instance.create,
self.context, self.name, self.flavor_id,
self.image_id, self.databases, self.users,
self.datastore, self.datastore_version,
self.volume_size, self.backup_id,
self.az, self.nics, self.configuration
)
self.assertIn("Backup is too large for "
"given flavor or volume.", str(exc))
def test_can_restore_from_backup_with_almost_equal_size(self):
# target size equals to "1Gb"
self.backup.size = 0.99
self.backup.save()
instance = models.Instance.create(
self.cont
|
garyp/djwed
|
wedding/admin.py
|
Python
|
mit
| 10,509
| 0.008945
|
from django import forms
from djwed.wedding.models import *
from djwed.wedding.admin_actions import *
from django.contrib import admin
class RequireOneFormSet(forms.models.BaseInlineFormSet):
"""Require at least one form in the formset to be completed."""
def clean(self):
"""Check that at least one form has been completed."""
super(RequireOneFormSet, self).clean()
if not self.is_valid():
return
for cleaned_data in self.cleaned_data:
# form has data and we aren't deleting it.
if cleaned_data and not cleaned_data.get('DELETE', False):
# we can break out after the first complete form
return
raise forms.ValidationError("At least one %s is required." %
(self.model._meta.verbose_name,))
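# Illustrative sketch (not part of the original module): a formset like
# RequireOneFormSet is attached to an inline through its `formset` attribute,
# so saving the parent object fails validation unless at least one inline row
# has been completed. The inline class name below is hypothetical:
#
#     class ExampleThankYouInline(admin.TabularInline):
#         model = ThankYou
#         formset = RequireOneFormSet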
class InviteeNotesInline(admin.TabularInline):
model = InviteeNotes
extra = 0
verbose_name_plural = "invitee notes"
class RSVPInline(admin.TabularInline):
model = RSVP
extra = 2
class GuestInline(admin.StackedInline):
model = Guest
extra = 1
class FoodOptionInline(admin.StackedInline):
model = FoodOption
extra = 3
class CommentInline(admin.StackedInline):
model = Comment
extra = 0
exclude = ('rsvp',)
readonly_fields = ('text',)
verbose_name_plural = "comments from invitees"
class GiftThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
verbose_name = "Source"
verbose_name_plural = "Sources"
formset = RequireOneFormSet
class InviteeThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
class InviteeAdmin(admin.ModelAdmin):
#fieldsets = [
# (None, {'fields': ['question']}),
# ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
#]
inlines = [GuestInline,InviteeNotesInline,CommentInline,InviteeThankYouInline]
list_display = ('full_name', 'tags', 'full_address', 'state','country')
list_editable = ('tags',)
list_filter = ['side', 'association','country','state']
search_fields = ['full_name_override','invite_code','guest__first_name', 'guest__last_name', 'guest__nickname']
actions = [
export_as_csv_action("Export addresses as CSV",
fields=['full_name', 'full_address']),
]
#date_hierarchy = 'pub_date'
class LongFoodChoiceField(forms.ModelChoiceField):
#widget = forms.widgets.RadioSelect()
def label_from_instance(self, obj):
return obj.long_desc
class GuestAdmin(admin.ModelAdmin):
inlines = [RSVPInline,]
list_display = ('full_name', 'email', 'tags')
list_filter = ['rsvp__status', 'role', 'invitee__side', 'invitee__association']
search_fields = ['first_name', 'last_name']
list_editable = ('email', 'tags')
class RSVPAdminForm(forms.ModelForm):
class Meta: model = RSVP
def clean(self, *args, **kwargs):
sret = super(RSVPAdminForm, self).clean(*args,**kwargs)
if self.cleaned_data['food_selection'] and self.cleaned_data['food_selection'].venue != self.cleaned_data['venue']:
raise ValidationError('Food selected from another venue')
if self.cleaned_data['venue'].site != u'MA' and self.cleaned_data['bus_selection']:
raise ValidationError('Bus selection for a site with no bus')
rsvp_filter = RSVP.objects.filter(venue = self.cleaned_data['venue'],
guest = self.cleaned_data['guest'])
if rsvp_filter.count()>1 or (rsvp_filter.count() == 1
and rsvp_filter.all()[0] != self.instance):
raise ValidationError('Only one RSVP allowed per person')
return sret
class RSVPAdmin(admin.ModelAdmin):
#inlines = [GuestInline,]
#food_selection = LongFoodChoiceField([], required=False, empty_label = "--- Please choose from a dinner selection below ---")
    list_display = (
        'guest_site',
'venue',
'status',
'food_selection',
'bus_selection',
'last_updated',
'prelim',
'guest_invitee',
        'last_update_source',
'guest',
'table_assign',
)
search_fields = [
'guest__first_name',
'guest__last_name',
'guest__invitee__guest__last_name',
'guest__invitee__invite_code',
]
list_editable = (
'status',
'food_selection',
'bus_selection',
'prelim',
'last_update_source',
'table_assign',
)
form = RSVPAdminForm
list_filter = ('venue','status', 'guest__invitee__side',
'guest__invitee__association', 'guest__invitee__country',
'guest__invitee__state',
)
def guest_site(self,rsvp):
return u"%s (%s)"%(rsvp.guest.full_name(), unicode(rsvp.venue.site))
guest_site.short_description = "Guest (Site)"
def guest_invitee(self,rsvp):
return rsvp.guest.invitee
guest_invitee.short_description = "Invitee"
def guest_invitee_association(self,rsvp):
return rsvp.guest.invitee.association
guest_invitee_association.short_description = "Association"
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "guest":
kwargs["queryset"] = Guest.objects.all().order_by('last_name','first_name')
return db_field.formfield(**kwargs)
return super(RSVPAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class InviteeNotesAdmin(admin.ModelAdmin):
search_fields = ['invitee__guest__first_name',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = [ 'invitee',
'likely_site',
'ma_likelihood',
'ca_likelihood',
'or_likelihood',
'savedate',
'batch',
'invitee_rsvp_count',
'adults',
'children',
'invitee_country',
]
list_editable = ['ma_likelihood',
'ca_likelihood',
'savedate',
'batch',
]
def invitee_rsvp_count(self,inote):
counts = inote.invitee.rsvp_yes_counts()
return ', '.join('%s: %d' % (venue, counts[venue])
for venue in sorted(counts.keys()))
invitee_rsvp_count.short_description = "RSVP Yes"
def invitee_country(self,inote):
return str(inote.invitee.country)
invitee_country.short_description = "Country"
class CommentAdmin(admin.ModelAdmin):
list_filter = ['type']
search_fields = ['invitee__guest__first_name','text',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = ['id','invitee','type','last_updated','text']
class VenueAdmin(admin.ModelAdmin):
inlines = [FoodOptionInline,]
class PageSnippetAdmin(admin.ModelAdmin):
list_display = ['key','title','last_updated']
class GiftAdmin(admin.ModelAdmin):
search_fields = [
'sources__guest__first_name',
'sources__guest__nickname',
'sources__guest__last_name',
'notes',
'description',
]
list_filter = ['status','registry','assignment']
list_display = ['source_names','received','description','notes',
'assignment','registry','status']
list_editable = ('status', 'assignment')
inlines = [GiftThankYouInline,]
radio_fields = {
'assignment': admin.HORIZONTAL,
'registry': admin.VERTICAL,
'status': admin.HORIZONTAL,
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "source" and request.META['REQUEST_METHOD'] != 'POST':
kwargs["queryset"] = Invitee.objects.all().order_by('guest__la
|
eliran-stratoscale/rackattack-physical
|
rackattack/physical/tests/integration/main_faketestclients.py
|
Python
|
apache-2.0
| 8,351
| 0.001317
|
import yaml
import time
import random
import threading
import subprocess
from rackattack.physical import pikapatch
from rackattack import clientfactory
from rackattack.physical import config
from rackattack.api import Requirement, AllocationInfo
from rackattack.physical.tests.integration.main import useFakeGeneralConfiguration
import pika
assert "egg" in pika.__file__
class RackattackTestClients(threading.Thread):
SCENARIOS = dict(few=(1, 4), moreThanFew=(5, 9), many=(10, 30))
SCENARIOS_PROBABILITIES = dict(few=0.7, moreThanFew=0.2, many=0.1)
def __init__(self, nodeBaseName="node"):
        # Use a tolerance here: summing the float probabilities can be off by
        # a tiny rounding error depending on the order of the dict values.
        assert abs(sum(self.SCENARIOS_PROBABILITIES.values()) - 1.0) < 1e-9
super(RackattackTestClients, self).__init__()
self._nodeBaseName = nodeBaseName
self._client = clientfactory.factory()
with open(config.CONFIGURATION_FILE) as f:
conf = yaml.load(f.read())
self._osmosisServerIP = conf["OSMOSIS_SERVER_IP"]
self._label = self._generateLabelName()
self._nrHosts = self._getNrHosts()
self._nrAllocatedHosts = 0
self._profiledAllocation = None
self._allocations = set()
self._stop = False
def run(self):
while True:
if self._stop:
while self._allocations:
allocation = self._allocations.pop()
allocation.free()
return
self._updateNrAllocatedHosts()
if self._nrAllocatedHosts == self._nrHosts:
self._free()
elif not self._allocations:
self._allocateForBackground()
elif self._nrAllocatedHosts <= self._nrHosts:
self._performRandomLoadAction()
else:
assert(False)
interval = 0.5 + random.random() * 1.2
time.sleep(interval)
def stop(self):
self._stop = True
def _updateNrAllocatedHosts(self):
stillAlive = set()
self._nrAllocatedHosts = 0
for allocation in self._allocations:
if allocation.dead() is None:
self._nrAllocatedHosts += len(allocation._requirements)
stillAlive.add(allocation)
self._allocations = stillAlive
def _generateLabelName(self):
cmd = "osmosis listlabels --objectStores=%(osmosisServerIP)s:1010 star | head -n 1" % \
dict(osmosisServerIP=self._osmosisServerIP)
print "Running %(cmd)s" % dict(cmd=cmd)
labelName = subprocess.check_output(cmd, shell=True)
labelName = labelName.strip()
return labelName
def _performRandomLoadAction(self):
wantedAllocationRatio = 0.65
allocationRatio = self._nrAllocatedHosts / float(self._nrHosts)
print "allocationRatio: {}, nrAllocated: {}, nrHosts: {}".format(allocationRatio,
self._nrAllocatedHosts,
self._nrHosts)
if allocationRatio < wantedAllocationRatio:
print "Will most likeliy allocate now..."
majorityAction = self._allocateForBackground
minorityAction = self._free
else:
print "Reached the wanted ratio..."
time.sleep(0.5)
print "Will most likeliy free now..."
majorityAction = self._free
minorityAction = self._allocateForBackground
withinWhatRange = random.random()
if withinWhatRange < 0.9:
majorityAction()
else:
minorityAction()
def _generateRequirements(self, nrHosts, pool):
requirements = dict([("{}{}".format(self._nodeBaseName, nodeIdx),
Requirement(imageLabel=self._label,
imageHint=self._label,
                                            hardwareConstraints=None,
                                            pool=pool))
for nodeIdx in xrange(nrHosts)])
return requirements
def _generateAllocationInfo(self):
allocationInfo = AllocationInfo(user="johabab", purpose="loadTests")
        return allocationInfo
def allocate(self, nrHosts, pool="default"):
self._updateNrAllocatedHosts()
self._allocate(nrHosts, pool)
def _allocateForBackground(self):
nrHosts = self._getRandomNrHosts()
self._allocate(nrHosts)
def _allocate(self, nrHostsToAllocate, pool="default"):
requirements = self._generateRequirements(nrHostsToAllocate, pool=pool)
allocationInfo = self._generateAllocationInfo()
print "Trying to allocate %(nrHosts)s hosts from %(pool)s" % dict(nrHosts=len(requirements),
pool=pool)
allocation = None
try:
allocation = self._client.allocate(requirements, allocationInfo)
self._allocations.add(allocation)
print "Allocation succeeded"
except Exception as e:
if 'not enough machines' in str(e):
print "Allocation failed: not enough machines"
else:
print str(e)
return allocation
def _getRandomNrHosts(self):
scenarioNames = self.SCENARIOS.keys()
scenarioNames.sort()
withinWhichRange = random.random()
rangeBound = 0
chosenScenarioName = None
for scenarioName in scenarioNames:
rangeBound += self.SCENARIOS_PROBABILITIES[scenarioName]
if withinWhichRange <= rangeBound:
chosenScenarioName = scenarioName
break
assert chosenScenarioName is not None
nrHosts = random.randint(*self.SCENARIOS[chosenScenarioName])
return nrHosts
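    # Illustration of the weighted draw above, using the class constants:
    # the sorted scenario names are ['few', 'many', 'moreThanFew'], giving
    # cumulative bounds of 0.7, 0.8 and 1.0. A uniform draw of, say, 0.75
    # exceeds 0.7 but not 0.8, so 'many' is chosen and the host count comes
    # from randint(10, 30).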
def free(self):
self._updateNrAllocatedHosts()
self._free()
def _free(self):
allocation = self._allocations.pop()
print "Trying to free an allocation..."
try:
allocation.free()
except Exception as e:
print "Failed freeing allocation: {}".format(str(e))
print "Allocation freed."
def _getNrHosts(self):
status = self._client.call("admin__queryStatus")
return len(status["hosts"])
backgroundStressTestClient = None
profilingTestClient = None
def bgStress(mode):
if mode == "on":
print "Starting test clients..."
backgroundStressTestClient.start()
elif mode == "off":
print "Stopping test clients..."
backgroundStressTestClient.stop()
def allocate(nrHosts, pool="default"):
nrHosts = int(nrHosts)
profilingTestClient.allocate(nrHosts, pool=pool)
profilingAllocation = True
def free():
profilingTestClient.free()
def main():
print """Available commands:
bgstress on/off
\tRuns allocations (and frees them) in the background.
allocate nrHosts [pool=default]
\tAllocates the given number of hosts from the given pool.
free
\tFrees the current allocation (which was created with the 'allocate' command, if such allocation
exists)."""
useFakeGeneralConfiguration()
import pdb
pdb.set_trace()
global backgroundStressTestClient, profilingTestClient, profilingAllocation
backgroundStressTestClient = RackattackTestClients("background-stress")
profilingTestClient = RackattackTestClients("profiling")
client = clientfactory.factory()
profilingAllocation = False
commands = dict(bgstress=bgStress, allocate=allocate, free=free)
while True:
cmdline = raw_input()
cmdline = cmdline.strip()
if not cmdline:
continue
cmdline = cmdline.split(" ")
cmdline = [item.strip() for item in cmdline]
commandName = cmdline[0]
args = cmdline[1:]
if commandName not in commands:
print "Invalid command: %(commandName)s" % dict(commandName=commandName)
continue
command = commands[commandName]
try:
command(*args)
except Exception
|
pshowalter/solutions-geoprocessing-toolbox
|
clearing_operations/scripts/PointTargetGRG.py
|
Python
|
apache-2.0
| 24,658
| 0.005272
|
# coding: utf-8
#
# Esri start of added imports
import sys, os, arcpy
# Esri end of added imports
# Esri start of added variables
g_ESRI_variable_1 = 'lyrFC'
g_ESRI_variable_2 = 'lyrTmp'
g_ESRI_variable_3 = 'ID'
g_ESRI_variable_4 = 'lyrOut'
g_ESRI_variable_5 = ';'
# Esri end of added variables
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# ==================================================
# PointTargetGRG.py
# --------------------------------------------------
# Built on ArcGIS
# ==================================================
#
# Creates a Gridded Reference Graphic
#
#
# ==================================================
# HISTORY:
#
# 8/25/2015 - mf - Needed to update script for non-ArcMap/Pro testing environment
#
# ==================================================
import os, sys, math, traceback
import arcpy
from arcpy import env
import Utilities
# Read in the parameters
targetPointOrigin = arcpy.GetParameterAsText(0)
numberCellsHo = arcpy.GetParameterAsText(1)
numberCellsVert = arcpy.GetParameterAsText(2)
cellWidth = arcpy.GetParameterAsText(3)
cellHeight = arcpy.GetParameterAsText(4)
cellUnits = arcpy.GetParameterAsText(5)
gridSize = arcpy.GetParameterAsText(6)
labelStartPos = arcpy.GetParameterAsText(7)
labelStyle = arcpy.GetParameterAsText(8)
outputFeatureClass = arcpy.GetParameterAsText(9)
tempOutput = os.path.join("in_memory", "tempFishnetGrid")
sysPath = sys.path[0]
appEnvironment = None
DEBUG = True
mxd = None
mapList = None
df, aprx = None, None
def labelFeatures(layer, field):
''' set up labeling for layer '''
if appEnvironment == "ARCGIS_PRO":
if layer.supports("SHOWLABELS"):
for lblclass in layer.listLabelClasses():
lblclass.visible = True
lblclass.expression = " [" + str(field) + "]"
layer.showLabels = True
elif appEnvironment == "ARCMAP":
if layer.supports("LABELCLASSES"):
for lblclass in layer.labelClasses:
lblclass.showClassLabels = True
lblclass.expression = " [" + str(field) + "]"
layer.showLabels = True
arcpy.RefreshActiveView()
else:
pass # if returns "OTHER"
def findLayerByName(layerName):
''' find layer in app '''
global mapList
global mxd
#UPDATE
# if isPro:
if appEnvironment == "ARCGIS_PRO":
for layer in mapList.listLayers():
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
# else:
elif appEnvironment == "ARCMAP":
for layer in arcpy.mapping.ListLayers(mxd):
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
else:
arcpy.AddMessage("Non-map application (ArcCatalog, stand-alone test, etc.")
def RotateFeatureClass(inputFC, outputFC,
angle=0, pivot_point=None):
"""Rotate Feature Class
inputFC Input features
outputFC Output feature class
angle Angle to rotate, in degrees
pivot_point X,Y coordinates (as space-separated string)
Default is lower-left of inputFC
    As the output feature class no longer has "real" xy locations
    after rotation, it has no coordinate system defined.
"""
def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
"""Rotate an xy cooordinate about a specified origin
x,y xy coordinates
xc,yc center of rotation
angle angle
units "DEGREES" (default) or "RADIANS"
"""
import math
x = x - xc
y = y - yc
# make angle clockwise (like Rotate_management)
angle = angle * -1
if units == "DEGREES":
angle = math.radians(angle)
xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
return xr, yr
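    # Worked example (illustrative only): with the clockwise convention above,
    # RotateXY(1, 0, 0, 0, 90) maps the point (1, 0) onto (0, -1).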
# temp names for cleanup
env_file = None
lyrFC, lyrTmp, lyrOut = [None] * 3 # layers
tmpFC = None # temp dataset
Row, Rows, oRow, oRows = [None] * 4 # cursors
try:
# process parameters
try:
xcen, ycen = [float(xy) for xy in pivot_point.split()]
pivot_point = xcen, ycen
except:
# if pivot point was not specified, get it from
# the lower-left corner of the feature class
ext = arcpy.Describe(inputFC).extent
xcen, ycen = ext.XMin, ext.YMin
pivot_point = xcen, ycen
angle = float(angle)
# set up environment
env_file = arcpy.CreateScratchName("xxenv",".xml","file",
os.environ["TEMP"])
arcpy.SaveSettings(env_file)
# Disable any GP environment clips or project on the fly
arcpy.ClearEnvironment("extent")
arcpy.ClearEnvironment("outputCoordinateSystem")
WKS = env.workspace
if not WKS:
if os.path.dirname(outputFC):
WKS = os.path.dirname(outputFC)
else:
WKS = os.path.dirname(
arcpy.Describe(inputFC).catalogPath)
env.workspace = env.scratchWorkspace = WKS
# Disable GP environment clips or project on the fly
arcpy.ClearEnvironment("extent")
arcpy.ClearEnvironment("outputCoordinateSystem")
# get feature class properties
lyrFC = g_ESRI_variable_1
arcpy.MakeFeatureLayer_management(inputFC, lyrFC)
dFC = arcpy.Describe(lyrFC)
shpField = dFC.shapeFieldName
shpType = dFC.shapeType
FID = dFC.OIDFieldName
# create temp feature class
tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass")
arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC),
os.path.basename(tmpFC),
shpType)
lyrTmp = g_ESRI_variable_2
arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp)
# set up id field (used to join later)
TFID = "XXXX_FID"
arcpy.AddField_management(lyrTmp, TFID, "LONG")
arcpy.DeleteField_management(lyrTmp, g_ESRI_variable_3)
# rotate the feature class coordinates
# only points, polylines, and polygons are supported
# open read and write cursors
Rows = arcpy.SearchCursor(lyrFC, "", "",
"%s;%s" % (shpField,FID))
oRows = arcpy.InsertCursor(lyrTmp)
arcpy.AddMessage("Opened search cursor")
if shpType == "Point":
for Row in Rows:
shp = Row.getValue(shpField)
pnt = shp.getPart()
pnt.X, pnt.Y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
oRow = oRows.newRow()
oRow.setValue(shpField, pnt)
                oRow.setValue(TFID, Row.getValue(FID))
oRows.insertRow(oRow)
elif shpType in ["Polyline", "Polygon"]:
parts = arcpy.Array()
rings = arcpy.Array()
ring = arcpy.Array()
for Row in Rows:
shp = Row.getValue(shpField)
p = 0
|
dlu-ch/dlb
|
test/dlb/0/test_di.py
|
Python
|
lgpl-3.0
| 16,694
| 0.001498
|
# SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <dlu-ch@users.noreply.github.com>
import testenv # also sets up module search paths
import dlb.di
import dlb.fs
import sys
import re
import logging
import time
import io
import collections
import unittest
class LoggingCompatibilityTest(unittest.TestCase):
def test_levels_are_equals(self):
for level_name in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
self.assertEqual(getattr(logging, level_name), getattr(dlb.di, level_name))
class GetLevelMarkerTest(unittest.TestCase):
def test_exact_levels_are_correct(self):
self.assertEqual('D', dlb.di.get_level_indicator(dlb.di.DEBUG))
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.INFO))
self.assertEqual('W', dlb.di.get_level_indicator(dlb.di.WARNING))
self.assertEqual('E', dlb.di.get_level_indicator(dlb.di.ERROR))
self.assertEqual('C', dlb.di.get_level_indicator(dlb.di.CRITICAL))
def test_fails_for_positive_are_debug(self):
msg = "'level' must be positive"
with self.assertRaises(ValueError) as cm:
dlb.di.get_level_indicator(logging.NOTSET)
self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
dlb.di.get_level_indicator(-123)
self.assertEqual(msg, str(cm.exception))
def test_exact_greater_than_critical_are_critical(self):
self.assertEqual('C', dlb.di.get_level_indicator(dlb.di.CRITICAL + 123))
def test_between_is_next_smaller(self):
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.INFO + 1))
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.WARNING - 1))
class FormatMessageTest(unittest.TestCase):
def format_info_message(self, message):
return dlb.di.format_message(message, dlb.di.INFO)
def test_fails_on_empty(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message('')
msg = "'message' must contain at least one non-empty line"
self.assertEqual(msg, str(cm.exception))
def test_single_line_returns_stripped(self):
self.assertEqual('I äüä schoo\U0001f609', self.format_info_message(' äüä schoo\U0001f609 '))
def test_fails_for_none(self):
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
self.format_info_message(None)
msg = "'message' must be a str"
self.assertEqual(msg, str(cm.exception))
def test_fails_for_bytes(self):
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
self.format_info_message(b'abc')
msg = "'message' must be a str"
self.assertEqual(msg, str(cm.exception))
def test_fails_for_nonprintable(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message('abc\n a\0')
msg = (
"'message' must not contain ASCII control characters except "
"'\\t' and '\\b', unlike '\\x00' in line 2"
)
self.assertEqual(msg, str(cm.exception))
def test_removed_empty_lines_before_and_after(self):
m = self.format_info_message(' \n \n\n \na \n b\n\n \n')
self.assertEqual("I a \n | b", m)
m = self.format_info_message(' \r\n \r\n\r\n \r\na \r\n b\r\n\r\n \r\n')
self.assertEqual("I a \n | b", m)
m = self.format_info_message(' \r \r\r \ra \r b\r\r \r')
self.assertEqual("I a \n | b", m)
def test_removed_empty_lines_between(self):
m = self.format_info_message('a\n\n\n b\n c')
self.assertEqual("I a \n | b \n | c", m)
def test_unindents(self):
m = self.format_info_message(
"""
bla
a
b
""")
self.assertEqual("I bla \n | a \n | b", m)
def test_fails_for_underindented(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message(
"""
bla
x
y
""")
msg = (
"each continuation line in 'message' must be indented at least 4 spaces more than "
"the first non-empty line, unlike line 4"
)
self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
self.format_info_message(
"""
bla
x
y
""")
self.assertEqual(msg, str(cm.exception))
def test_fails_for_reserved_start(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message("'hehe'")
msg = "first non-empty line in 'message' must not start with reserved character \"'\""
self.assertEqual(msg, str(cm.exception))
def test_field_are_justified(self):
m = self.format_info_message(
"""
a\tb33\t100\b
a2\tb2\t10\b
a33\tb\t1\b
""")
self.assertEqual('I a b33100 \n | a2 b2 10 \n | a33b 1', m)
m = self.format_info_message(
"""
table:
a:\t A =\b 1\b
b2:\t B =\b 23\b
""")
self.assertEqual('I table: \n | a: A = 1 \n | b2: B = 23', m)
def test_fails_for_dot_at_end_of_first_line(self):
        with self.assertRaises(ValueError) as cm:
            self.format_info_message("start...")
        msg = "first non-empty line in 'message' must not end with '.'"
        self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
self.format_info_message("done.")
msg = "first non-empty line in 'message' must not end with '.'"
self.assertEqual(msg, str(cm.exception))
class MessageThresholdTest(unittest.TestCase):
def test_default_is_info(self):
dlb.di.set_threshold_level(dlb.di.WARNING + 1)
self.assertTrue(dlb.di.is_unsuppressed_level(dlb.di.WARNING + 1))
self.assertFalse(dlb.di.is_unsuppressed_level(dlb.di.WARNING))
dlb.di.set_threshold_level(dlb.di.CRITICAL + 100)
self.assertTrue(dlb.di.is_unsuppressed_level(dlb.di.CRITICAL + 100))
self.assertFalse(dlb.di.is_unsuppressed_level(dlb.di.CRITICAL + 99))
def test_fails_on_nonpositve(self):
with self.assertRaises(ValueError) as cm:
dlb.di.set_threshold_level(0)
msg = "'level' must be positive"
self.assertEqual(msg, str(cm.exception))
def test_fails_on_none(self):
with self.assertRaises(TypeError) as cm:
dlb.di.set_threshold_level(None)
msg = "'level' must be something convertible to an int"
self.assertEqual(msg, str(cm.exception))
class SetOutputFileTest(unittest.TestCase):
class File:
def write(self, text: str):
pass
def test_fails_for_none(self):
with self.assertRaises(TypeError) as cm:
dlb.di.set_output_file(None)
msg = "'file' does not have a 'write' method: None"
self.assertEqual(msg, str(cm.exception))
def test_successful_for_stdout_and_stderr(self):
dlb.di.set_output_file(sys.stdout)
r = dlb.di.set_output_file(sys.stderr)
self.assertEqual(sys.stdout, r)
r = dlb.di.set_output_file(sys.stderr)
self.assertEqual(sys.stderr, r)
def test_successful_for_custom_class_with_only_write(self):
f = SetOutputFileTest.File()
r = dlb.di.set_output_file(f)
r = dlb.di.set_output_file(r)
self.assertEqual(f, r)
class ClusterTest(unittest.TestCase):
def setUp(self):
dlb.di.set_threshold_level(dlb.di.INFO)
_ = dlb.di._first_monotonic_ns # make sure attribute exists
dlb.di._first_monotonic_ns = None
def test_works_as_context_manager(self):
output = io.StringIO()
dlb.di.set_output_file(output)
c = dlb.di.Cluster('A\n a')
self.assertEqual('', output.getvalue()) # do
|
vsoch/repofish
|
analysis/methods/1.run_find_repos.py
|
Python
|
mit
| 1,341
| 0.014169
|
from glob import glob
import numpy
import pickle
import os
# Run iterations of "count" to count the number of terms in each folder of zipped up pubmed articles
home = os.environ["HOME"]
scripts = "%s/SCRIPT/repofish/analysis/methods" %(home)
base = "%s/data/pubmed" %os.environ["LAB"]
outfolder = "%s/repos" %(base)
articles_folder = "%s/articles" %(base)
if not os.path.exists(outfolder):
os.mkdir(outfolder)
folders = [x for x in glob("%s/*" %articles_folder) if os.path.isdir(x)]
batch_size = 1000.0
iters = int(numpy.ceil(len(folders)/batch_size))
# Prepare and submit a job for each
for i in range(iters):
start = i*int(batch_size)
if i != iters:
        end = start + int(batch_size)
else:
        end = len(folders)
subset = folders[start:end]
script_file = "%s/findgithub_%s.job" %(scripts,i)
filey = open(script_file,'w')
filey.writelines("#!/bin/bash\n")
filey.writelines("#SBATCH --job-name=%s\n" %i)
filey.writelines("#SBATCH --output=.out/%s.out\n" %i)
filey.writelines("#SBATCH --error=.out/%s.err\n" %i)
filey.writelines("#SBATCH --time=2:00:00\n")
for folder in subset:
filey.writelines('python %s/1.find_repos.py "%s" %s\n' % (scripts,folder,outfolder))
filey.close()
os.system("sbatch -A Analysis_Lonestar -p normal -n 24 findgithub_%s.job" %i)
|
gasparmoranavarro/TopoDelProp
|
forms/frmSelec.py
|
Python
|
gpl-2.0
| 2,643
| 0.004162
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Gaspar/.qgis/python/plugins/delPropiedad/forms_ui/frmSelec.ui'
#
# Created: Wed Jul 18 12:50:20 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_frmSelec(object):
def setupUi(self, frmSelec):
frmSelec.setObjectName(_fromUtf8("frmSelec"))
frmSelec.resize(972, 310)
frmSelec.setWindowTitle(QtGui.QApplication.translate("frmSelec", "Seleccionar trabajo", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget = QtGui.QTableWidget(frmSelec)
self.tableWidget.setGeometry(QtCore.QRect(10, 30, 951, 231))
self.tableWidget.setToolTip(QtGui.QApplication.translate("frmSelec", "Seleccione una fila y pulse aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.bttAceptar = QtGui.QPushButton(frmSelec)
self.bttAceptar.setGeometry(QtCore.QRect(440, 270, 111, 31))
self.bttAceptar.setText(QtGui.QApplication.translate("frmSelec", "Aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.bttAceptar.setObjectName(_fromUtf8("bttAceptar"))
self.bttCancelar = QtGui.QPushButton(frmSelec)
self.bttCancelar.setGeometry(QtCore.QRect(570, 270, 91, 31))
self.bttCancelar.setText(QtGui.QApplication.translate("frmSelec", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
self.bttCancelar.setObjectName(_fromUtf8("bttCancelar"))
self.label = QtGui.QLabel(frmSelec)
self.label.setGeometry(QtCore.QRect(20, 10, 331, 16))
self.label.setText(QtGui.QApplication.translate("frmSelec", "Selecciones el trabajo que desea consultar:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(frmSelec)
QtCore.QMetaObject.connectSlotsByName(frmSelec)
def retranslateUi(self, frmSelec):
pass
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
frmSelec = QtGui.QDialog()
ui = Ui_frmSelec()
ui.setupUi(frmSelec)
frmSelec.show()
sys.exit(app.exec_())
|
nanodude/Torch200
|
Torch200.py
|
Python
|
gpl-3.0
| 2,015
| 0.002978
|
__author__ = 'Carlos'
from time import sleep, time
import minimalmodbus as mb
import csv
class Torch200:
def __init__(self, com_port):
mb.BAUDRATE = 9600
mb.TIMEOUT = 3
self.profile = mb.Instrument(com_port, 1)
self.control = mb.Instrument(com_port, 2)
self.start_time = None
self.exceptions_count = 0
def start(self):
pass
def stop(self):
pass
def get_data(self):
try:
(prof_temp,) = self.profile.read_registers(0x1000, 1)
(ctrl_temp, set_point) = self.control.read_registers(0x1000, 2)
except (IOError, ValueError):
self.control.serial.flushInput()
self.exceptions_count += 1
raise
meas_time = time()
return (meas_time, set_point, ctrl_temp, prof_temp)
def set_program(self, id):
if type(id) is not int:
raise TypeError
if id not in range(1, 6):
raise ValueError
self.control.write_registers(0x1004, id)
if __name__=='__main__':
oven = Torch200('COM15')
start_time = None
while True:
try:
data = oven.get_data()
except (IOError, ValueError):
sleep(0.1)
continue
(meas_time, set_point, ctrl_temp, prof_temp) = data
if set_point > 0:
if start_time is None:
start_time = meas_time
                filename = r'C:\Documents and Settings\Carlos\My Documents\Dropbox\torch\T200C+ ' + str(start_time) + r'.csv'
                csv_fp = open(filename, 'wb')
csv_out = csv.writer(csv_fp)
csv_out.writerow(['time', 'set_point', 'ctrl_temp', 'prof_temp'])
data = (meas_time - start_time, set_point, ctrl_temp, prof_temp)
csv_out.writerow(data)
else:
if start_time is not None:
csv_fp.close()
start_time = None
print "(%6.2f, %3d, %3d, %3d)" % data
sleep(0.5)
|
cjaymes/pyscap
|
src/scap/model/ocil_2_0/ChoiceQuestionResultType.py
|
Python
|
gpl-3.0
| 988
| 0.001012
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.model.ocil_2_0.QuestionResultType import QuestionResultType
import logging
logger = logging.getLogger(__name__)
class ChoiceQuestionResultType(QuestionResultType):
MODEL_MAP = {
        'elements': [
            {'tag_name': 'answer', 'class': 'ChoiceAnswerType', 'max': 1},
],
}
|
acutesoftware/AIKIF
|
aikif/lib/cls_plan_BDI.py
|
Python
|
gpl-3.0
| 5,209
| 0.009023
|
# cls_plan_BDI.py
import datetime
class Plan_BDI(object):
"""
    class for handling various plans for AIKIF using
Belief | Desires | Intentions
"""
def __init__(self, name, dependency):
self.name = name
self.id = 1
self.dependency = dependency
self.plan_version = "v0.10"
self.success = False
self.start_date = datetime.datetime.now().strftime("%I:%M%p %d-%B-%Y")
self.resources = []
self.constraint = []
self.beliefs = Beliefs(self)
self.desires = Desires(self)
        self.intentions = Intentions(self)

    def __str__(self):
res = "---== Plan ==---- \n"
res += "name : " + self.name + "\n"
res += "version : " + self.plan_version + "\n"
for i in self.beliefs.list():
res += "belief : " + i + "\n"
for i in self.desires.list():
res += "desire : " + i + "\n"
for i in self.intentions.list():
res += "intention : " + i + "\n"
return res
def get_name(self):
return self.name
def generate_plan(self):
"""
Main logic in class which generates a plan
"""
print("generating plan... TODO")
def load_plan(self, fname):
""" read the list of thoughts from a text file """
with open(fname, "r") as f:
for line in f:
if line != '':
tpe, txt = self.parse_plan_from_string(line)
#print('tpe= "' + tpe + '"', txt)
if tpe == 'name':
self.name = txt
elif tpe == 'version':
self.plan_version = txt
elif tpe == 'belief':
self.beliefs.add(txt)
elif tpe == 'desire':
self.desires.add(txt)
elif tpe == 'intention':
self.intentions.add(txt)
def save_plan(self, fname):
with open(fname, "w") as f:
f.write("# AIKIF Plan specification \n")
f.write("name :" + self.name + "\n")
f.write("version :" + self.plan_version + "\n")
for txt in self.beliefs.list():
f.write("belief :" + txt + "\n")
for txt in self.desires.list():
f.write("desire :" + txt + "\n")
for txt in self.intentions.list():
f.write("intention :" + txt + "\n")
def parse_plan_from_string(self, line):
tpe = ''
txt = ''
if line != '':
if line[0:1] != '#':
parts = line.split(":")
tpe = parts[0].strip()
txt = parts[1].strip()
return tpe, txt
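    # Illustrative example (not part of the original source): a plan-file line
    # such as "belief : the sky is blue" parses to ('belief', 'the sky is blue'),
    # while comment lines starting with '#' (and empty strings) come back as
    # ('', '').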
def add_resource(self, name, tpe):
"""
add a resource available for the plan. These are text strings
of real world objects mapped to an ontology key or programs
from the toolbox section (can also be external programs)
"""
self.resources.append([name, tpe])
def add_constraint(self, name, tpe, val):
"""
adds a constraint for the plan
"""
self.constraint.append([name, tpe, val])
class Thoughts(object):
""" base class for beliefs, desires, intentions simply
to make it easier to manage similar groups of objects """
def __init__(self, thought_type):
#print("Thoughts - init: thought_type = " + thought_type + "\n")
self._thoughts = []
self._type = thought_type
def __str__(self):
res = ' -- Thoughts --\n'
for i in self._thoughts:
res += i + '\n'
return res
def add(self, name):
self._thoughts.append(name)
def list(self, print_console=False):
lst = []
for i, thought in enumerate(self._thoughts):
if print_console is True:
print(self._type + str(i) + ' = ' + thought)
lst.append(thought)
return lst
class Beliefs(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Beliefs, self).__init__('belief')
class Desires(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Desires, self).__init__('desire')
class Intentions(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Intentions, self).__init__('intention')
def TEST():
myplan = Plan_BDI('new plan', '')
myplan.beliefs.add('belief0')
myplan.beliefs.add('belief1')
myplan.beliefs.add('belief2')
myplan.desires.add('desire0')
myplan.desires.add('desire1')
myplan.intentions.add('intention0')
myplan.beliefs.list()
myplan.desires.list()
myplan.intentions.list()
#myplan.save_plan("test_plan.txt")
#myplan.load_plan("test_plan.txt")
print(str(myplan))
if __name__ == '__main__':
TEST()
|
SIM-TU-Darmstadt/mbslib
|
dependencies/mbstestlib/src/testsetXML2intermediateConverter.py
|
Python
|
lgpl-3.0
| 9,479
| 0.007068
|
#!/usr/bin/python
#===============================================================================
#
# conversion script to create an mbstestlib-readable file containing test specifications
# out of a testset file in XML format
#
#===============================================================================
# Input can be given via optional command line parameters.
#
#
# TODO: add check for joint count
# TODO: add model description to output (as comment)
import sys # for io
import xml.dom.minidom # for xml parsing
from glob import glob # for expanding wildcards in cmd line arguements
class _config:
default_input_file = 'testset-example.xml'
output_file_ext = '.txt'
empty_vecn = ""
zero_vec = "0 0 0"
unity_mat = "1 0 0 0 1 0 0 0 1"
case_defaults = { 'delta': "0.001",
'base_r': zero_vec,
'base_R': unity_mat,
'base_v': zero_vec,
'base_omega': zero_vec,
'base_vdot': zero_vec,
'base_omegadot': zero_vec,
'gravitiy': zero_vec,
'joints_q': empty_vecn,
'joints_qdot': empty_vecn,
'joints_qdotdot': empty_vecn,
'joints_tau': empty_vecn,
'tcp_r': zero_vec,
'tcp_R': unity_mat,
'tcp_v': zero_vec,
'tcp_omega': zero_vec,
'tcp_vdot': zero_vec,
'tcp_omegadot': zero_vec,
'f_ext': zero_vec,
'n_ext': zero_vec
}
case_output_order = [
'delta',
'base_r',
'base_R',
'base_v',
'base_omega',
'base_vdot',
'base_omegadot',
'gravitiy',
'joints_q',
'joints_qdot',
'joints_qdotdot',
'joints_tau',
'tcp_r',
'tcp_R',
'tcp_v',
'tcp_omega',
'tcp_vdot',
'tcp_omegadot',
'f_ext',
'n_ext'
]
class _state:
error_occured_while_processing_xml = False
input_file = ''
def getText(nodelist):
# str(method.childNodes[0].nodeValue) # TODO: remove
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# inspired by http://code.activestate.com/recipes/52306-to-sort-a-dictionary/
def sortedDict(adict):
return [ adict[k] for k in sorted(adict.keys()) ]
# parses a specific node and either stores its value in a dict or the default value
# may set the error bit
def parse_opt(nodename, valuetype, current_case, current_case_value_dict):
# if the node does not exist use the default value
nodelist = current_case.getElementsByTagName(nodename)
if nodelist.length == 0:
current_case_value_dict.update({nodename : _config.case_defaults.get(nodename)})
elif nodelist.length > 1:
_state.error_occured_while_processing_xml = True
print("'" + nodename + "' defined more than once.")
return
else:
# we have one single node to parse
node = nodelist[0]
value = node.getAttribute(valuetype)
        if not value:  # minidom's getAttribute() returns '' for a missing attribute, never None
# TODO: more advanced checks with regexp
_state.error_occured_while_processing_xml = True
print("'" + nodename + "' has an empty value or wrong type ('"+ valuetype +"').")
return
else :
current_case_value_dict.update({nodename : value})
return
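# Illustrative call (mirrors the usage further below; the XML layout shown is
# an assumption): parse_opt('delta', 'scalar', case, case_value_dict) looks for
# a child such as <delta scalar="0.002"/> inside the current <case> element and
# stores either that attribute value or the default "0.001" under the 'delta' key.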
def convert_xml_testset_2_raw_testset(mbs_test_set):
raw_testsets = dict([]) # filename:content dict
for mbs in mbs_test_set.getElementsByTagName('mbs'): # for every file
file = mbs.getAttribute('file')
raw_testset = []
if mbs.getElementsByTagName('model').length != 1:
_state.error_occured_while_processing_xml = True
print("Only one model allowed per file!")
return dict([])
# extract model
raw_testset.append("% " + mbs.getElementsByTagName('model')[0].getAttribute('desc'))
raw_testset.append(getText(mbs.getElementsByTagName('model')[0].childNodes))
# insert separation marker
raw_testset.append("\nendmodel")
# now process the cases
        if mbs.getElementsByTagName('case').length == 0:
            _state.error_occured_while_processing_xml = True
            print("No cases defined!")
return dict([])
cases = dict([])
for case in mbs.getElementsByTagName('case'):
# TODO: sanity check -> number collisions
# parse case
case_nr = case.getAttribute('nr')
case_desc = case.getAttribute('desc')
case_value_dict = dict([])
# everything but joints does not have to be defined explicitly
# TODO: unify these calls in a generic way (e.g. add type to case_output_order and iterate over it)
parse_opt('delta', 'scalar', case, case_value_dict)
parse_opt('base_r', 'vector3', case, case_value_dict)
parse_opt('base_R', 'matrix3x3', case, case_value_dict)
parse_opt('base_v', 'vector3', case, case_value_dict)
parse_opt('base_omega', 'vector3', case, case_value_dict)
parse_opt('base_vdot', 'vector3', case, case_value_dict)
parse_opt('base_omegadot', 'vector3', case, case_value_dict)
parse_opt('gravitiy', 'vector3', case, case_value_dict)
# TODO: checks with n (the number of joints)
parse_opt('joints_q', 'vector_n', case, case_value_dict)
parse_opt('joints_qdot', 'vector_n', case, case_value_dict)
parse_opt('joints_qdotdot', 'vector_n', case, case_value_dict)
parse_opt('joints_tau', 'vector_n', case, case_value_dict)
parse_opt('tcp_r', 'vector3', case, case_value_dict)
parse_opt('tcp_R', 'matrix3x3', case, case_value_dict)
parse_opt('tcp_v', 'vector3', case, case_value_dict)
parse_opt('tcp_omega', 'vector3', case, case_value_dict)
parse_opt('tcp_vdot', 'vector3', case, case_value_dict)
parse_opt('tcp_omegadot', 'vector3', case, case_value_dict)
parse_opt('f_ext', 'vector3', case, case_value_dict)
parse_opt('n_ext', 'vector3', case, case_value_dict)
if _state.error_occured_while_processing_xml: return dict([])
# compile raw case output
case_content = ["\n" + case_desc]
for value_name in _config.case_output_order:
if case_value_dict.get(value_name) is None :
_state.error_occured_while_processing_xml = True
print("Not all values defined in one testcase!")
return dict([])
case_content.append(case_value_dict.get(value_name))
cases.update({case_nr : "\n".join(case_content)})
# flatten cases (and sort)
raw_testset.append("\n".join(sortedDict(cases)))
# update file:testset dict
raw_testsets.update({file : "\n".join(raw_testset)})
# return the dict of files:testsets
return raw_testsets
#===============================================================================
# process command line arguments (i.e. file i/o)
#===============================================================================
script_name = sys.argv[0][sys.argv[0].rfind("\\")+1:]
if len(sys.argv) == 1:
_state.input_file = _config.default_input_file
print("No command line arguments were given. Defaulting to:")
print("Input '"
|
vgrem/Office365-REST-Python-Client
|
office365/sharepoint/files/move_operations.py
|
Python
|
mit
| 105
| 0
|
class MoveOperations:
"""Specifies criteria for how to move files.
|
"""
no
|
ne = 0
overwrite = 1
|
livni/old-OK
|
src/knesset/mks/migrations/0004_add_members_residence.py
|
Python
|
bsd-3-clause
| 6,016
| 0.009142
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Member.area_of_residence'
db.add_column('mks_member', 'area_of_residence', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True), keep_default=False)
# Adding field 'Member.place_of_residence'
db.add_column('mks_member', 'place_of_residence', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Member.area_of_residence'
db.delete_column('mks_member', 'area_of_residence')
# Deleting field 'Member.place_of_residence'
db.delete_column('mks_member', 'place_of_residence')
models = {
'mks.correlation': {
'Meta': {'object_name': 'Correlation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m1'", 'to': "orm['mks.Member']"}),
'm2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m2'", 'to': "orm['mks.Member']"}),
'normalized_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_same_party': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'current_party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'blank': 'True', 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.weeklypresence': {
'Meta': {'object_name': 'WeeklyPresence'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"})
}
}
complete_apps = ['mks']
|
zestedesavoir/Python-ZMarkdown
|
zmarkdown/blockparser.py
|
Python
|
bsd-3-clause
| 3,639
| 0
|
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels if nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
        self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
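# Minimal usage sketch (illustrative, not part of the original module): the
# parser pushes a state before descending into a nested block and pops it on
# the way out, e.g.
#
#     state = State()
#     state.set('looselist')         # entering a nested block
#     state.isstate('looselist')     # -> True
#     state.reset()                  # leaving the block again
#     state.isstate('looselist')     # -> False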
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
    A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self, zmarkdown):
self.blockprocessors = odict.OrderedDict()
self.state = State()
self.zmarkdown = zmarkdown
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent
Element) is created and the root element is passed to the parser
as the parent. The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = util.etree.Element(self.zmarkdown.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return util.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
if self.zmarkdown.inline:
self.blockprocessors["paragraph"].run(parent, [text])
else:
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used
internally.
This is a public method as an extension may need to add/alter
additional BlockProcessors which call this method to recursively
parse a nested block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break
|
thesecretlab/snippet-expander
|
tests/test_source_document.py
|
Python
|
mit
| 1,336
| 0.004491
|
import unittest
from source_document import SourceDocument
from test_tagged_document import create_test_repo
from tagged_document import TaggedDocument
class SourceDocumentTests(unittest.TestCase):
"""Unit tests for the Document class"""
def test_cleaning(self):
# Tests removing snippets
input_path = "tests/sample-expanded.txt"
reference_path = "tests/sample.txt"
reference_text = open(reference_path, "r").read()
document = SourceDocument(input_path)
        self.assertEqual(document.cleaned_contents, reference_text)
def test_finding_documents(self):
found_documents = SourceDocument.find("tests", ["txt"])
self.assertTrue(len(found_documents) == 7)
    def test_processing(self):
# Tests rendering a snippet using tagged documents.
repo = create_test_repo()
tagged_documents = TaggedDocument.find(repo, ["txt"])
self.assertTrue(tagged_documents)
input_path = "tests/sample.txt"
reference_path = "tests/sample-expanded.txt"
reference_text = open(reference_path, "r").read()
source = SourceDocument(input_path)
rendered_output = source.render(tagged_documents, language="swift",show_query=False)
self.assertEqual(rendered_output, (reference_text, True))
|
synw/django-alapage
|
alapage/management/commands/create_homepage.py
|
Python
|
mit
| 603
| 0.006633
|
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from alapage.models import Page
class Command(BaseCommand):
help = 'Creates a homepage'
def handle(self, *args, **options):
content = ""
#~ check if home exists
        home_exists = Page.objects.filter(url='/').exists()
#~ create page
if not home_exists:
Page.objects.create(url='/', title='Home', content=content)
print("Homepage created")
else:
print("The homepage already exists with root url")
return
|
dsaldana/phantoms_soccer2d
|
phantom_team/run_agent.py
|
Python
|
gpl-2.0
| 1,609
| 0.001243
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# GNU General Public License v2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ----------------------------------------------------------------------------
import time
import sys
from players.atack_agent import AtackAgent
# sys.path.append("../")
PORT = 6000
HOST = "localhost"
"""
Run an agent
"""
if __name__ == "__main__":
# enforce current number of arguments, print help otherwise
if len(sys.argv) == 2:
print "args: ./run_team.py <team_name>"
#Get team name from arguments
team_name = sys.argv[1]
else:
team_name = "default"
AtackAgent().connect(HOST, PORT, team_name).play()
# wait until killed to terminate agent processes
try:
while 1:
time.sleep(0.05)
except KeyboardInterrupt:
print "Exiting."
sys.exit()
|
nkashy1/elastic-boogaloo
|
example.py
|
Python
|
mit
| 706
| 0.001416
|
from elastic_boogaloo import classifiers, distributions, scorers
from elasticsearch import Elasticsearch
es_client = Elasticsearch('localhost:9200')
scorer = scorers.ElasticsearchIndexTopScorer(es_client, 'megacorp')
positive_distribution = distributions.ExponentialDistribution()
negative_distribution = distributions.ExponentialDistribution()
classifier = classifiers.UnopinionatedBinaryClassifier(scorer, positive_distribution, negative_distribution)
print('Training douglas as positive...')
classifier.train_positive('douglas')
print('Done')
print('Probability of douglas being positive:', classifier.classify('douglas'))
print('Probability of rock being positive:', classifier.classify('rock'))
|
manhhomienbienthuy/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
Python
|
bsd-3-clause
| 1,272
| 0
|
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure("scikit-learn parallel %s benchmark results" % func.__name__)
    plt.plot(sample_sizes, one_core, label="one core")
    plt.plot(sample_sizes, multi_core, label="multi core")
    plt.xlabel("n_samples")
    plt.ylabel("Time (s)")
plt.title("Parallel %s" % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
|
rananda/cfme_tests
|
cfme/tests/infrastructure/test_advanced_search_providers.py
|
Python
|
gpl-2.0
| 10,078
| 0.002977
|
# -*- coding: utf-8 -*-
"""This testing module tests the behaviour of the search box in the Provider section
It does not check for filtering results so far."""
import fauxfactory
import pytest
from selenium.common.exceptions import NoSuchElementException
from cfme.infrastructure import host
from cfme.infrastructure.provider import InfraProvider
# TODO: we should not call out to utils here, but maybe rather have an infra setup provider fixture
from fixtures.pytest_store import store
from utils.providers import setup_a_provider_by_class
from utils.appliance.implementations.ui import navigate_to
from utils.log import logger
from cfme.web_ui import search
from cfme.web_ui.search import DisabledButtonException
from cfme.web_ui.cfme_exception import (assert_no_cfme_exception,
is_cfme_exception, cfme_exception_text)
pytestmark = [pytest.mark.usefixtures("setup_cleanup_search"), pytest.mark.tier(3)]
@pytest.fixture(scope="module")
def single_provider():
"""Ensure the infra provider is setup"""
try:
return setup_a_provider_by_class(InfraProvider)
except Exception as ex:
pytest.skip("Exception while setting up providers, therefore skipping: {}".format(ex))
@pytest.fixture(scope="module")
def hosts_with_vm_count(hosts):
"""Returns a list of tuples (hostname, vm_count)"""
hosts_with_vm_count = []
for host_name in hosts:
hosts_with_vm_count.append((host_name, int(host.find_quadicon(host_name, True).no_vm)))
return sorted(hosts_with_vm_count, key=lambda tup: tup[1])
@pytest.yield_fixture(scope="function")
def setup_cleanup_search():
"""Navigate to InfraProvider, clear search on setup and teardown"""
navigate_to(InfraProvider, 'All')
search.ensure_no_filter_applied()
yield
# cleanup after test
search.ensure_no_filter_applied()
search.ensure_advanced_search_closed()
@pytest.yield_fixture(scope="function")
def rails_delete_filter(request):
"""Introspect a function bound filter_name and use ssh_client and rails to delete it"""
# No pre-test, just cleanup after yield
yield
filter_name = getattr(request.function, "filter_name", None)
logger.debug('rails_delete_filter: calling rails to delete filter: {}'.format(filter_name))
if filter_name:
try:
store.current_appliance.ssh_client.run_rails_command(
'"MiqSearch.where(:description => {}).first.delete"'.format(repr(filter_name)))
except Exception as ex:
logger.warning('rails_delete_filter: exception during delete. {}'.format(ex))
pass
else:
logger.warning('rails_delete_filter: failed to get filter_name')
def test_can_do_advanced_search(single_provider):
navigate_to(InfraProvider, 'All')
assert search.is_advanced_search_possible(), "Cannot do advanced search here!"
@pytest.mark.requires("test_can_do_advanced_search")
def test_can_open_advanced_search(single_provider):
navigate_to(InfraProvider, 'All')
search.ensure_advanced_search_open()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_without_user_input(single_provider):
# Set up the filter
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input(single_provider):
# Set up the filter
logger.debug('DEBUG: test_with_user_input: fill and apply')
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=)",
fill_callback={"COUNT": 0})
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input_and_cancellation(single_provider):
# Set up the filter
search.fill_and_apply_filter(
"fill_count(Infrastructure Provider.VMs, >=)", fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_cancel(single_provider, rails_delete_filter):
    # bind filter_name to the function for fixture cleanup
test_filter_save_cancel.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_cancel.filter_name))
# Try save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_cancel.filter_name, cancel=True)
assert_no_cfme_exception()
assert search.reset_filter()
# Exception depends on system state - Load button will be disabled if there are no saved filters
with pytest.raises((DisabledButtonException, NoSuchElementException)):
search.load_filter(saved_filter=test_filter_save_cancel.filter_name)
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_load(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
# Reset filter
assert search.reset_filter()
# Load filter
assert search.load_filter(test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_cancel_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_cancel_load.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
# Load and cancel
assert search.load_filter(test_filter_save_and_cancel_load.filter_name, cancel=True)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load_with_user_input(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load_with_user_input.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(
test_filter_save_and_cancel_load_with_user_input.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_and_cancel_load_with_user_input.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
search.load_and_apply_filter(
test_filter_save_and_cancel_load_with_user_input.filter_name,
fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
def test_quick_search_without_filter(request, single_provider):
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_quick_search_with_filter(request, single_provider):
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_can_delete_filter(single_provider):
filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(filter_name))
assert search.save_
|
weijia/djangoautoconf
|
djangoautoconf/management/commands/create_default_super_user.py
|
Python
|
bsd-3-clause
| 844
| 0.003555
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from djangoautoconf.local_key_manager import get_default_admin_username, \
    get_default_admin_password
from djangoautoconf.management.commands.web_manage_tools.user_creator import create_admin
def create_default_admin():
super_username = get_default_admin_username()
super_password = get_default_admin_password()
if not User.objects.filter(username=super_username).exists():
create_admin(super_username, super_password, "r@j.cn")
print("default admin created")
else:
print("default admin already created")
class Command(BaseCommand):
args = ''
help = 'Create command cache for environment where os.listdir is not working'
def handle(self, *args, **options):
create_default_admin()
|
mattrobenolt/warehouse
|
warehouse/search/indexes.py
|
Python
|
apache-2.0
| 3,926
| 0
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import hashlib
import os
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index
from warehouse.utils import AttributeDict
class Index(object):
_index = "warehouse"
def __init__(self, models, config):
self.models = models
self.config = config
self.es = Elasticsearch(
hosts=self.config.hosts,
**self.config.get("client_options", {})
)
self.types = AttributeDict()
def register(self, type_):
obj = type_(self)
self.types[obj._type] = obj
def reindex(self, index=None, alias=True, keep_old=False):
# Generate an Index Name for Warehouse
index = "".join([
index if index is not None else self._index,
hashlib.md5(os.urandom(16)).hexdigest()[:8],
])
# Create this index
self.es.indices.create(index, {
"mappings": {
doc_type._type: doc_type.get_mapping()
for doc_type in self.types.values()
},
})
# Index everything into the new index
for doc_type in self.types.values():
doc_type.index_all(index=index)
# Update the alias unless we've been told not to
if alias:
self.update_alias(self._index, index, keep_old=keep_old)
def update_alias(self, alias, index, keep_old=False):
# Get the old index from ElasticSearch
try:
old_index = self.es.indices.get_alias(self._index).keys()[0]
except TransportError as exc:
if not exc.status_code == 404:
raise
old_index = None
# Remove the alias to the old index if it exists
if old_index is not None:
actions = [{"remove": {"index": old_index, "alias": alias}}]
else:
actions = []
# Add the alias to the new index
actions += [{"add": {"index": index, "alias": alias}}]
# Update To the New Index
self.es.indices.update_aliases({"actions": actions})
# Delete the old index if it exists and unless we're keeping it
if not keep_old and old_index is not None:
self.es.indices.delete(old_index)
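# A usage sketch of the zero-downtime re-index pattern the Index class implements
# above (hypothetical `models`/`config` objects and a hypothetical BaseMapping
# subclass named ProjectMapping):
#
#   index = Index(models, config)
#   index.register(ProjectMapping)   # hypothetical mapping type
#   index.reindex()                  # build a fresh index, swap the alias, drop the old one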
class BaseMapping(object):
SEARCH_LIMIT = 25
def __init__(self, index):
self.index = index
def get_mapping(self):
raise NotImplementedError
def get_indexable(self):
raise NotImplementedError
def extract_id(self, item):
raise NotImplementedError
def extract_document(self, item):
raise NotImplementedError
def index_all(self, index=None):
# Determine which index we are indexing into
_index = index if index is not None else self.index._index
# Bulk Index our documents
bulk_index(
self.index.es,
[
{
"_index": _index,
"_type": self._type,
"_id": self.extract_id(item),
"_source": self.extract_document(item),
}
for item in self.get_indexable()
],
)
def search(self, query):
raise NotImplementedError
|
ShaunKarran/homesense
|
esp8266/micropython/main.py
|
Python
|
gpl-3.0
| 403
| 0
|
import ujson
def json_load(file_name):
with open(file_name, 'r') as f:
data = ujson.loads(f.read())
return data
def json_dump(file_name, data):
with open(file_name, 'w') as f:
f.write(ujson.dumps(data))
test_dict = {
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
}
json_dump('test.json', test_dict)
test_load = json_load('test.json')
print(test_load)
|
EichlerLab/read_depth_genotyper
|
scripts/make_ml_output_summary.py
|
Python
|
mit
| 15,332
| 0.028111
|
import sys
import socket
import os
import os.path
from optparse import OptionParser
#import scipy as scp
import numpy as np
import matplotlib.pyplot as plt
import pylab
import genome_management.kg_file_handling as kgf
import math
def file_exists(ls,file):
for f in ls:
if(f==file):
return 1
return 0
def mkdir(dir,file):
ls_dir = os.listdir(dir)
if(not(file_exists(ls_dir,file))):
command = "mkdir %s/%s"%(dir,file)
os.system(command)
return "%s/%s"%(dir,file)
class region_info:
def __init__(self,name,chr,start,end,TID):
self.name = name
self.chr = chr
self.start = start
self.end = end
self.frequencies_by_pop = {}
self.cps_by_genome = {}
self.transcript_id = TID
self.TID = TID
self.cps_all = []
self.pop_by_genome = {}
def add_info_from_genome(self,cp,genome):
if(not(genome.pop in self.frequencies_by_pop)):
self.frequencies_by_pop[genome.pop] = []
self.frequencies_by_pop[genome.pop].append(cp)
self.cps_by_genome[genome.genome_name] = cp
self.pop_by_genome[genome.genome_name] = genome.pop
self.cps_all.append(cp)
#def get_var(self):
# self.vars = {}
#self.cps_all = np.array(self.cps_all)
# varT = self.cps_all.var()
# self.vars["all"]=varT
# self.means = {}
# meanT = self.cps_all.mean(1)
# self.means["all"] = meanT
# for pop,copies_by_pop in self.frequencies_by_pop.iteritems():
# copies_by_pop = np.array(copies_by_pop)
# self.vars[pop] = self.summary[:,pop_index].var(1)
# self.means[pop] = self.summary[:,pop_index].mean(1)
# self.vsts = {}
# self.fsts = {}
# for pop,pop_index in self.indivs_by_pop.iteritems():
# for pop_2,pop_index_2 in self.indivs_by_pop.iteritems():
# n_pop = float(pop_index.shape[0])
# n_pop_2 = float(pop_index_2.shape[0])
# both_pops = np.r_[self.indivs_by_pop[pop],self.indivs_by_pop[pop_2]]
# var_both = self.summary[:,both_pops].var(1)
# N = n_pop+n_pop_2
# self.vsts["_".join([pop,pop_2])] = (var_both - ((self.vars[pop]*n_pop+self.vars[pop_2]*n_pop_2)/N)) / var_both
def make_output_file(region,region_info,outdir,cell_line_info,genome_info):
outfile_name = "%s/%s_pop_summary.csv"%(outdir,region_info.name)
FOUT = open(outfile_name,'w')
FOUT.write("indiv,cp,pop,cell lines fixed, cell lines in Nitrogen,coverage\n")
for indiv,cp in region_info.cps_by_genome.iteritems():
pop = region_info.pop_by_genome[indiv]
output = indiv in cell_line_info and cell_line_info[indiv] or ""
output = "%s,%d,%s,%s,%f\n"%(indiv,cp,pop,output,genome_info.genomes[indiv].coverage)
FOUT.write(output)
print output
def make_simple_plot(region,region_info,outdir,cell_line_info,genome_info):
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
ax = f.add_axes(rect1)
colors = {'Yoruba':'r','European':'b','Asian':'g'}
for indiv,cp in region_info.cps_by_genome.iteritems():
cvg = genome_info.genomes[indiv].coverage
fixed_cell_line = cell_line_info[indiv].split(",")[0].rstrip() == "yes"
liquid_nitrogen_cell_line = cell_line_info[indiv].split(",")[1].rstrip() == "yes"
color = colors[genome_info.genomes[indiv].pop]
ax.plot(np.array([cvg]),np.array([cp]),'%so'%(color))
ax.set_xlabel("cvg",size=20)
ax.set_ylabel("copy",size=20)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_copy_vs_cvg.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
def make_histogram(region,region_info,outdir,great_ape_gene_hashes):
print region_info.name
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(10)
f.set_figheight(10)
nbins=0
mx=0
mn=100
do_apes=True
great_ape_cps = {}
if do_apes:
for ape,gene_hash in great_ape_gene_hashes.iteritems():
if not region_info.TID in gene_hash:
do_apes=False
print "ID does not exist for APE"
print region_info.TID
break
great_ape_cps[ape] = gene_hash[region_info.TID]
mx=int(max(great_ape_cps[ape],mx))
mn=int(min(great_ape_cps[ape],mn))
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
#nbins = int(round(max(nbins,max(freq_info))))
mx=int(max(max(freq_info),mx))
mn=int(min(min(freq_info),mn))
#nbins+=1
nbins = mx-mn+1
labels = []
pop_to_hists = {}
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
print pop,freq_info
pop_to_hists[pop] = np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)[0]
print np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)
print pop_to_hists[pop]
x = np.arange(mn,mx+1)
width=.25
print x
for i in range(x.shape[0]):
labels.append(str(x[i]))
ax = f.add_axes(rect1)
bars = {}
leg = []
leg_colors = []
lines = []
k=0
colors = ['r','g','b','o']
starty = .9
sub=.03
i=0
for pop,freqs in region_info.frequencies_by_pop.iteritems():
med = np.median(np.array(freqs))
sig2 = np.array(freqs).var()
leg.append("%s med: %d var: %.1f"%(pop,int(med),sig2))
i+=1
for pop,hist in pop_to_hists.iteritems():
bars[pop] = ax.bar(x+k*width,hist,width,color=colors[k],alpha=0.5)
leg_colors.append(colors[k])
#ax.legend(bars[pop][0],pop)
lines.append(bars[pop][0])
k+=1
ape_colors = ['orange','purple','yellow','brown']
k=0
if do_apes:
for ape,cp in great_ape_cps.iteritems():
bars_ape = ax.bar(np.array([cp]),np.array([.1]),width/2,color=ape_colors[k],alpha=.8)
leg.append("%s %f"%(ape,cp))
lines.append(bars_ape[0])
k+=1
ax.set_xticks(x+width*k/2)
ax.set_xticklabels(labels,size=20)
ax.grid(color='k',linestyle='--',linewidth=1,alpha=.3)
yticklabels = [str(x) for x in np.arange(0,1,.1)]
ax.set_yticklabels(yticklabels,size=20)
ax.set_ylabel("%",size=20)
ax.set_xlabel("cp number",size=20)
ax.legend(lines,leg)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_pop_hist.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
return
k=0
for pop,ihist in percent_hists.iteritems():
percent_hists[pop] = ihist/ihist.sum()
#jhplot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/3,zeros,percent_hists[pop],color=colors[k],linewidth=7)
k+=1
leg.append(pop)
    #legend(leg)
title("percent")
print leg
legend(leg)
f.get_axes()[0].xaxis.set_ticks(range(21))
    #f.add_axes([0,40,0,1],xticks=[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20],label='axis2',axisbg='g')
#[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20])
f=figure(2)
k=0
for pop,ihist in mode_hists.iteritems():
mode_hists[pop] = ihist/ihist.sum()
#plot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/5,zeros,mode_hists[pop],color=colors[k],linewidth=7)
k+=1
legend(leg)
title("Predicted copy number %s"%(name))
xlabel("predicted copy number")
ylabel("percentage of population")
f.get_axes()[0].xaxis.set_ticks(range(21))
savefig("%smode_hist.png"%(name),format='png')
print percent_hists
print mode_hists
def load_plot_regions(fn_regions):
if fn_regions == None: return []
|
twitter/pants
|
contrib/python/src/python/pants/contrib/python/checks/checker/indentation.py
|
Python
|
apache-2.0
| 1,262
| 0.008716
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import tokenize
from pants.contrib.python.checks.checker.common import CheckstylePlugin
# TODO(wickman) Update this to sanitize line continuation styling as we have
# disabled it from pycodestyle.py due to mismatched indentation styles.
class Indentation(CheckstylePlugin):
"""Enforce proper indentation."""
@classmethod
def name(cls):
return 'indentation'
INDENT_LEVEL = 2 # the one true way
def nits(self):
indents = []
for token in self.python_file.tokens:
token_type, token_text, token_start = token[0:3]
if token_type is tokenize.INDENT:
last_indent = len(indents[-1]) if indents else 0
current_indent = len(token_text)
if current_indent - last_indent != self.INDENT_LEVEL:
yield self.error('T100',
'Indentation of {} instead of {}'.format(
current_indent - last_indent, self.INDENT_LEVEL),
token_start[0])
indents.append(token_text)
elif token_type is tokenize.DEDENT:
indents.pop()
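# Example of input this plugin would flag (with INDENT_LEVEL = 2 as above):
#
#   def f():
#       return 1   # 4-space indent -> nit T100 "Indentation of 4 instead of 2"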
|
martazaryn/green-roofs-mappping
|
Green_Roof_MapPy/warsaw/views.py
|
Python
|
mit
| 3,276
| 0.025946
|
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.db.models import Q # import for AJAX / dynamic searching
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import render, render_to_response
from django.views import View
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.urls import reverse_lazy, reverse
from .forms import AuthForm, SearchForm, AddGreenRoofForm
from warsaw.models import GreenRoof, District, City
class WarsawView(View):
def get(self, request):
return render(request, 'warsaw/index.html')
greenroofs = GreenRoof.objects.order_by('roof_address')
template = loader.get_template('warsaw/index.html')
context = RequestContext(request, {
'greenroofs': greenroofs, 'content': render_to_string('warsaw/index.html', {'waypoints': waypoints})
})
return HttpResponse(template.render(context))
class LoginView(View):
def get(self, request):
form = AuthForm()
ctx = {'form' : form}
return render(request, 'warsaw/login.html', ctx)
def post(self, request):
form = AuthForm(data=request.POST)
ctx = {'form' : form}
if form.is_valid():
user = form.cleaned_data['user']
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
return render(request, 'warsaw/login.html', ctx)
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def detail(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
    return render_to_response('polls/detail.html', {'poll': p}, context_instance=RequestContext(request))
# Needs expansion to render an input widget for the MultiPolygonField (currently the form shows the "Mpoly" label but no input field)
class AddGreenRoofView(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
permission_required = ['warsaw.add_greenroof']
raise_exception = True
model = GreenRoof #remember to import class
form_class = AddGreenRoofForm
def handle_no_permission(self):
if not self.request.user.is_authenticated:
return HttpResponseRedirect(reverse('login'))
else:
return super().handle_no_permission()
class DeleteGreenRoofView(DeleteView):
model = GreenRoof
success_url = reverse_lazy('index')
class GreenRoofSearchView(View):
def get(self, request):
ctx = {'form' : SearchForm()}
return render(request, 'warsaw/gr_search_form.html', ctx)
def post(self, request):
form = SearchForm(data=request.POST)
ctx = {'form' : form}
print('Form is valid', form.is_valid())
if form.is_valid():
address = form.cleaned_data['address']
greenroofs = GreenRoof.objects.filter(roof_address__icontains=address)
print(greenroofs)
ctx['results'] = greenroofs
return render(request, 'warsaw/gr_results_form.html', ctx)
class GreenRoofView(DetailView):
model = GreenRoof
fields = '__all__'
class UpdateGreenRoofView(UpdateView):
model = GreenRoof
fields = '__all__'
template_name_suffix = '_update_form'
class DeleteGreenRoofView(DeleteView):
model = GreenRoof
success_url = reverse_lazy('index')
|
pudo/aleph
|
aleph/logic/collections.py
|
Python
|
mit
| 7,335
| 0.000273
|
import logging
from datetime import datetime
from collections import defaultdict
from servicelayer.jobs import Job
from aleph.core import db, cache
from aleph.authz import Authz
from aleph.queues import cancel_queue, ingest_entity, get_status
from aleph.model import Collection, Entity, Document, Mapping
from aleph.model import Permission, Events, EntitySet
from aleph.index import collections as index
from aleph.index import xref as xref_index
from aleph.index import entities as entities_index
from aleph.logic.notifications import publish, flush_notifications
from aleph.logic.documents import ingest_flush, MODEL_ORIGIN
from aleph.logic.aggregator import get_aggregator
log = logging.getLogger(__name__)
def create_collection(data, authz, sync=False):
now = datetime.utcnow()
collection = Collection.create(data, authz, created_at=now)
if collection.created_at == now:
publish(
Events.CREATE_COLLECTION,
params={"collection": collection},
channels=[collection, authz.role],
actor_id=authz.id,
)
db.session.commit()
return update_collection(collection, sync=sync)
def update_collection(collection, sync=False):
"""Update a collection and re-index."""
Authz.flush()
refresh_collection(collection.id)
return index.index_collection(collection, sync=sync)
def refresh_collection(collection_id):
"""Operations to execute after updating a collection-related
domain object. This will refresh stats and flush cache."""
cache.kv.delete(
cache.object_key(Collection, collection_id),
cache.object_key(Collection, collection_id, "stats"),
)
def get_deep_collection(collection):
mappings = Mapping.by_collection(collection.id).count()
entitysets = EntitySet.type_counts(collection_id=collection.id)
return {
"statistics": index.get_collection_stats(collection.id),
"counts": {"mappings": mappings, "entitysets": entitysets},
"status": get_status(collection),
"shallow": False,
}
def compute_collections():
"""Update collection caches, including the global stats cache."""
authz = Authz.from_role(None)
schemata = defaultdict(int)
countries = defaultdict(int)
categories = defaultdict(int)
for collection in Collection.all():
compute_collection(collection)
if authz.can(collection.id, authz.READ):
categories[collection.category] += 1
things = index.get_collection_things(collection.id)
for schema, count in things.items():
schemata[schema] += count
for country in collection.countries:
countries[country] += 1
log.info("Updating global statistics cache...")
data = {
"collections": sum(categories.values()),
"schemata": dict(schemata),
"countries": dict(countries),
"categories": dict(categories),
"things": sum(schemata.values()),
}
key = cache.key(cache.STATISTICS)
cache.set_complex(key, data, expires=cache.EXPIRE)
def compute_collection(collection, force=False, sync=False):
key = cache.object_key(Collection, collection.id, "stats")
if cache.get(key) is not None and not force:
return
refresh_collection(collection.id)
log.info("[%s] Computing statistics...", collection)
index.update_collection_stats(collection.id)
cache.set(key, datetime.utcnow().isoformat())
index.index_collection(collection, sync=sync)
def aggregate_model(collection, aggregator):
"""Sync up the aggregator from the Aleph domai
|
n model."""
log.debug("[%s] Aggregating model...", collection)
aggregator.delete(origin=MODEL_ORIGIN)
writer = aggregator.bulk()
for document in Document.by_collection(collection.id):
proxy = document.to_proxy(ns=collection.ns)
writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
for entity in Entity.by_collection(collection.id):
proxy = entity.to_proxy()
aggregator.delete(entity_id=proxy.id)
        writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
writer.flush()
def index_aggregator(
collection, aggregator, entity_ids=None, skip_errors=False, sync=False
):
def _generate():
idx = 0
entities = aggregator.iterate(entity_id=entity_ids, skip_errors=skip_errors)
for idx, proxy in enumerate(entities, 1):
if idx > 0 and idx % 1000 == 0:
log.debug("[%s] Index: %s...", collection, idx)
yield proxy
log.debug("[%s] Indexed %s entities", collection, idx)
entities_index.index_bulk(collection, _generate(), sync=sync)
def reingest_collection(collection, job_id=None, index=False, flush=True):
"""Trigger a re-ingest for all documents in the collection."""
job_id = job_id or Job.random_id()
if flush:
ingest_flush(collection)
for document in Document.by_collection(collection.id):
proxy = document.to_proxy(ns=collection.ns)
ingest_entity(collection, proxy, job_id=job_id, index=index)
def reindex_collection(collection, skip_errors=True, sync=False, flush=False):
"""Re-index all entities from the model, mappings and aggregator cache."""
from aleph.logic.mapping import map_to_aggregator
from aleph.logic.profiles import profile_fragments
aggregator = get_aggregator(collection)
for mapping in collection.mappings:
if mapping.disabled:
log.debug("[%s] Skip mapping: %r", collection, mapping)
continue
try:
map_to_aggregator(collection, mapping, aggregator)
except Exception:
# More or less ignore broken models.
log.exception("Failed mapping: %r", mapping)
aggregate_model(collection, aggregator)
profile_fragments(collection, aggregator)
if flush:
log.debug("[%s] Flushing...", collection)
index.delete_entities(collection.id, sync=True)
index_aggregator(collection, aggregator, skip_errors=skip_errors, sync=sync)
compute_collection(collection, force=True)
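# A minimal usage sketch of the re-index flow above (run inside an application
# context; iterating Collection.all() the same way compute_collections() does):
#
#   for collection in Collection.all():
#       reindex_collection(collection, flush=False, sync=False)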
def delete_collection(collection, keep_metadata=False, sync=False):
deleted_at = collection.deleted_at or datetime.utcnow()
cancel_queue(collection)
aggregator = get_aggregator(collection)
aggregator.delete()
flush_notifications(collection, sync=sync)
index.delete_entities(collection.id, sync=sync)
xref_index.delete_xref(collection, sync=sync)
Mapping.delete_by_collection(collection.id)
EntitySet.delete_by_collection(collection.id, deleted_at)
Entity.delete_by_collection(collection.id)
Document.delete_by_collection(collection.id)
if not keep_metadata:
Permission.delete_by_collection(collection.id)
collection.delete(deleted_at=deleted_at)
db.session.commit()
if not keep_metadata:
index.delete_collection(collection.id, sync=True)
aggregator.drop()
refresh_collection(collection.id)
Authz.flush()
def upgrade_collections():
for collection in Collection.all(deleted=True):
if collection.deleted_at is not None:
delete_collection(collection, keep_metadata=True, sync=True)
else:
compute_collection(collection, force=True)
# update global cache:
compute_collections()
|
MSC19950601/TextRank4ZH
|
textrank4zh/TextRank4Sentence.py
|
Python
|
mit
| 6,656
| 0.012715
|
#-*- encoding:utf-8 -*-
'''
Created on Dec 1, 2014
@author: letian
'''
import networkx as nx
from Segmentation import Segmentation
import numpy as np
import math
class TextRank4Sentence(object):
def __init__(self, stop_words_file = None, delimiters='?!;?!。;…\n'):
        '''
        `stop_words_file`: defaults to None, in which case the internal stop-word list is empty;
                           it can also be a file path (string) from which stop words are loaded.
        `delimiters`: defaults to `'?!;?!。;…\n'`; used to split the text into sentences.
        self.sentences: list of sentences.
        self.words_no_filter: two-level list obtained by word-segmenting every sentence in sentences.
        self.words_no_stop_words: two-level list obtained by removing stop words from words_no_filter.
        self.words_all_filters: two-level list obtained by keeping only words with the allowed
                                part-of-speech tags from words_no_stop_words.
        '''
        self.seg = Segmentation(stop_words_file=stop_words_file, delimiters=delimiters)
self.sentences = None
        self.words_no_filter = None  # 2-dimensional list
self.words_no_stop_words = None
self.words_all_filters = None
self.graph = None
self.key_sentences = None
def train(self, text, lower = False, speech_tag_filter=True,
              source = 'no_stop_words', sim_func = 'standard'):
        '''
        `text`: the text to summarize, as a string.
        `lower`: whether to convert the text to lower case. Defaults to False.
        `speech_tag_filter`: if True, the internal part-of-speech list is used to filter
                             words when building words_all_filters.
                             If False, words_all_filters is identical to words_no_stop_words.
        `source`: which of words_no_filter, words_no_stop_words, words_all_filters is used to
                  compute sentence similarity. Defaults to `'no_stop_words'`; valid values are
                  `'no_filter', 'no_stop_words', 'all_filters'`.
        `sim_func`: the function used to compute sentence similarity. Currently only one is
                    available, corresponding to the default value `standard`.
        '''
self.key_sentences = []
(self.sentences, self.words_no_filter, self.words_no_stop_words, self.words_all_filters) = self.seg.segment(text=text,
lower=lower,
speech_tag_filter=speech_tag_filter);
# -
# print self.sentences
if source == 'no_filter':
source = self.words_no_filter
elif source == 'all_filters':
source = self.words_all_filters
else:
source = self.words_no_stop_words
sim_func = self._get_similarity_standard
sentences_num = len(source)
self.graph = np.zeros((sentences_num, sentences_num))
for x in xrange(sentences_num):
for y in xrange(x, sentences_num):
similarity = sim_func(source[x], source[y])
self.graph[x, y] = similarity
self.graph[y, x] = similarity
# for x in xrange(sentences_num):
# row_sum = np.sum(self.graph[x, :])
# if row_sum > 0:
# self.graph[x, :] = self.graph[x, :] / row_sum
# print self.graph
nx_graph = nx.from_numpy_matrix(self.graph)
scores = nx.pagerank(nx_graph) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
# print sorted_scores
for index, _ in sorted_scores:
self.key_sentences.append(self.sentences[index])
# print '\n'.join(self.key_sentences)
def _get_similarity_standard(self, word_list1, word_list2):
        '''
        Default function for computing the similarity of two sentences.
        word_list1, word_list2: the two sentences, each given as a list of words.
        '''
vector1, vector2 =self._gen_vectors(word_list1, word_list2)
# print vector1, vector2
vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]
vector4 = [1 for num in vector3 if num > 0.]
co_occur_num = sum(vector4)
# print co_occur_num
if co_occur_num == 0.:
return 0.
        denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # denominator
if denominator == 0.:
return 0.
return co_occur_num / denominator
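    # Worked example of the formula above (hypothetical pair of 3-word sentences
    # sharing two words): co_occur_num = 2, denominator = log(3) + log(3) ≈ 2.20,
    # so the similarity is roughly 2 / 2.20 ≈ 0.91.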
def _gen_vectors(self, word_list1, word_list2):
        '''
        Convert two sentences into two vectors of equal size; the similarity of the
        sentences can then be computed from these vectors.
        word_list1, word_list2: the two sentences, each given as a list of words.
        '''
words = list(set(word_list1 + word_list2))
vector1 = [float(word_list1.count(word)) for word in words]
vector2 = [float(word_list2.count(word)) for word in words]
return vector1, vector2
def get_key_sentences(self, num = 6, sentence_min_len = 6):
        '''
        Return the num most important sentences of length >= sentence_min_len,
        for use in building a summary. Returns a list.
        '''
result = []
count = 0
for sentence in self.key_sentences:
if count >= num:
break
if len(sentence) >= sentence_min_len:
result.append(sentence)
count += 1
return result
if __name__ == '__main__':
import codecs
# text = codecs.open('../text/03.txt', 'r', 'utf-8').read()
text = "这间酒店位于北京东三环,里面摆放很多雕塑,文艺气息十足。答谢宴于晚上8点开始。"
tr4s = TextRank4Sentence(stop_words_file='../stopword.data')
tr4s.train(text=text, speech_tag_filter=True, lower=True, source = 'all_filters')
print '\n'.join(tr4s.get_key_sentences(num=1))
print '\n'.join(tr4s.sentences)
for wl in tr4s.words_no_filter:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_no_stop_words:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_all_filters:
print '[', ', \''.join(wl), ']'
|
hackultura/procult
|
procult/settings.py
|
Python
|
gpl-2.0
| 7,162
| 0.00014
|
# coding=utf-8
"""
Django settings for procult project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
from django.conf import settings
import raven
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
'SECRET_KEY',
'ru@3uj@@mm#(#s8_=$%h$=f+v75&8@s$dzz8-7$07-r85l0b+6'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', False)
ALLOWED_HOSTS = os.getenv('ALLOWED_DOMAIN', 'localhost').split(',')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'raven.contrib.django.raven_compat',
'rest_framework',
'rest_localflavor',
'import_export',
'procult.authentication',
'procult.core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'procult.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'procult.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# In addition, dj-database-url configures the database from the
# DATABASE_URL environment variable, falling back to a default value
# when that variable is not set.
# https://pypi.python.org/pypi/dj-database-url
DATABASES = {
'default': dj_database_url.config(
default='postgres://procult:123456@localhost/procult'
)
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication
AUTH_USER_MODEL = 'authentication.User'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
ALLOWED_FILES = [
'application/pdf',
'application/msword',
'application/excel',
'application/x-excel',
'application/vnd.ms-excel',
'application/x-msexcel',
'application/powerpoint',
'application/mspowerpoint',
'application/x-mspowerpoint',
'application/vnd.ms-powerpoint',
'application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.sun.xml.writer',
'application/vnd.sun.xml.writer.global',
'application/vnd.sun.xml.impress',
'application/vnd.sun.xml.draw',
'application/vnd.sun.xml.calc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.presentationml.slide',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/x-7z-compressed',
'application/zip',
'application/x-rar-compressed',
'image/png',
'image/gif',
'image/jpg',
'image/jpeg',
'image/pjpeg',
'image/tiff',
'image/x-tiff',
'image/bmp',
'image/x-windows-bmp',
'audio/ogg',
'audio/mpeg',
'audio/mpeg3',
'audio/mp3',
    'audio/mp4',
'audio/x-mpeg-3',
'audio/voc',
'audio/wav',
'audio/x-wav',
'audio/aiff',
'audio/x-aiff',
'audio/midi',
'audio/x-mid',
'audio/x-midi',
'audio/webm',
'application/mp4',
'application/x-troff-msvideo',
'application/vnd.rn-realmedia',
'application/ogg',
'video/mp4',
'video/mpeg',
'video/ogg',
'video/x-mpeg',
'video/avi',
'video/msvideo',
'video/x-msvideo',
'video/x-dv',
    'video/quicktime',
'video/webm',
'video/H261',
'video/H263',
'video/H263-1998',
'video/H263-2000',
'video/H264',
'video/H264-RCDO',
    'video/H264-SVC'
]
# Django Rest Framework
REST_FRAMEWORK = {
'DATE_FORMAT': "%d/%m/%Y",
'DATE_INPUT_FORMATS': ["%d/%m/%Y", "%d/%m/%y"],
'PAGE_SIZE': 100,
'EXCEPTION_HANDLER': 'procult.core.exceptions.custom_exception_handler',
'UNICODE_JSON': False
}
# Disable the Django Rest Framework friendly browsable view
if not settings.DEBUG:
REST_FRAMEWORK.update({
'DEFAULT_RENDERER_CLASSES': (
'procult.core.renderers.UnicodeJSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
})
# Local configuration
# TODO: Separate in multiple settings
if settings.DEBUG:
INSTALLED_APPS += (
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
CORS_ORIGIN_ALLOW_ALL = os.getenv('DEBUG', False)
# Define CORS to allow client in development mode
CORS_ORIGIN_WHITELIST = (
'localhost:5000',
'procult.local:5000',
'0.0.0.0:5000',
)
RAVEN_CONFIG = {
'dsn': os.getenv('RAVEN_DSN_URL'),
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
|
t-brandt/acorns-adi
|
parallel/multiproc_utils.py
|
Python
|
bsd-2-clause
| 1,069
| 0.006548
|
import multiprocessing
######################################################################
# Controllers for parallel execution, one per worker.
# Return when a 'None' job (poison pill) is reached.
######################################################################
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, block=True, timeout=None):
        multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
self.block = block
self.timeout = timeout
def run(self):
proc_name = self.name
while True:
next_task = self.task_queue.get(self.block, self.timeout)
if next_task is None:
# Poison pill means we should exit
break
self.result_queue.put(next_task())
return
class Task(object):
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self):
return self.func(*self.args)
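# A minimal usage sketch of the poison-pill pattern described above (hypothetical
# worker count and a hypothetical callable `my_func`):
#
#   tasks = multiprocessing.Queue()
#   results = multiprocessing.Queue()
#   workers = [Consumer(tasks, results) for _ in range(4)]
#   for w in workers:
#       w.start()
#   for args in [(1,), (2,)]:
#       tasks.put(Task(my_func, args))
#   for _ in workers:
#       tasks.put(None)                      # one poison pill per worker
#   outputs = [results.get() for _ in range(2)]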
|
kkamkou/gitmostwanted.com
|
migration/versions/4a7b02b0a63_repos_mean_table.py
|
Python
|
mit
| 782
| 0.003836
|
"""repos_mean table
Revision ID: 4a7b02b0a63
Revises: b75a76936b
Create Date: 2015-11-05 11:25:32.920590
"""
revision = '4a7b02b0a63'
down_revision = 'b75a76936b'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy.sql import func
import sqlalchemy as sa
def upgrade():
op.create_table(
'repos_mean',
        sa.Column('repo_id', sa.BigInteger(), nullable=False),
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('value', sa.Float(), nullable=False),
        sa.ForeignKeyConstraint(
            ['repo_id'], ['repos.id'],
name='fk_repos_mean_repo_id', ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('repo_id', 'created_at')
)
def downgrade():
op.drop_table('repos_mean')
|
iotile/coretools
|
transport_plugins/awsiot/test/test_agent.py
|
Python
|
gpl-3.0
| 5,635
| 0.00213
|
import pytest
import queue
from iotile_transport_awsiot.mqtt_client import OrderedAWSIOTClient
import time
pytestmark = pytest.mark.skip("This distribution needs to be updated to work with asyncio gateway")
def test_gateway(gateway, local_broker, args):
"""Make sure we can connect to the gateway by sending packets over the mqtt message broker."""
client = OrderedAWSIOTClient(args)
client.connect('hello')
local_broker.expect(5)
client.publish('devices/d--0000-0000-0000-0002/control/probe', {'type': 'command', 'operation': 'probe', 'client': 'hello'})
local_broker.wait()
# There should be 1 command message, 1 response and 1 advertisement notification per device
assert len(local_broker.messages) == 5
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0001/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0003/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0004/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/data/status' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/control/probe' in local_broker.messages
def test_probe(gateway, hw_man, local_broker):
"""Make sure we can probe for devices."""
local_broker.expect(3)
results = hw_man.scan(wait=0.1)
assert len(results) == 3
assert results[0]['uuid'] == 1
assert results[0]['connection_string'] == 'd--0000-0000-0000-0001'
assert results[1]['uuid'] == 3
assert results[1]['connection_string'] == 'd--0000-0000-0000-0003'
assert results[2]['uuid'] == 4
assert results[2]['connection_string'] == 'd--0000-0000-0000-0004'
def test_connect(gateway, hw_man, local_broker):
"""Make sure we can connect to a device."""
hw_man.scan(wait=0.1)
hw_man.connect(1)
hw_man.disconnect()
def test_streaming(gateway, hw_man, local_broker):
"""Make sure we can receive streamed data."""
hw_man.connect(3, wait=0.1)
hw_man.enable_streaming()
reps = hw_man.wait_reports(100, timeout=1.0)
assert len(reps) == 100
def test_tracing(gateway, hw_man, local_broker):
"""Make sure we can receive tracing data."""
hw_man.connect(4, wait=0.1)
hw_man.enable_tracing()
time.sleep(0.1)
data = hw_man.dump_trace('raw')
    assert data == b'Hello world, this is tracing data!'
def test_rpcs(gateway, hw_man, local_broker):
"""Make sure we can send rpcs."""
    hw_man.connect(3, wait=0.1)
hw_man.controller()
def test_script(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('ab'*10000).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x,y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_chunking(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x, y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_progress_throttling(gateway, hw_man, local_broker):
"""Make sure progress updates are properly throttled."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = []
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 10.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.append((x, y)))
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
# This should happen faster than our throttling period so we should
# get exactly 2 progress updates, on start and on finish
assert len(progs) == 2
x, y = progs[0]
assert x == 0
x, y = progs[1]
assert x == y
def test_autodisconnect(gateway, hw_man, local_broker):
"""Make sure we autodisconnect clients."""
gateway.agents[0].client_timeout = 0.1
hw_man.connect(3, wait=0.1)
assert len(gateway.agents[0]._connections) == 1
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
assert hw_man.stream.connection_interrupted is True
# Make sure we can reconnect automatically
hw_man.controller()
assert len(gateway.agents[0]._connections) == 1
# Let us lapse again
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
# Return to our disconnected state
hw_man.disconnect()
# Make sure we can connect normally again
hw_man.connect(3, wait=0.1)
|
sorenh/cc
|
vendor/Twisted-10.0.0/twisted/internet/ssl.py
|
Python
|
apache-2.0
| 7,496
| 0.002935
|
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SSL transport. Requires PyOpenSSL (http://pyopenssl.sf.net).
SSL connections require a ContextFactory so they can create SSL contexts.
End users should only use the ContextFactory classes directly - for SSL
connections use the reactor.connectSSL/listenSSL and so on, as documented
in IReactorSSL.
All server context factories should inherit from ContextFactory, and all
client context factories should inherit from ClientContextFactory. At the
moment this is not enforced, but in the future it might be.
Future Plans:
- split module so reactor-specific classes are in a separate module
- support for switching TCP into SSL
- more options
Maintainer: Itamar Shtull-Trauring
"""
# If something goes wrong, most notably an OpenSSL import failure,
# sys.modules['twisted.internet.ssl'] will be bound to a partially
# initialized module object. This is wacko, but we will take advantage
# of it to publish whether or not SSL is available.
# See the end of this module for the other half of this solution.
# The correct idiom to import this module is thus:
# try:
# from twisted.internet import ssl
# except ImportError:
# # happens the first time the interpreter tries to import it
# ssl = None
# if ssl and not ssl.supported:
# # happens second and later times
# ssl = None
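# A minimal client-side sketch of the usage described in the module docstring
# (hypothetical host, port and protocol factory; servers use reactor.listenSSL
# with a DefaultOpenSSLContextFactory analogously):
#
#   from twisted.internet import reactor, ssl
#   factory = MyClientFactory()        # hypothetical twisted ClientFactory
#   reactor.connectSSL('example.com', 443, factory, ssl.ClientContextFactory())
#   reactor.run()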
supported = False
# System imports
from OpenSSL import SSL
from zope.interface import implements, implementsOnly, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
"""A factory for SSL context objects, for server SSL connections."""
isClient = 0
def getContext(self):
"""Return a SSL.Context object. override in subclasses."""
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
"""
L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
objects. These objects define certain parameters related to SSL
handshakes and the subsequent connection.
@ivar _contextFactory: A callable which will be used to create new
context objects. This is typically L{SSL.Context}.
"""
_context = None
def __init__(self, privateKeyFileName, certificateFileName,
sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
"""
@param privateKeyFileName: Name of a file containing a private key
@param certificateFileName: Name of a file containing a certificate
@param sslmethod: The SSL method to use
"""
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
# Create a context object right now. This is to force validation of
# the given parameters so that errors are detected earlier rather
# than later.
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# Disallow SSLv2! It's insecure! SSLv3 has been around since
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d['_context']
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an SSL context.
"""
return self._context
class ClientContextFactory:
"""A context factory for SSL clients."""
isClient = 1
    # SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
# though.
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
# See comment in DefaultOpenSSLContextFactory about SSLv2.
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
class Client(tcp.Client):
"""I am an SSL client."""
implementsOnly(interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
# tcp.Client.__init__ depends on self.ctxFactory being set
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def getHost(self):
"""Returns the address from which I am connecting."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Returns the address that I am connected."""
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
class Server(tcp.Server):
"""I am an SSL server.
"""
implements(interfaces.ISSLTransport)
def getHost(self):
"""Return server's address."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Return address of peer."""
h, p = self.client
return address.IPv4Address('TCP', h, p, 'SSL')
class Port(tcp.Port):
"""I am an SSL port."""
_socketShutdownMethod = 'sock_shutdown'
transport = Server
def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def createInternetSocket(self):
"""(internal) create an SSL socket
"""
sock = tcp.Port.createInternetSocket(self)
return SSL.Connection(self.ctxFactory.getContext(), sock)
def _preMakeConnection(self, transport):
# *Don't* call startTLS here
# The transport already has the SSL.Connection object from above
transport._startTLS()
return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
self.host = host
self.port = port
self.bindAddress = bindAddress
self.contextFactory = contextFactory
base.BaseConnector.__init__(self, factory, timeout, reactor)
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
def getDestination(self):
return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
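# --- Illustrative sketch, not part of the original module ---
# A minimal server setup using DefaultOpenSSLContextFactory, assuming a
# running reactor and hypothetical key/certificate paths. Shown as a helper
# that is never called on import, so nothing below changes module behaviour.
def _example_listen_ssl(reactor, site):
    # 'server.pem'/'cert.pem' are placeholder paths, not files shipped here.
    contextFactory = DefaultOpenSSLContextFactory('server.pem', 'cert.pem')
    return reactor.listenSSL(8443, site, contextFactory)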
|
helloworldC2/VirtualRobot
|
porc/pages.py
|
Python
|
mit
| 2,872
| 0.000696
|
from .resource import Resource
from collections import Iterator
import copy
try:
# python 2
from urllib import quote
except ImportError:
# python 3
from urllib.parse import quote
class Pages(Iterator):
def __init__(self, opts, url, path, params):
if isinstance(path, list):
pages_url = '/'.join([url] + [quote(elem) for elem in path])
else:
pages_url = '/'.join([url, quote(path)])
self.resource = Resource(pages_url, **opts)
self.params = params
self._root_resource = Resource(url[:url.find('/v0')], **opts)
self.response = None
def _handle_page(self, querydict={}, val='next', **headers):
"""
Executes the request getting the next (or previous) page,
incrementing (or decrementing) the current page.
"""
params = copy.copy(self.params)
params.update(querydict)
        # update uri based on next page
if self.response:
self.response.raise_for_status()
_next = self.response.links.get(val, {}).get('url')
if _next:
response = self._root_resource._make_request(
'GET', _next, params, **headers)
self._handle_res(None, response)
return response
else:
raise StopIteration
else:
            response = self.resource._make_request(
                'GET', '', params, **headers)
self._handle_res(None, response)
return response
def _handle_res(self, session, response):
"""
Stores the response, which we use for determining
next and prev pages.
"""
self.response = response
def reset(self):
"""
Clear the page's current place.
page_1 = page.next().result()
page_2 = page.next().result()
page.reset()
page_x = page.next().result()
assert page_x.url == page_1.url
"""
self.response = None
def next(self, querydict={}, **headers):
"""
Gets the next page of results.
Raises `StopIteration` when there are no more results.
"""
return self._handle_page(querydict, **headers)
def __next__(self):
return self.next()
def prev(self, querydict={}, **headers):
"""
Gets the previous page of results.
Raises `StopIteration` when there are no more results.
Note: Only collection searches provide a `prev` value.
For all others, `prev` will always return `StopIteration`.
"""
return self._handle_page(querydict, 'prev', **headers)
def all(self):
results = []
for response in self:
response.raise_for_status()
results.extend(response['results'])
return results
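# --- Illustrative sketch, not part of the original module ---
# Pages behaves as an iterator over paginated responses; a hypothetical
# caller can walk pages one at a time or gather everything with all().
# The 'pages' argument stands for any Pages instance obtained from a client.
def _example_walk_pages(pages):
    first = pages.next()                  # first page (raises StopIteration at the end)
    remaining = [resp for resp in pages]  # iterate whatever is left
    pages.reset()                         # forget the current position
    return first, remaining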
|
RudolfCardinal/crate
|
crate_anon/crateweb/core/context_processors.py
|
Python
|
gpl-3.0
| 1,968
| 0
|
#!/usr/bin/env python
"""
crate_anon/crateweb/core/context_processors.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**A common context dictionary for all Django requests.**
"""
from typing import Any, Dict
from django.conf import settings
from django.http.request import HttpRequest
from crate_anon.common.constants import CRATE_DOCS_URL, HelpUrl
# noinspection PyUnusedLocal
def common_context(request: HttpRequest) -> Dict[str, Any]:
"""
Returns a context used across the site.
Args:
request: the :class:`django.http.request.HttpRequest`
Returns:
dict: a context dictionary
"""
return {
'CRATE_DOCS_URL': CRATE_DOCS_URL,
'HelpUrl': HelpUrl,
'nav_on_main_menu': False,
'RESEARCH_DB_TITLE': settings.RESEARCH_DB_TITLE,
}
# Try to minimize SQL here (ideally none!), as these calls will be used for
# EVERY request.
# This problem can partially be circumvented with a per-request cache; see
# http://stackoverflow.com/questions/3151469/per-request-cache-in-django
# But good practice is: keep queries to a minimum.
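# --- Illustrative note, not part of the original module ---
# For common_context to run on every request it must be listed in the Django
# settings; a minimal TEMPLATES entry might look like this (entries other
# than the context processor path above are assumptions for illustration):
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'APP_DIRS': True,
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'crate_anon.crateweb.core.context_processors.common_context',
#         ],
#     },
# }]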
|
saketkc/moca
|
moca/helpers/exceptions.py
|
Python
|
isc
| 455
| 0.002198
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
class MocaException(Exception):
"""Base class for MoCA Exceptions"""
    def __init__(self, msg):
self.value = msg
def __str__(self):
"""string representation of MoCA Exception
Returns
-------
mocastr: string representation
"""
mocastr = repr(self.value)
        return mocastr
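# --- Illustrative sketch, not part of the original module ---
# How the exception is meant to be raised and rendered; __str__ returns the
# repr of the stored message.
def _example_raise():
    try:
        raise MocaException('motif file is missing')
    except MocaException as err:
        return str(err)   # "'motif file is missing'"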
|
siddhika1889/Pydev-Editor
|
tests/pysrc/extendable/static.py
|
Python
|
epl-1.0
| 143
| 0.020979
|
class TestStatic(object):
@staticmethod
def static1(self):
pass
@staticmethod
    def static2(self):
pass
|
arizvisa/syringe
|
template/video/h264.py
|
Python
|
bsd-2-clause
| 2,279
| 0.005265
|
from ptypes import *
v = 0 # FIXME: this file format is busted
class seq_parameter_set_rbsp(pbinary.struct):
class __pic_order_type_1(pbinary.struct):
_fields_ = [
(1, 'delta_pic_order_always_zero_flag'),
(v, 'offset_for_non_ref_pic'),
(v, 'offset_for_top_to_bottom_field'),
(v, 'num_ref_frames_in_pic_order_cnt_cycle'),
(lambda s: dyn.array( dyn.clone(pbinary.struct,_fields_=[(v,'offset_for_ref_frame')]), s['num_ref_frames_in_pic_order_cnt_cycle']), 'ref_frames')
]
def __pic_order(self):
type = self['pic_order_cnt_type']
if type == 0:
return dyn.clone(pbinary.struct, _fields_=[(v, 'log2_max_pic_order_cnt_lsb')])
elif type == 1:
return __pic_order_type_1
raise NotImplementedError(type)
class __frame_crop_offset(pbinary.struct):
_fields_ = [
(v, 'frame_crop_left_offset'),
(v, 'frame_crop_right_offset'),
            (v, 'frame_crop_top_offset'),
            (v, 'frame_crop_bottom_offset'),
]
def __frame_crop(self):
if self['frame_cropping_flag']:
return __frame_crop_offset
return dyn.clone(pbinary.struct,_fields_=[])
def __rbsp_trailing_bits(self):
return 0
_fields_ = [
(8, 'profile_idc'),
        (1, 'constraint_set0_flag'),
        (1, 'constraint_set1_flag'),
(1, 'constraint_set2_flag'),
(5, 'reserved_zero_5bits'),
(8, 'level_idc'),
(v, 'seq_parameter_set_id'),
(v, 'pic_order_cnt_type'),
(__pic_order, 'pic_order'),
(v, 'num_ref_frames'),
(1, 'gaps_in_frame_num_value_allowed_flag'),
(v, 'pic_width_in_mbs_minus1'),
(v, 'pic_height_in_map_units_minus1'),
(1, 'frame_mbs_only_flag'),
(lambda s: [0,1][s['frame_mbs_only_flag']], 'mb_adaptive_frame_field_flag'),
(1, 'direct_8x8_inference_flag'),
(1, 'frame_cropping_flag'),
(__frame_crop, 'frame_crop'),
(1, 'vul_parameters_present_flag'),
(lambda s: [dyn.clone(pbinary.struct,_fields_=[]),__vul_parameters][s['vul_parameters_present_flag']], 'vul_parameters'),
(__rbsp_trailing_bits, 'rbsp_trailing_bits'),
]
|
Nikea/VisTrails
|
vistrails/core/db/locator.py
|
Python
|
bsd-3-clause
| 30,882
| 0.005375
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import base64
import getpass
import os.path
from vistrails.core import get_vistrails_application
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.system import vistrails_default_file_type, get_elementtree_library, \
default_connections_file, vistrails_examples_directory
from vistrails.core.external_connection import ExtConnectionList, DBConnection
from vistrails.core.thumbnails import ThumbnailCache
from vistrails.core import debug
from vistrails.db.services.locator import XMLFileLocator as _XMLFileLocator, \
DBLocator as _DBLocator, ZIPFileLocator as _ZIPFileLocator, \
BaseLocator as _BaseLocator, UntitledLocator as _UntitledLocator
from vistrails.db.services.io import SaveBundle, test_db_connection
from vistrails.db import VistrailsDBException
from vistrails.db.domain import DBWorkflow
ElementTree = get_elementtree_library()
class BaseLocator(_BaseLocator):
@staticmethod
def convert_locator(locator):
if locator.__class__ == _XMLFileLocator:
locator.__class__ = XMLFileLocator
elif locator.__class__ == _ZIPFileLocator:
locator.__class__ = ZIPFileLocator
elif locator.__class__ == _DBLocator:
DBLocator.convert(locator)
elif locator.__class__ == _UntitledLocator:
locator.__class__ = UntitledLocator
@staticmethod
def from_url(url):
locator = _BaseLocator.from_url(url)
BaseLocator.convert_locator(locator)
return locator
class CoreLocator(object):
@staticmethod
def prompt_autosave(parent_widget):
pass # Opens a dialog that prompts the user if they want to
# use temporaries
@staticmethod
def load_from_gui(parent_widget, obj_type):
pass # Opens a dialog that the user will be able to use to
# show the right values, and returns a locator suitable
# for loading a file
@staticmethod
def save_from_gui(parent_widget, obj_type, locator):
pass # Opens a dialog that the user will be able to use to
# show the right values, and returns a locator suitable
# for saving a file
def update_from_gui(self, klass=None):
pass
# FIXME Need to do some more intelligent conversions anywhere this
# function gets called
@staticmethod
def get_convert_klass(vt_type):
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.log.log import Log
from vistrails.core.modules.module_registry import ModuleRegistry
from vistrails.core.log.opm_graph import OpmGraph
klass_map = {Vistrail.vtType: Vistrail,
Pipeline.vtType: Pipeline,
Log.vtType: Log,
ModuleRegistry.vtType: ModuleRegistry,
OpmGraph.vtType: OpmGraph}
return klass_map[vt_type]
class UntitledLocator(_UntitledLocator, CoreLocator):
def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
obj = _UntitledLocator.load(self, klass.vtType)
klass.convert(obj)
obj.locator = self
return obj
class XMLFileLocator(_XMLFileLocator, CoreLocator):
def __init__(self, filename, **kwargs):
        _XMLFileLocator.__init__(self, filename, **kwargs)
    def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
obj = _XMLFileLocator.load(self, klass.vtType)
klass.convert(obj)
obj.locator = self
return obj
def save(self, obj):
is_bundle = False
if type(obj) == type(SaveBundle(None)):
is_bundle = True
save_bundle = obj
obj = save_bundle.get_primary_obj()
klass = obj.__class__
obj = _XMLFileLocator.save(self, obj, False)
klass.convert(obj)
obj.locator = self
if is_bundle:
return SaveBundle(save_bundle.bundle_type, obj)
return obj
def save_as(self, obj, version=None):
is_bundle = False
if type(obj) == type(SaveBundle(None)):
is_bundle = True
save_bundle = obj
obj = save_bundle.get_primary_obj()
klass = obj.__class__
obj = _XMLFileLocator.save(self, obj, True, version)
klass.convert(obj)
obj.locator = self
if is_bundle:
return SaveBundle(save_bundle.bundle_type, obj)
return obj
##########################################################################
def __eq__(self, other):
if not isinstance(other, XMLFileLocator):
return False
return self._name == other._name
##########################################################################
@staticmethod
def prompt_autosave(parent_widget):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_autosave_prompt(parent_widget)
@staticmethod
def load_from_gui(parent_widget, obj_type):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)
@staticmethod
def save_from_gui(parent_widget, obj_type, locator=None):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
locator)
# def update_from_gui(self, parent_widget, klass=None):
# from core.vistrail.vistrail import Vistrail
# if klass is None:
# klass = Vistrail
# import gui.extras.core.db.locator as db_gui
# return db_gui.get_load_file_locator_from_gui(parent_widget, klass.vtType)
class DBLocator(_DBLocator, CoreLocator):
class getKeyChain(object):
def set_key(self, key, passwd):
get_vistrails_application().keyChain.set_key(key,passwd)
def get_key(self, key):
return get_vistrails_application().keyChain.get_key(key)
keyCha
|
undeadpixel/mallet
|
test/fixtures/hmm_fixtures.py
|
Python
|
mit
| 1,846
| 0.008126
|
import mallet.hmm as h_mm
import mallet.state as state
# emissions
def emissions():
return [
{'A': 0.25, 'B': 0.25, 'C': 0.5},
{'A': 0.55, 'B': 0.15, 'C': 0.3},
{'A': 0.675, 'B': 0.20, 'C': 0.125},
{'B': 0.5, 'C': 0.5},
{'A': 0.0, 'B': 0.5, 'C': 0.5}
]
def invalid_emissions():
return [
{'A': 0.5, 'B': 0.25, 'C': 0.10}
]
# states
def state_params():
emissions_list = emissions()
return [
(1, 'Begin', 'BEGIN', {}),
(2, 'State1', 'S', emissions_list[0]),
(3, 'State2', 'T', emissions_list[1]),
(4, 'State3', 'U', emissions_list[2]),
(5, 'End', 'END', {}),
]
def states():
state_param_list = state_params()
return dict((params[0], state.State(*params)) for params in state_param_list)
# transitions
def transitions(state_list = None):
if state_list is None: state_list = states()
return {
1: {
state_list[2]: 1.0
},
2: {
state_list[2]: 0.5,
state_list[3]: 0.5
},
3: {
            state_list[3]: 0.75,
state_list[4]: 0.25
},
        4: {
            state_list[4]: 0.15,
state_list[5]: 0.85
},
5: {}
}
def fake_transitions(state_list = None):
if state_list is None: state_list = states()
return {
1: {
state_list[2]: 1.0,
state_list[3]: 0.0
}
}
def states_with_transitions():
states_with_transitions = states()
transition_list = transitions(states_with_transitions)
for name, state in states_with_transitions.iteritems():
state.transitions = transition_list[state.id_num]
return states_with_transitions
def hmm():
return h_mm.HMM(states_with_transitions())
|
dwlehman/blivet
|
blivet/devicetree.py
|
Python
|
lgpl-2.1
| 42,702
| 0.001171
|
# devicetree.py
# Device management for anaconda's storage configuration module.
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
import os
import re
from gi.repository import BlockDev as blockdev
from .actionlist import ActionList
from .errors import DeviceError, DeviceTreeError, StorageError
from .deviceaction import ActionDestroyDevice, ActionDestroyFormat
from .devices import BTRFSDevice, DASDDevice, NoDevice, PartitionDevice
from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice
from . import formats
from .devicelibs import lvm
from .devicelibs import edd
from . import udev
from . import util
from .flags import flags
from .populator import Populator
from .storage_log import log_method_call, log_method_return
import logging
log = logging.getLogger("blivet")
_LVM_DEVICE_CLASSES = (LVMLogicalVolumeDevice, LVMVolumeGroupDevice)
class DeviceTree(object):
""" A quasi-tree that represents the devices in the system.
The tree contains a list of :class:`~.devices.StorageDevice` instances,
which does not necessarily reflect the actual state of the system's
devices. :class:`~.deviceaction.DeviceAction` is used to perform
modifications to the tree, except when initially populating the tree.
:class:`~.deviceaction.DeviceAction` instances are registered, possibly
causing the addition or removal of :class:`~.devices.StorageDevice`
instances to/from the tree. A :class:`~.deviceaction.DeviceAction`
is reversible up to the time its 'execute' method is called.
Only one action of any given type/object pair should exist for
any given device at any given time.
:class:`~.deviceaction.DeviceAction` instances can only be registered
for leaf devices, except for resize actions.
"""
def __init__(self, conf=None, passphrase=None, luksDict=None,
iscsi=None, dasd=None):
"""
:keyword conf: storage discovery configuration
:type conf: :class:`~.StorageDiscoveryConfig`
:keyword passphrase: default LUKS passphrase
:keyword luksDict: a dict with UUID keys and passphrase values
:type luksDict: dict
:keyword iscsi: ISCSI control object
:type iscsi: :class:`~.iscsi.iscsi`
:keyword dasd: DASD control object
:type dasd: :class:`~.dasd.DASD`
"""
self.reset(conf, passphrase, luksDict, iscsi, dasd)
def reset(self, conf=None, passphrase=None, luksDict=None,
iscsi=None, dasd=None):
""" Reset the instance to its initial state. """
# internal data members
self._devices = []
self._actions = ActionList()
# a list of all device names we encounter
self.names = []
self._hidden = []
# initialize attributes that may later hold cached lvm info
self.dropLVMCache()
lvm.lvm_cc_resetFilter()
self._populator = Populator(self,
conf=conf,
passphrase=passphrase,
luksDict=luksDict,
iscsi=iscsi,
dasd=dasd)
@property
def actions(self):
return self._actions
def setDiskImages(self, images):
""" Set the disk images and reflect them in exclusiveDisks.
:param images: dict with image name keys and filename values
:type images: dict
.. note::
Disk images are automatically exclusive. That means that, in the
presence of disk images, any local storage not associated with
the disk images is ignored.
"""
self._populator.setDiskImages(images)
@property
def exclusiveDisks(self):
return self._populator.exclusiveDisks
@property
def ignoredDisks(self):
return self._populator.ignoredDisks
@property
def dasd(self):
return self._populator.dasd
@dasd.setter
def dasd(self, dasd):
self._populator.dasd = dasd
@property
def protectedDevNames(self):
return self._populator.protectedDevNames
@property
def diskImages(self):
return self._populator.diskImages
@property
def pvInfo(self):
if self._pvs_cache is None:
pvs = blockdev.lvm.pvs()
self._pvs_cache = dict((pv.pv_name, pv) for pv in pvs) # pylint: disable=attribute-defined-outside-init
return self._pvs_cache
@property
def lvInfo(self):
if self._lvs_cache is None:
lvs = blockdev.lvm.lvs()
            self._lvs_cache = dict(("%s-%s" % (lv.vg_name, lv.lv_name), lv) for lv in lvs) # pylint: disable=attribute-defined-outside-init
        return self._lvs_cache
def dropLVMCache(self):
""" Drop cached lvm information. """
self._pvs_cache = None # pylint: disable=attribute-defined-outside-init
self._lvs_cache = None # pylint: disable=attribute-defined-outside-init
def _addDevice(self, newdev, new=True):
""" Add a device to the tree.
:param newdev: the device to add
            :type newdev: a subclass of :class:`~.devices.StorageDevice`
Raise ValueError if the device's identifier is already
in the list.
"""
if newdev.uuid and newdev.uuid in [d.uuid for d in self._devices] and \
not isinstance(newdev, NoDevice):
raise ValueError("device is already in tree")
# make sure this device's parent devices are in the tree already
for parent in newdev.parents:
if parent not in self._devices:
raise DeviceTreeError("parent device not in tree")
newdev.addHook(new=new)
self._devices.append(newdev)
# don't include "req%d" partition names
if ((newdev.type != "partition" or
not newdev.name.startswith("req")) and
newdev.type != "btrfs volume" and
newdev.name not in self.names):
self.names.append(newdev.name)
log.info("added %s %s (id %d) to device tree", newdev.type,
newdev.name,
newdev.id)
def _removeDevice(self, dev, force=None, modparent=True):
""" Remove a device from the tree.
:param dev: the device to remove
:type dev: a subclass of :class:`~.devices.StorageDevice`
:keyword force: whether to force removal of a non-leaf device
:type force: bool
:keyword modparent: update parent device to account for removal
:type modparent: bool
.. note::
Only leaves may be removed.
"""
if dev not in self._devices:
raise ValueError("Device '%s' not in tree" % dev.name)
if not dev.isleaf and not force:
log.debug("%s has %d kids", dev.name, dev.kids)
raise ValueError("Cannot remove non-leaf device '%s'" % dev.name)
dev.removeHook(modparent=mo
|
PeterHo/mysite
|
lists/migrations/0001_initial.py
|
Python
|
apache-2.0
| 420
| 0.002381
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
            ],
),
]
|
Yarrick13/hwasp
|
tests/wasp1/AllAnswerSets/aggregates_max_bug_1.test.py
|
Python
|
apache-2.0
| 92
| 0
|
input = """
% No auxiliary atoms at all.
ouch :- #max{V:a(V)} = 0.
"
|
""
output = """
{}
"""
|
eltoncarr/tubular
|
tubular/ec2.py
|
Python
|
agpl-3.0
| 19,451
| 0.001954
|
"""
Convenience functions built on top of boto that are useful
when we deploy using asgard.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import logging
import time
from datetime import datetime, timedelta
import backoff
import boto
from boto.exception import EC2ResponseError, BotoServerError
from boto.ec2.autoscale.tag import Tag
from tubular.utils import EDP, WAIT_SLEEP_TIME
from tubular.exception import (
ImageNotFoundException,
MultipleImagesFoundException,
MissingTagException,
TimeoutException,
)
LOG = logging.getLogger(__name__)
ISO_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
ASG_DELETE_TAG_KEY = 'delete_on_ts'
MAX_ATTEMPTS = os.environ.get('RETRY_MAX_ATTEMPTS', 5)
RETRY_FACTOR = os.environ.get('RETRY_FACTOR', 1.5)
def giveup_if_not_throttling(ex):
"""
Checks that a BotoServerError exceptions message contains the throttling string.
Args:
ex (boto.exception.BotoServerError):
Returns:
False if the throttling string is not found.
"""
return not (str(ex.status) == "400" and ex.body and '<Code>Throttling</Code>' in ex.body)
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def get_all_autoscale_groups(names=None):
"""
Get all the autoscale groups
Arguments:
names (list) - A list of ASG names as strings
Returns:
List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` instances.
"""
autoscale_conn = boto.connect_autoscale()
fetched_asgs = autoscale_conn.get_all_groups(names=names)
total_asgs = []
while True:
total_asgs.extend([asg for asg in fetched_asgs])
if fetched_asgs.next_token:
fetched_asgs = autoscale_conn.get_all_groups(names=names, next_token=fetched_asgs.next_token)
else:
break
return total_asgs
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def get_all_load_balancers(names=None):
"""
Get all the ELBs
Arguments:
names (list): A list of ELB names as strings
Returns:
a list of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
elb_conn = boto.connect_elb()
fetched_elbs = elb_conn.get_all_load_balancers(names)
total_elbs = []
while True:
total_elbs.extend([elb for elb in fetched_elbs])
if fetched_elbs.next_token:
fetched_elbs = elb_conn.get_all_load_balancers(names, fetched_elbs.next_token)
else:
break
return total_elbs
def _instance_elbs(instance_id, elbs):
"""
Given an EC2 instance and ELBs, return the ELB(s) in which it is active.
Arguments:
instance_id (:obj:`boto.ec2.instance.Reservation`): Instance used to find out which ELB it is active in.
        elbs (:obj:`list` of :obj:`boto.ec2.elb.loadbalancer.LoadBalancer`): List of ELBs to use in checking.
Returns:
:obj:`list` of :obj:`boto.ec2.elb.loadbalancer.LoadBalancer`:
One or more ELBs used by the passed-in instance -or- None.
"""
instance_elbs = []
for elb in elbs:
elb_instance_ids = [inst.id for inst in elb.instances]
if instance_id in elb_instance_ids:
instance_elbs.append(elb)
return instance_elbs
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def active_ami_for_edp(env, dep, play):
"""
Given an environment, deployment, and play, find the base AMI id used for the active deployment.
Arguments:
env (str): Environment to check (stage, prod, loadtest, etc.)
dep (str): Deployment to check (edx, edge, mckinsey, etc.)
play (str): Play to check (edxapp, discovery, ecommerce, etc.)
Returns:
str: Base AMI id of current active deployment for the EDP.
Raises:
MultipleImagesFoundException: If multiple AMI IDs are found within the EDP's ELB.
ImageNotFoundException: If no AMI IDs are found for the EDP.
"""
LOG.info("Looking up AMI for {}-{}-{}...".format(env, dep, play))
ec2_conn = boto.connect_ec2()
all_elbs = get_all_load_balancers()
LOG.info("Found {} load balancers.".format(len(all_elbs)))
edp_filter = {
"tag:environment": env,
"tag:deployment": dep,
"tag:play": play,
}
reservations = ec2_conn.get_all_reservations(filters=edp_filter)
LOG.info("{} reservations found for EDP {}-{}-{}".format(len(reservations), env, dep, play))
amis = set()
for reservation in reservations:
for instance in reservation.instances:
elbs = _instance_elbs(instance.id, all_elbs)
if instance.state == 'running' and len(elbs) > 0:
amis.add(instance.image_id)
LOG.info("AMI found for {}-{}-{}: {}".format(env, dep, play, instance.image_id))
else:
LOG.info("Instance {} state: {} - elbs in: {}".format(instance.id, instance.state, len(elbs)))
if len(amis) > 1:
msg = "Multiple AMIs found for {}-{}-{}, should have only one.".format(env, dep, play)
raise MultipleImagesFoundException(msg)
if len(amis) == 0:
msg = "No AMIs found for {}-{}-{}.".format(env, dep, play)
raise ImageNotFoundException(msg)
return amis.pop()
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def tags_for_ami(ami_id):
"""
Look up the tags for an AMI.
Arguments:
ami_id (str): An AMI Id.
Returns:
dict: The tags for this AMI.
Raises:
ImageNotFoundException: No image found with this ami ID.
MissingTagException: AMI is missing one or more of the expected tags.
"""
LOG.debug("Looking up edp for {}".format(ami_id))
ec2 = boto.connect_ec2()
try:
ami = ec2.get_all_images(ami_id)[0]
except IndexError:
raise ImageNotFoundException("ami: {} not found".format(ami_id))
except EC2ResponseError as error:
raise ImageNotFoundException(str(error))
return ami.tags
def edp_for_ami(ami_id):
"""
Look up the EDP tags for an AMI.
Arguments:
ami_id (str): An AMI Id.
Returns:
EDP Named Tuple: The EDP tags for this AMI.
Raises:
ImageNotFoundException: No image found with this ami ID.
MissingTagException: AMI is missing one or more of the expected tags.
"""
tags = tags_for_ami(ami_id)
try:
edp = EDP(tags['environment'], tags['deployment'], tags['play'])
except KeyError as key_err:
missing_key = key_err.args[0]
msg = "{} is missing the {} tag.".format(ami_id, missing_key)
raise MissingTagException(msg)
LOG.debug("Got EDP for {}: {}".format(ami_id, edp))
return edp
def validate_edp(ami_id, environment, deployment, play):
"""
Validate that an AMI is tagged for a specific EDP (environment, deployment, play).
Arguments:
ami_id (str): An AMI Id.
environment (str): Environment for AMI, e.g. prod, stage
deployment (str): Deployment for AMI, e.g. edx, edge
play (str): Play for AMI, e.g. edxapp, insights, discovery
Returns:
True if AMI EDP matches specified EDP, otherwise False.
"""
edp = edp_for_ami(ami_id)
edp_matched = (
edp.environment == environment and
edp.deployment == deployment and
edp.play == play
)
if not edp_matched:
LOG.info("AMI {0} EDP did not match specified: {1} != ({2}, {3}, {4})".format(
ami_id, edp, environment, deployment, play
))
return edp_matched
de
|
spikeekips/serf-python
|
test/test_command_join.py
|
Python
|
mpl-2.0
| 1,679
| 0.008338
|
import pytest
import serf
from _base import FakeClient, FakeConnection
def test_request_join () :
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=True,
)
_request = serf.get_request_class('join')(**_body)
_request.check(FakeClient(), )
assert _request.is_checked
_body = dict( # missing value
What='is it',
)
    _request = serf.get_request_class('join')(**_body)
    with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=1, # invalid value, it must be bool
)
_request = serf.get_request_class('join')(**_body)
with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
class JoinFakeConnection (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xa0\xa3Seq\x01\x81\xa3Num\x01',
)
def test_response_join () :
_client = serf.Client(connection_class=JoinFakeConnection, )
def _callback (response, ) :
assert response.request.command == 'join'
assert not response.error
assert response.is_success
assert response.body is not None
assert response.seq == 1
_body = response.body
assert isinstance(_body, dict, )
assert 'Num' in _body
assert _body.get('Num') == 1
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=True,
)
_client.join(**_body).add_callback(_callback, ).request()
|
UCL-INGI/Informatique-1
|
old_pythia/18_java/test/gendataset.py
|
Python
|
agpl-3.0
| 406
| 0.009901
|
# -*- coding: utf-8 -*-
# Test dataset script
# Author: Sébastien Combéfis
# Date: December 23, 2012
# Problem: Question de Bilan Final : Mission 1
from lib.pythia import *
import random
class TestDataSetQ1(TestDataSet):
def __init__(self):
TestDataSet.__init__(self, 'q1', 5)
def genTestData(self):
A = random.randint(1, 100)
return [A]
TestDataSetQ1().generate()
|
JioCloud/python-ceilometerclient
|
ceilometerclient/exc.py
|
Python
|
apache-2.0
| 3,070
| 0.000326
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
class BaseException(Exception):
"""An error occurred."""
def __init__(self, message=None):
self.message = message
def __str__(self):
return self.message or self.__class__.__doc__
class CommandError(BaseException):
"""Invalid usage of CLI."""
class InvalidEndpoint(BaseException):
"""The provided endpoint is invalid."""
class CommunicationError(BaseException):
"""Unable to communicate with server."""
class HTTPException(BaseException):
"""Base exception for all HTTP-derived exceptions."""
code = 'N/A'
def __init__(self, details=None):
self.details = details
def __str__(self):
try:
data = json.loads(self.details)
message = data.get("error_message", {}).get("faultstring")
            if message:
return "%s (HTTP %s) ERROR %s" % (
                    self.__class__.__name__, self.code, message)
except (ValueError, TypeError, AttributeError):
pass
return "%s (HTTP %s)" % (self.__class__.__name__, self.code)
class HTTPMultipleChoices(HTTPException):
code = 300
def __str__(self):
self.details = ("Requested version of OpenStack Images API is not"
"available.")
return "%s (HTTP %s) %s" % (self.__class__.__name__, self.code,
self.details)
class HTTPBadRequest(HTTPException):
code = 400
class HTTPUnauthorized(HTTPException):
code = 401
class HTTPForbidden(HTTPException):
code = 403
class HTTPNotFound(HTTPException):
code = 404
class HTTPMethodNotAllowed(HTTPException):
code = 405
class HTTPConflict(HTTPException):
code = 409
class HTTPOverLimit(HTTPException):
code = 413
class HTTPInternalServerError(HTTPException):
code = 500
class HTTPNotImplemented(HTTPException):
code = 501
class HTTPBadGateway(HTTPException):
code = 502
class HTTPServiceUnavailable(HTTPException):
code = 503
#NOTE(bcwaldon): Build a mapping of HTTP codes to corresponding exception
# classes
_code_map = {}
for obj_name in dir(sys.modules[__name__]):
if obj_name.startswith('HTTP'):
obj = getattr(sys.modules[__name__], obj_name)
_code_map[obj.code] = obj
def from_response(response, details=None):
"""Return an instance of an HTTPException based on httplib response."""
cls = _code_map.get(response.status, HTTPException)
return cls(details)
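# --- Illustrative sketch, not part of the original module ---
# from_response() picks the exception class by HTTP status code via _code_map,
# falling back to the generic HTTPException. A hypothetical caller:
def _example_from_response():
    class _FakeResponse(object):
        status = 404
    err = from_response(_FakeResponse(), details='{"error_message": {}}')
    assert isinstance(err, HTTPNotFound)
    return str(err)   # "HTTPNotFound (HTTP 404)"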
|
rjdp/cement
|
cement/utils/test.py
|
Python
|
bsd-3-clause
| 2,208
| 0
|
"""Cement testing utilities."""
import unittest
from tempfile import mkstemp, mkdtemp
from ..core import backend, foundation
from ..utils.misc import rando
# shortcuts
from nose import SkipTest
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose.plugins.attrib import attr
class TestApp(foundation.CementApp):
"""
Basic CementApp for generic testing.
"""
class Meta:
label = "app-%s" % rando()[:12]
config_files = []
argv = []
base_controller = None
arguments = []
exit_on_close = False
class CementTestCase(unittest.TestCase):
"""
A sub-class of unittest.TestCase.
"""
app_class = TestApp
"""The test class that is used by self.make_app to create an app."""
def __init__(self, *args, **kw):
super(CementTestCase, self).__init__(*args, **kw)
def setUp(self):
"""
        Sets up self.app with a generic TestApp(). Also resets the backend
        hooks and handlers so that every time an app is created it starts
        from a clean setup.
"""
self.app = self.make_app()
_, self.tmp_file = mkstemp()
self.tmp_dir = mkdtemp()
def make_app(self, *args, **kw):
"""
Create a generic app using TestApp. Arguments and Keyword Arguments
are passed to the app.
"""
self.reset_backend()
return self.app_class(*args, **kw)
def reset_backend(self):
"""
Remove all registered hooks and handlers from the backend.
"""
for _handler in backend.__handlers__.copy():
del backend.__handlers__[_handler]
for _hook in backend.__hooks__.copy():
del backend.__hooks__[_hook]
def ok(self, expr, msg=None):
"""Shorthand for assert."""
        return ok(expr, msg)
def eq(self, a, b, msg=None):
"""Shorthand for 'assert a == b, "%r != %r" % (a, b)'. """
return eq(a, b, msg)
# The following are for internal, Cement unit testing only
@attr('core')
class CementCoreTestCase(CementTestCase):
pass
@attr('ext')
class CementExtTestCase(CementTestCase):
    pass
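# --- Illustrative sketch, not part of the original file ---
# A hypothetical test case built on these helpers; the assertions only touch
# values defined by TestApp above, and the class name is illustrative.
class _ExampleTestCase(CementTestCase):
    def test_meta_defaults(self):
        self.app.setup()
        self.eq(self.app._meta.exit_on_close, False)
        self.ok(self.app._meta.label.startswith('app-'))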
|
popazerty/blackhole-vuplus
|
RecordTimer.py
|
Python
|
gpl-2.0
| 43,007
| 0.030321
|
import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, description, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
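# Illustrative note (not part of the original module): with margin_before=5
# and margin_after=10 minutes configured, an EPG event running 20:00-21:00
# comes back as begin=19:55 and end=21:10 in the returned
# (begin, end, name, description, eit) tuple.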
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
				RecordTimerEntry.shutdown() # immediate shutdown
		elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send fake event.. to check if another recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.InfoBarInstance = Screens.InfoBar.InfoBar.instance
self.ts_dialog = None
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRec
|
JonSteinn/Kattis-Solutions
|
src/Fractal Area/Python 3/main.py
|
Python
|
gpl-3.0
| 448
| 0.017857
|
from math import pi
def fractal_area(r,n):
first = r**2
if n == 1:
return first * pi
second = 4 * (r/2)**2
    if n == 2:
        return (first + second) * pi
rest = sum((r/2**i)**2 * 4*3**(i-1) for i in range(2,n))
return (first + second + rest) * pi
def main():
    for _ in range(int(input())):
r,n = map(int, input().split())
print('%.6f' % fractal_area(r,n))
if __name__ == "__main__":
main()
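# --- Illustrative note, not part of the original solution ---
# Worked example of the series: for r = 1, n = 3,
#   first  = 1**2         = 1
#   second = 4 * (1/2)**2 = 1
#   rest   = (1/2**2)**2 * 4 * 3**1 = 0.75   (only i = 2 contributes)
# so fractal_area(1, 3) == (1 + 1 + 0.75) * pi, roughly 8.639380.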
|
tommo/gii
|
tools/build/__init__.py
|
Python
|
mit
| 1,086
| 0.138122
|
import os
import logging
import argparse
from gii.core import Project, app
from gii.core.tools import Build
cli = argparse.ArgumentParser(
prog = 'gii build',
description = 'Build GII Host(s) for current project'
)
cli.add_argument( 'targets',
type = str,
nargs = '*',
default = 'native'
)
cli.add_argument( '-c', '--configure',
dest = 'configure',
help = 'Configure waf buildtool',
action = 'store_true',
default = False
)
cli.add_argument( '-p', '--profile',
	dest = 'profile',
help = 'release/debug ',
default = 'debug'
)
cli.add_argument( '--clean-bin',
dest = 'clean-bin',
help = 'Clean built binary files',
action = 'store_true',
default = False
)
cli.add_argument( '--clean',
dest = 'clean',
help = 'Clean build files',
	action = 'store_true',
default = False
)
cli.add_argument( '-v','--verbose',
dest = 'verbose',
help = 'Verbosal building log',
action = 'store_true',
default = False
)
def main( argv ):
app.openProject()
args = cli.parse_args( argv[1:] )
code = Build.run(
**vars( args )
)
exit( code )
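# --- Illustrative note, not part of the original module ---
# Hypothetical invocations, given the arguments declared above:
#   gii build                    # build the default 'native' target
#   gii build native -p release  # build with the release profile
#   gii build --clean -v         # clean build files with verbose logging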
|
lingpy/plugins
|
burmish/basics.py
|
Python
|
gpl-2.0
| 11,825
| 0.00882
|
# author : Johann-Mattis List
# email : mattis.list@uni-marburg.de
# created : 2014-09-10 19:49
# modified : 2014-09-10 21:17
"""
Wordlist plugin for burmish data.
"""
__author__="Johann-Mattis List"
__date__="2014-09-10"
import unicodedata as ucd
import re
import sqlite3
import lingpyd
from .unicode import *
def clean_entry(entry, **keywords):
"""
Normalize (NFC) entry and remove bad chars.
"""
kw = dict(
brackets = rcParams['brackets'],
exact_bracket_matching = True,
)
kw.update(keywords)
# normalize first
new_entry = ucd.normalize("NFC", entry)
# normalize linguistically
entries = list(new_entry)
for i,char in enumerate(entries):
try:
entries[i] = rcParams['normalizations'][char]
except KeyError:
pass
new_entry = ''.join(entries)
if kw['exact_bracket_matching']:
# delete stuff in brackets
for b1 in kw['brackets']:
b2 = get_pendant(b1)
# get possible index
idxA = new_entry.find(b1)
idxB = new_entry.find(b2)
# check for existing indices
if idxA != -1 and idxA < idxB:
new_entry = new_entry[:idxA]+new_entry[idxB+1:]
else:
b1s = []
b2s = []
for b1 in kw['brackets']:
idxA = new_entry.find(b1)
if idxA != -1:
b1s.append(idxA)
idxB = new_entry.find(get_pendant(b1))
if idxB != -1:
b2s.append(idxB)
new_entry = new_entry[:min(b1s)]+new_entry[max(b2s)+1:]
# go for spaces and replace by '_'
new_entry = new_entry.replace(' ','_')
return new_entry
def ipa2tokens(
istring,
**keywords
):
"""
Tokenize IPA-encoded strings.
Parameters
----------
seq : str
The input sequence that shall be tokenized.
diacritics : {str, None} (default=None)
A string containing all diacritics which shall be considered in the
respective analysis. When set to *None*, the default diacritic string
will be used.
vowels : {str, None} (default=None)
A string containing all vowel symbols which shall be considered in the
respective analysis. When set to *None*, the default vowel string will
be used.
tones : {str, None} (default=None)
        A string indicating all tone letter symbols which shall be considered
in the respective analysis. When set to *None*, the default tone string
will be used.
combiners : str (default="\u0361\u035c")
A string with characters that are used to combine two separate
characters (compare affricates such as t͡s).
breaks : str (default="-.")
A string containing the characters that indicate that a new token
starts right after them. These can be used to indicate that two
        consecutive vowels should not be treated as diphthongs or for diacritics
that are put before the following letter.
merge_vowels : bool
        Indicate, whether vowels should be merged into diphthongs
(default=True), or whether each vowel symbol should be considered
separately.
merge_identical_symbols : bool
Indicate, whether identical symbols should be merged into one token, or
rather be kept separate.
semi_diacritics: str (default="ʃhsʑɕʂʐñ")
Indicate which symbols shall be treated as "semi-diacritics", that is,
as symbols which can occur on their own, but which eventually, when
preceded by a consonant, will form clusters with it. If you want to
        disable this feature, just set the keyword to an empty string.
Returns
-------
tokens : list
A list of IPA tokens.
Examples
--------
>>> from lingpyd import *
>>> myseq = 't͡sɔyɡə'
>>> ipa2tokens(myseq)
['t͡s', 'ɔy', 'ɡ', 'ə']
See also
--------
tokens2class
class2tokens
"""
# go for defaults
kw = dict(
vowels = lingpyd.settings.rcParams['vowels'],
diacritics = lingpyd.settings.rcParams['diacritics'],
expand_nasals = True, # addon
tones = lingpyd.settings.rcParams['tones'],
combiners = lingpyd.settings.rcParams['combiners'],
breaks = lingpyd.settings.rcParams['breaks'],
stress = lingpyd.settings.rcParams['stress'],
merge_vowels = lingpyd.settings.rcParams['merge_vowels'],
merge_identical_symbols = True,
semi_diacritics = 'ʃhsʑɕʂʐñ'
)
kw.update(keywords)
# clean the entry first
istring = clean_entry(istring)
# check for pre-tokenized strings
if ' ' in istring:
out = istring.split(' ')
if istring.startswith('#'):
return out[1:-1]
else:
return out
# create the list for the output
out = []
nasals = "ãũẽĩõ"
nasal_char = "\u0303"
semi_diacritics = kw['semi_diacritics'] #"ʃhsʑɕʂʐñ"
nogos = '_'
# set basic characteristics
vowel = False # no vowel
tone = False # no tone
merge = False # no merge command
start = True # start of unit
nasal = False # start of nasal vowel environment
for char in istring:
# check for nasal stack and vowel environment
if nasal:
if char not in kw['vowels'] and char not in kw['diacritics'] :
out += [rcParams['nasal_placeholder']]
nasal = False
# check for breaks first, since they force us to start anew
if char in kw['breaks']:
start = True
vowel = False
tone = False
merge = False
# check for combiners next
elif char in kw['combiners']:
out[-1] += char
merge = True
# check for stress
elif char in kw['stress']:
out += [char]
# XXX be careful about the removement of the start-flag here, but it
# XXX seems to make sense so far!
merge = True
tone = False
vowel = False
start = False
# check for merge command
elif merge:
out[-1] += char
if char in kw['vowels']:
vowel = True
merge = False
        # check for nasals
        elif kw['expand_nasals'] and char == nasal_char and vowel:
out[-1] += char
start = False
nasal = True
# check for weak diacritics
elif char in semi_diacritics and not start and not vowel and not tone and out[-1] not in nogos:
out[-1] += char
# check for diacritics
elif char in kw['diacritics']:
            if not start:
out[-1] += char
else:
out += [char]
start = False
merge = True
# check for vowels
elif char in kw['vowels']:
if vowel and kw['merge_vowels']:
out[-1] += char
else:
out += [char]
vowel = True
start = False
tone = False
# check for tones
elif char in kw['tones']:
vowel = False
if tone:
out[-1] += char
else:
out += [char]
tone = True
start = False
# consonants
else:
vowel = False
tone = False
out += [char]
start = False
tone = False
if nasal:
out += [rcParams['nasal_placeholder']]
if kw['merge_identical_symbols']:
new_out = [out[0]]
for i in range(len(out) -1):
outA = out[i]
outB = out[i+1]
if outA == outB:
new_out[-1] += outB
else:
new_out += [outB]
return new_out
return out
def secondary_structures(tokens):
|
FRBs/DM
|
frb/dm_kde/sort_transient_data.py
|
Python
|
bsd-3-clause
| 4,742
| 0.015183
|
""" Module to correct pulsar and FRB DMs for the MW ISM """
from ne2001 import ne_io, density #ne2001 ism model
import pygedm #ymw ism model
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord, Galactic
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
ne = density.ElectronDensity()
def find_delta_dm(transient_type,transient_data,ism_model,b_val,mc_deg=5,save_df=True):
"""
    Find pulsar/FRB DMs corrected for the MW ISM DM and remove observations in complex DM regions.
Returns array of DMs
    FRB data is available as a csv in the FRBs/FRB/frb/data/FRBs repo (FRB catalogue [Petroff et al. 2017])
    Pulsar data is available as a csv in the FRBs/pulsars/pulsars/data/atnf_cat repo (v1.61 ATNF pulsar catalogue [Manchester et al. 2005])
Arguments:
transient_type (str):
Accepts 'frb' or 'pulsar'.
transient_data (str):
Path to data (in .csv format).
ism_model (str):
Model used to calculated the MW halo DM.
Accepts 'ymw16' [Yao et al. 2017] or 'ne2001' [Cordes & Lazio 2003].
b_val (int):
Galactic latitude considered (b>b_val, b<-b_val).
mc_deg (int):
Number of degrees from Magellanic clouds within which transients are removed.
save_df (str, optional):
Save transient DMs and coords to csv.
Outputs:
"""
# Sort data and get coords
if transient_type=='frb':
transcat_df = pd.read_csv(transient_data, skiprows=1, usecols= [0,5,6,7], names=['Name','l','b','dm'])
transcat_df['dm'] = transcat_df['dm'].str.split('&').str[0].astype(float).values
coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
elif transient_type=='pulsar':
transcat_df = pd.read_csv(transient_data, skiprows=2, usecols = [1,2,3,9,10], names=['Name','Pref','dm','RAJD','DECJD'])
transcat_df = transcat_df[~transcat_df['dm'].str.contains('*', regex=False)].reset_index(drop=True)
transcat_df['dm'] = transcat_df['dm'].astype(float)
c_icrs = SkyCoord(ra=transcat_df['RAJD'], dec=transcat_df['DECJD'], unit=(u.degree), frame='icrs')
transcat_df['l'] = pd.DataFrame(c_icrs.galactic.l.value)
transcat_df['b'] = pd.DataFrame(c_icrs.galactic.b.value)
coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
# Find transients in line of sight of MCs
logging.info('Removing transients near Magellanic clouds...')
# LMC
lmc_distance = 50*u.kpc
lmc_coord = SkyCoord('J052334.6-694522',unit=(u.hourangle, u.deg),distance=lmc_distance)
close_to_lmc = lmc_coord.separation(coords) < mc_deg*u.deg
lmc_trans = list(transcat_df[close_to_lmc]['Name'])
# SMC
smc_distance = 61*u.kpc
smc_coord = SkyCoord('J005238.0-724801',unit=(u.hourangle, u.deg),distance=smc_distance)
close_to_smc = smc_coord.separation(coords) < mc_deg*u.deg
smc_trans = list(transcat_df[close_to_smc]['Name'])
transcat_df = transcat_df[~transcat_df['Name'].isin(lmc_trans)].reset_index(drop=True)
transcat_df = transcat_df[~transcat_df['Name'].isin(smc_trans)].reset_index(drop=True)
if transient_type=='pulsar':
transcat_df = transcat_df[~transcat_df['Pref'].str.contains('mfl+06', regex=False)].reset_index(drop=True)
elif transient_type=='frb':
pass
    # Remove transients with low Galactic latitudes
    logging.info('Removing transients with low Galactic latitudes...')
transcat_df = pd.concat([transcat_df[transcat_df.b > b_val], transcat_df[transcat_df.b < -b_val]], ignore_index=True)
# ISM model
logging.info('Correcting transient DMs for ISM...')
trans_ism = []
if ism_model=='ymw16':
for i in range(len(transcat_df['dm'])):
trans_ism_ = pygedm.dist_to_dm(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100000)[0].value
trans_ism = np.append(trans_ism,trans_ism_)
elif ism_model=='ne2001':
for i in range(len(transcat_df['dm'])):
trans_ism_ = ne.DM(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100.).value
trans_ism = np.append(trans_ism,trans_ism_)
transcat_df['trans_ism'] = pd.DataFrame(trans_ism)
transcat_df['deltaDM'] = pd.DataFrame(transcat_df['dm']-transcat_df['trans_ism'])
if save_df==True:
transcat_df.to_csv('transient_data/'+transient_type+'cat_df_'+ism_model+'_'+str(int(b_val))+'.csv')
logging.info('Transient data saved to csv.')
else:
pass
return np.array(transcat_df['deltaDM'])
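# Hedged usage sketch. The enclosing function's name is not visible in this
# excerpt, so `transient_dm_excess` below is a hypothetical stand-in for it,
# and the catalogue path is illustrative:
#   delta_dm = transient_dm_excess('frb', 'frbcat.csv', ism_model='ymw16',
#                                  b_val=10, mc_deg=5, save_df=False)
#   # delta_dm is a numpy array of DMs with the Galactic ISM contribution removed.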
|
google/grr
|
grr/client/grr_response_client/components/chipsec_support/actions/__init__.py
|
Python
|
apache-2.0
| 99
| 0
|
#!/usr/
|
bin/env python
"""Conditional import for Chipsec. Only Linux is supported at this stag
|
e."""
|
dietrichc/streamline-ppc-reports
|
googleads/common.py
|
Python
|
apache-2.0
| 12,102
| 0.006363
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common client library functions and classes used by all products."""
__author__ = 'Joseph DiLallo'
import os
import sys
import warnings
import httplib2
import socks
import suds
import yaml
import googleads.errors
import googleads.oauth2
VERSION = '3.0.1'
_COMMON_LIB_SIG = 'googleads/%s' % VERSION
_PROXY_YAML_KEY = 'proxy_info'
_PYTHON_VERSION = 'Python/%d.%d' % (sys.version_info[0], sys.version_info[1])
# The keys in the authentication dictionary that are used to construct OAuth 2.0
# credentials.
_OAUTH_2_AUTH_KEYS = ('client_id', 'client_secret', 'refresh_t
|
oken')
# The keys in the proxy dictionary that are used to construct a ProxyInfo
# instance.
_PROXY_KEYS = ('host', 'port')
def GenerateLibSig(short_name):
"""Generates a library signature s
|
uitable for a user agent field.
Args:
short_name: The short, product-specific string name for the library.
Returns:
A library signature string to append to user-supplied user-agent value.
"""
return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
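# Hedged example of GenerateLibSig output. The short name is illustrative and
# the Python component depends on the interpreter actually running the code:
#   GenerateLibSig('AwApi-Python')
#   -> ' (AwApi-Python, googleads/3.0.1, Python/2.7)'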
def LoadFromStorage(path, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth 2.0 credentials. It may also optionally provide
proxy_info in order to configure a proxy.
Args:
path: A path string to the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth 2.0 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth 2.0 key was missing.
"""
if not os.path.isabs(path):
path = os.path.expanduser(path)
try:
with open(path, 'r') as handle:
data = yaml.safe_load(handle.read())
product_data = data.get(product_yaml_key) or {}
proxy_data = data.get(_PROXY_YAML_KEY) or {}
except IOError:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not be opened.' % path)
original_keys = list(product_data.keys())
original_proxy_keys = list(proxy_data.keys())
client_kwargs = {}
try:
for key in required_client_values:
client_kwargs[key] = product_data[key]
del product_data[key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file, %s, is missing some of the required values. Required '
'values are: %s, actual values are %s'
% (path, required_client_values, original_keys))
try:
proxy_info = (httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, proxy_data['host'],
proxy_data['port'])
if proxy_data else None)
client_kwargs['https_proxy'] = ('%s:%s' % (proxy_info.proxy_host,
proxy_info.proxy_port)
if proxy_info else None)
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file, %s, is missing some of the required proxy values.'
'Required values are: %s, actual values are %s'
% (path, _PROXY_KEYS, original_proxy_keys))
ca_certs = proxy_data.get('ca_certs', None)
disable_ssl_certificate_validation = proxy_data.get(
'disable_ssl_certificate_validation', True)
try:
client_kwargs['oauth2_client'] = (
googleads.oauth2.GoogleRefreshTokenClient(
product_data['client_id'], product_data['client_secret'],
product_data['refresh_token'], proxy_info,
disable_ssl_certificate_validation, ca_certs))
for auth_key in _OAUTH_2_AUTH_KEYS:
del product_data[auth_key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file, %s, is missing some of the required OAuth 2.0 '
'values. Required values are: %s, actual values are %s'
% (path, _OAUTH_2_AUTH_KEYS, original_keys))
for value in optional_product_values:
if value in product_data:
client_kwargs[value] = product_data[value]
del product_data[value]
if product_data:
warnings.warn(
'Your yaml file, %s, contains the following unrecognized '
'keys: %s. They were ignored.' % (path, product_data), stacklevel=3)
return client_kwargs
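# Hedged sketch of the yaml layout LoadFromStorage expects. The 'adwords'
# product key and the 'developer_token' entry are illustrative assumptions;
# the OAuth 2.0 keys and proxy keys follow _OAUTH_2_AUTH_KEYS and _PROXY_KEYS.
#
#   adwords:
#     developer_token: INSERT_DEVELOPER_TOKEN
#     client_id: INSERT_OAUTH2_CLIENT_ID
#     client_secret: INSERT_OAUTH2_CLIENT_SECRET
#     refresh_token: INSERT_REFRESH_TOKEN
#   proxy_info:
#     host: proxy.example.com
#     port: 8080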
def _PackForSuds(obj, factory):
"""Packs SOAP input into the format we want for suds.
The main goal here is to pack dictionaries with an 'xsi_type' key into
objects. This allows dictionary syntax to be used even with complex types
extending other complex types. The contents of dictionaries and lists/tuples
are recursively packed. Mutable types are copied - we don't mutate the input.
Args:
obj: A parameter for a SOAP request which will be packed. If this is
a dictionary or list, the contents will recursively be packed. If this
is not a dictionary or list, the contents will be recursively searched
for instances of unpacked dictionaries or lists.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
Returns:
If the given obj was a dictionary that contained the 'xsi_type' key, this
will be an instance of a class generated from the WSDL. Otherwise, this will
be the same data type as the input obj was.
"""
if obj in ({}, None):
# Force suds to serialize empty objects. There are legitimate use cases for
# this, for example passing in an empty SearchCriteria object to a DFA
# search method in order to select everything.
return suds.null()
elif isinstance(obj, dict):
if 'xsi_type' in obj:
try:
new_obj = factory.create(obj['xsi_type'])
except suds.TypeNotFound:
new_obj = factory.create(':'.join(['ns0', obj['xsi_type']]))
# Suds sends an empty XML element for enum types which are not set. None
# of Google's Ads APIs will accept this. Initializing all of the fields in
# a suds object to None will ensure that they don't get serialized at all
# unless the user sets a value. User values explicitly set to None will be
# packed into a suds.null() object.
for param, _ in new_obj:
# Another problem is that the suds.mx.appender.ObjectAppender won't
# serialize object types with no fields set, but both AdWords and DFP
# rely on sending objects with just the xsi:type set. The below "if"
# statement is an ugly hack that gets this to work in all(?) situations
# by taking advantage of the fact that these classes generally all have
# a type field. The only other option is to monkey patch ObjectAppender.
if param.endswith('.Type'):
setattr(new_obj, param, obj['xsi_type'])
else:
setattr(new_obj, param, None)
for key in obj:
if key == 'xsi_type': conti
|
SPbAU-ProgrammingParadigms/materials
|
python_2/common_objects.py
|
Python
|
unlicense
| 4,690
| 0.001066
|
import os
import sys
import string
import random
import math
#################################################
# State
balance = 0
def deposit(amount):
global balance
balance += amount
return balance
def withdraw(amount):
global balance
balance -= amount
return balance
#################################################
# Dict like
def make_account():
return {'balance': 0}
def deposit(account, amount):
account['balance'] += amount
return account['balance']
def withdraw(account, amount):
account['balance'] -= amount
return account['balance']
# >>> a = make_account()
# >>> b = make_account()
# >>> deposit(a, 100)
# 100
# >>> deposit(b, 50)
# 50
# >>> withdraw(b, 10)
# 40
# >>> withdraw(a, 10)
# 90
#################################################
# Class
class BankAccount:
def __init__(self, balance=0):
self.balance = balance
def withdraw(self, amount):
self.balance -= amount
return self.balance
def deposit(self, amount):
self.balance += amount
return self.balance
# >>> a = BankAccount()
# >>> b = BankAccount()
# >>> a.deposit(100)
# 100
# >>> b.deposit(50)
# 50
# >>> b.withdraw(10)
# 40
# >>> a.withdraw(10)
# 90
#################################################
# Inheritance
class MinimumBalanceAccount(BankAccount):
def __init__(self, minimum_balance):
BankAccount.__init__(self)
self.minimum_balance = minimum_balance
def withdraw(self, amount):
if self.balance - amount < self.minimum_balance:
print('Sorry, minimum balance must be maintained.')
else:
BankAccount.withdraw(self, amount)
# >>> a = MinimumBalanceAccount(0)
# >>> a.deposit(100)
# 100
# >>> b.withdraw(101)
# 'Sorry, minimum balance must be maintained.'
########################################
# Mangling, Exceptions
def generate_id(n=16):
alphabet = string.ascii_letters + string.digits
return ''.join(random.choice(alphabet) for _ in range(n))
class WithdrawError(Exception):
"""Not enough money"""
def __init__(self, amount):
super().__init__()
|
self.amount = amount
class AdvancedBankAccount:
MAX_BALANCE = 2 ** 64
def __init__
|
(self):
self._balance = 0
self.__id = generate_id()
def withdraw(self, amount):
if not isinstance(amount, int):
raise ValueError
if self._balance < amount:
raise WithdrawError(amount)
self._balance -= amount
return self._balance
def deposit(self, amount):
self._balance += amount
return self._balance
def get_max_balance():
return AdvancedBankAccount.MAX_BALANCE
if __name__ == '__main__':
a = AdvancedBankAccount()
b = a
c = AdvancedBankAccount()
a.deposit(10)
# AdvancedBankAccount.deposit(a, 10) # the same
print('UNACCEPTABLE! b balance:', b._balance)
# print(b.__id) # error, name mangling
a.get_id = lambda self: self.__id
# print(a.get_id()) # TypeError
# print(a.get_id(a)) # AttributeError
################################################
# UNACCEPTABLE!
print("UNACCEPTABLE! b id:", b._AdvancedBankAccount__id) # name unmangling
# static
AdvancedBankAccount.MAX_BALANCE = 2 ** 32
print('max balance:', AdvancedBankAccount.get_max_balance())
a.MAX_BALANCE = 2 ** 64
print('a max: {}, c max: {}'.format(a.MAX_BALANCE,
c.MAX_BALANCE))
################################################
# Exceptions
# in module import
try:
a.withdraw("100")
except:
pass
    # UNACCEPTABLE!
try:
a.withdraw(100)
except WithdrawError as e:
pass
try:
a.withdraw(100)
except (ValueError, WithdrawError) as e:
print('exception raised')
else:
print('no exception')
finally:
print('Finally')
def tricky():
try:
print('Tricky called')
return 1
finally:
print('Tricky finally called')
return 42
return 0
print(tricky())
# how about with statement?
# module is object -> import
class Shape:
def area(self):
raise NotImplementedError
class Circle(Shape):
def __init__(self, radius):
self.radius = radius
def area(self):
return math.pi * self.radius ** 2
class Square(Shape):
def __init__(self, side):
self.side = side
def area(self):
return self.side ** 2
if __name__ == "__main__":
a = [Square(10), Circle(2)]
s = sum(s.area() for s in a)
print(s)
|
ikvk/imap_tools
|
setup.py
|
Python
|
apache-2.0
| 1,092
| 0.001832
|
import os
import re
import setuptools
def get_version(package: str) -> str:
"""Return package version as listed in __version__ variable at __init__.py"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search(r"__version__\s*=\s*['\"]([^'\"]+)['\"]", init_py).group(1)
with open("README.rst", "r", encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name='imap-tools',
version=get_version('imap_tools'),
packages=setuptools.find_packages(exclude=['tests']),
url='https://github.com/ikvk
|
/imap_tools',
license='Apache-2.0'
|
,
long_description=long_description,
long_description_content_type="text/x-rst",
author='Vladimir Kaukin',
author_email='KaukinVK@ya.ru',
description='Work with email by IMAP',
keywords=['imap', 'imap-client', 'python3', 'python', 'email'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
# install_requires=['typing>=3.6.2'],
)
|
bruceg/bglibs
|
python/template/__init__.py
|
Python
|
lgpl-2.1
| 1,139
| 0.005268
|
# Copyright (C) 2000,2005 Bruce Guenter <bruce@untroubled.org>
#
#
|
This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
|
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import functions
import context
import syntax
import lex
import parser
path = syntax.path
if __name__ == '__main__':
body = sys.stdin.read()
tree = parser.parse(body)
tree.showtree(0)
ctxt = context.Context(sys.stdout,
{'username':'nobody', 'domain':'testdomain.org'})
tree.execute(ctxt)
print ctxt.dict
|
geekonerd/smartplugs
|
data/transmit.py
|
Python
|
gpl-3.0
| 2,037
| 0.000491
|
import time
import sys
import RPi.GPIO as GPIO
'''ON and OFF binary strings for sockets 1,2,3,4 mapped as a,b,c,d'''
a_on = ''
a_off = ''
b_on = ''
b_off = ''
c_on = ''
c_off = ''
d_on = '0011100011000111011010001'
d_off = '0011100011000111011010011'
'''short/long intervals within the signal and between repetitions (in seconds)'''
intervallo_breve = 0.00030
intervallo_lungo = 0.00096
intervallo_tra_tentativi = 0.0113
NUMERO_DI_TENTATIVI = 15
'''PIN used to send data to the transmitter chip'''
PIN_DATA_DI_INVIO = 16
def transmit_code(code):
    '''Use the BCM numbering scheme to specify which PIN to use'''
GPIO.setmode(GPIO.BCM)
    '''Set the PIN indicated in the BCM scheme
|
as the data output PIN'''
GPIO.setup(PIN_DATA_DI_INVIO, GPIO.OUT)
    '''Repeat the transmission for the indicated number of attempts'''
for t in range(NUMERO_DI_TENTATIVI):
for i in code:
if i == '1':
                '''Bit = 1: short PIN on, then long PIN off'''
|
GPIO.output(PIN_DATA_DI_INVIO, 1)
time.sleep(intervallo_breve)
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_lungo)
elif i == '0':
                '''Bit = 0: long PIN on, then short PIN off'''
GPIO.output(PIN_DATA_DI_INVIO, 1)
time.sleep(intervallo_lungo)
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_breve)
else:
continue
        '''Switch the PIN off and wait until the next interval'''
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_tra_tentativi)
    '''Transmission finished; clean up the GPIO'''
GPIO.cleanup()
if __name__ == '__main__':
    '''Capture the signal to send: a_on, a_off, b_on - etc...'''
for argument in sys.argv[1:]:
exec('transmit_code(' + str(argument) + ')')
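    # Hedged usage sketch: the script is invoked from the shell with the *name*
    # of one of the code strings defined above, e.g.
    #   python transmit.py d_on
    # and the loop above then runs transmit_code(d_on) via exec().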
|
coinwarp/python-altcoinlib
|
altcoin/core/key.py
|
Python
|
lgpl-3.0
| 1,510
| 0.000662
|
# Copyright (C) 2011 Sam Rushing
# Copyright (C) 2012-2014 The python-bitcoinlib developers
# Copyright (C) 2015 The python-altcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import ctypes
import ctypes.util
from bitcoin.core.key import CECKey, _ssl
# CECKey with added support for key generation
class CAltcoinECKey(CECKey):
def __init__(self):
CECKey.__init__(self)
def __del__(self):
CECKey.__del__(self)
def get_secret_bytes(self):
global _ssl
secret = _ssl.EC_KEY_get0_private_key(self.k)
mb = ctypes.create_string_buffer(32)
size = _ssl.BN_bn2bin(secret, mb)
if size == 32:
return mb.raw
else:
# Move the data into a zero-padded buffer of 32 bytes
padding = 32 - size
new_buffer = ctypes.create_string_buffer(32)
for idx in range(0, padding):
new_buffer[idx] = "\x00"
for idx in
|
range(padding, 32):
new_buffer[idx] = mb[idx - padding]
return new_buffer.raw
def generate(self):
global _ssl
_ssl.EC_KEY_generate_key(self.k)
return self.k
__all__ = (
'CAl
|
tcoinECKey',
)
|
mikedh/trimesh
|
trimesh/voxel/encoding.py
|
Python
|
mit
| 27,436
| 0
|
"""OO interfaces to encodings for ND arrays which caching."""
import numpy as np
import abc
from ..util import ABC
from . import runlength as rl
from .. import caching
try:
from scipy import sparse as sp
except BaseException as E:
from ..exceptions import ExceptionModule
sp = ExceptionModule(E)
def _empty_stripped(shape):
num_dims = len(shape)
encoding = DenseEncoding(
np.empty(shape=(0,) * num_dims, dtype=bool))
padding = np.zeros(shape=(num_dims, 2), dtype=int)
padding[:, 1] = shape
return encoding, padding
class Encoding(ABC):
"""
    Base class for objects that implement a specific subset of ndarray ops.
This presents a unified interface for various different ways of encoding
conceptually dense arrays and to interoperate between them.
Example implementations are ND sparse arrays, run length encoded arrays
and dense encodings (wrappers around np.ndarrays).
"""
def __init__(self, data):
self._data = data
self._cache = caching.Cache(id_function=data.crc)
@abc.abstractproperty
def dtype(self):
pass
@abc.abstractproperty
def shape(self):
pass
@abc.abstractproperty
def sum(self):
pass
@abc.abstractproperty
def size(self):
pass
@abc.abstractproperty
def sparse_indices(self):
pass
@abc.abstractproperty
def sparse_values(self):
pass
@abc.abstractproperty
def dense(self):
pass
@abc.abstractmethod
def gather_nd(self, indices):
pass
@abc.abstractmethod
def mask(self, mask):
pass
@abc.abstractmethod
def get_value(self, index):
pass
@abc.abstractmethod
def copy(self):
pass
@property
def is_empty(self):
return self.sparse_indices[self.sparse_values != 0].size == 0
@caching.cache_decorator
def stripped(self):
"""
Get encoding with all zeros stripped from the start and end
of each axis.
Returns
------------
        encoding : Encoding
            The same data with leading/trailing all-zero slices removed from every axis.
padding : (n, 2) int
Padding at the start and end that was stripped
"""
if self.is_empty:
return _empty_stripped(self.shape)
dense = self.dense
shape = dense.shape
ndims = len(shape)
padding = []
slices = []
f
|
or dim, size in enumerate(shape):
axis = tuple(range(dim)) + tuple(range(dim + 1, ndims))
filled = np.any(dense, axis=axis)
indices, = np.nonzero(filled)
lower = indices.min()
upper = indices.max() + 1
padding.append([lower, size - up
|
per])
slices.append(slice(lower, upper))
return DenseEncoding(dense[tuple(slices)]), np.array(padding, int)
def _flip(self, axes):
return FlippedEncoding(self, axes)
def md5(self):
return self._data.md5()
def crc(self):
return self._data.crc()
@property
def ndims(self):
return len(self.shape)
def reshape(self, shape):
return self.flat if len(shape) == 1 else ShapedEncoding(self, shape)
@property
def flat(self):
return FlattenedEncoding(self)
def flip(self, axis=0):
return _flipped(self, axis)
@property
def sparse_components(self):
return self.sparse_indices, self.sparse_values
@property
def data(self):
return self._data
def run_length_data(self, dtype=np.int64):
if self.ndims != 1:
raise ValueError(
'`run_length_data` only valid for flat encodings')
return rl.dense_to_rle(self.dense, dtype=dtype)
def binary_run_length_data(self, dtype=np.int64):
if self.ndims != 1:
raise ValueError(
                '`binary_run_length_data` only valid for flat encodings')
return rl.dense_to_brle(self.dense, dtype=dtype)
def transpose(self, perm):
return _transposed(self, perm)
def _transpose(self, perm):
return TransposedEncoding(self, perm)
@property
def mutable(self):
return self._data.mutable
@mutable.setter
def mutable(self, value):
self._data.mutable = value
class DenseEncoding(Encoding):
"""Simple `Encoding` implementation based on a numpy ndarray."""
def __init__(self, data):
if not isinstance(data, caching.TrackedArray):
if not isinstance(data, np.ndarray):
raise ValueError('DenseEncoding data must be a numpy array')
data = caching.tracked_array(data)
super(DenseEncoding, self).__init__(data=data)
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@caching.cache_decorator
def sum(self):
return self._data.sum()
@caching.cache_decorator
def is_empty(self):
return not np.any(self._data)
@property
def size(self):
return self._data.size
@property
def sparse_components(self):
indices = self.sparse_indices
values = self.gather(indices)
return indices, values
@caching.cache_decorator
def sparse_indices(self):
return np.column_stack(np.where(self._data))
@caching.cache_decorator
def sparse_values(self):
return self.sparse_components[1]
def _flip(self, axes):
dense = self.dense
for a in axes:
dense = np.flip(dense, a)
return DenseEncoding(dense)
@property
def dense(self):
return self._data
def gather(self, indices):
return self._data[indices]
def gather_nd(self, indices):
return self._data[tuple(indices.T)]
def mask(self, mask):
return self._data[mask if isinstance(mask, np.ndarray) else mask.dense]
def get_value(self, index):
return self._data[tuple(index)]
def reshape(self, shape):
return DenseEncoding(self._data.reshape(shape))
def _transpose(self, perm):
return DenseEncoding(self._data.transpose(perm))
@property
def flat(self):
return DenseEncoding(self._data.reshape((-1,)))
def copy(self):
return DenseEncoding(self._data.copy())
class SparseEncoding(Encoding):
"""
`Encoding` implementation based on an ND sparse implementation.
Since the scipy.sparse implementations are for 2D arrays only, this
implementation uses a single-column CSC matrix with index
raveling/unraveling.
"""
def __init__(self, indices, values, shape=None):
"""
Parameters
------------
indices: (m, n)-sized int array of indices
        values: (m,)-sized dtype array of values at the specified indices
shape: (n,) iterable of integers. If None, the maximum value of indices
+ 1 is used.
"""
data = caching.DataStore()
super(SparseEncoding, self).__init__(data)
data['indices'] = indices
data['values'] = values
indices = data['indices']
if len(indices.shape) != 2:
raise ValueError(
'indices must be 2D, got shaped %s' % str(indices.shape))
if data['values'].shape != (indices.shape[0],):
raise ValueError(
'values and indices shapes inconsistent: %s and %s'
% (data['values'], data['indices']))
if shape is None:
self._shape = tuple(data['indices'].max(axis=0) + 1)
else:
self._shape = tuple(shape)
if not np.all(indices < self._shape):
raise ValueError('all indices must be less than shape')
if not np.all(indices >= 0):
raise ValueError('all indices must be non-negative')
@staticmethod
def from_dense(dense_data):
sparse_indices = np.where(dense_data)
values = dense_data[sparse_indices]
return SparseEncoding(
np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape)
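    # Hedged usage sketch for from_dense (relies on the module-level numpy import):
    #   dense = np.zeros((2, 3), dtype=bool)
    #   dense[0, 1] = True
    #   enc = SparseEncoding.from_dense(dense)
    #   # enc stores indices [[0, 1]] and values [True], with shape (2, 3)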
def copy(self):
return SparseEncoding(
indices=self.sparse_indices.copy(),
|
heyrict/exam
|
exam.py
|
Python
|
apache-2.0
| 16,883
| 0.001599
|
import os
import pickle
import random
import re
from datetime import datetime
from data_processing import (InteractiveAnswer, _in_list, colorit, space_fill,
split_wrd)
BOARDER_LENGTH = 40
class Quest():
def __init__(self, q, sel=None, ta=None, args={}):
'''
Class representing a Question.
Parameters
----------
basic arguments:
q : question. necessary. list.
sel : selections. list.
ta : true answer. list.
        extensible arguments:
args : dict with sets of {'name': 'value'}.
'''
self.q = q
self.sel = sel
self.ta = ta
self.args = args
def __str__(self):
'''Visualize the `Quest`.'''
return '{\n\tq: %s,\n\tsel: %s,\n\tta: %s,\n\targs: %s\n}' % \
(self.q, self.sel, self.ta, self.args)
def __eq__(self, value):
        '''Evaluate whether two `Quest`s are equal.'''
if type(value) != type(self): return False
for i in ['q', 'sel', 'ta', 'args']:
if self.__getattribute__(i) != value.__getattribute__(i):
return False
return True
def __hash__(self):
return (hash('\n'.join(self.q)) + hash('\n'.join(self.sel)) + \
hash('\n'.join(self.ta)) + hash('\n'.join(self.args))) % int(1e+16)
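# Hedged usage sketch for Quest (the question text below is illustrative):
#   quest = Quest(q=['1 + 1 = ?'], sel=['A. 1', 'B. 2'], ta=['B'])
#   print(quest)   # shows the q/sel/ta/args fields via __str__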
class QuestForm(list):
def __init__(self, *args, **kwargs):
super(QuestForm, self).__init__(*args, **kwargs)
def __getitem__(self, ind):
if type(ind) == int:
return super(QuestForm, self).__getitem__(ind)
if type(ind) == slice:
return QuestForm(super(QuestForm, self).__getitem__(ind))
else:
returns = QuestForm()
for i in ind:
returns.append(self[i])
return returns
def append(self, *args, **kwargs):
super(QuestForm, self).append(*args, **kwargs)
return self
class QuestFormTextLoader():
'''QuestForm Loader for text files.'''
def __init__(self,
questpattern,
qpattern,
selpattern=None,
tapattern=None,
argpattern={}):
'''
Parameters
----------
questpattern : regex pattern for a question. necessary.
qpattern : regex pattern for question text in a question. necessary.
selpattern : regex pattern for selections.
a question can have several matching selections.
tapattern : regex pattern for true answer.
argpattern : dict with {'arg_name' : 'arg_regex'} sets.
'''
self.questpattern = questpattern
self.qpattern = qpattern
self.selpattern = selpattern
self.tapattern = tapattern
self.argpattern = dict(argpattern)
self.is_cached = False
def get_cached_qf(self, togo='Curdata.data'):
'''Load cached QuestForm.'''
if togo in os.listdir():
if InteractiveAnswer(
'Cached data found.Continue?', yes_or_no=True).get():
with open(togo, 'rb') as f:
return pickle.load(f)
else:
datas = ["Create a new data"] + [
i for i in os.listdir() if re.findall(r'.*\.data$', i)
]
if not datas: return
print("Cached data not found, listing other datas")
for i in range(len(datas)):
print('\t%3s: \t%s' % (i, datas[i]))
no = InteractiveAnswer(
'Which one to choose?',
verify=range(len(datas)),
serializer=
lambda x: [int(i) for i in re.findall(r'[0-9]+', x)]).get()[0]
if no == 0:
return
else:
with open(datas[no], 'rb') as f:
return pickle.load(f)
def _load(self, queststr):
questform = QuestForm()
for quest in re.findall(self.questpattern, queststr):
qitem = re.findall(self.qpattern, quest)
selitem = re.findall(self.selpattern,
quest) if self.selpattern else None
taitem = re.findall(self.tapattern,
quest) if self.tapattern else None
            argitem = [(patnam,re.findall(self.argpattern[patnam],quest)) \
for patnam in self.argpattern] if self.argpattern else {}
questform = questform.append(
Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
return questform
def load(self, queststr):
        '''Search queststr, match arguments and return a QuestForm.'''
qf = self.get_cached_qf()
if qf is n
|
ot None:
self.is_cached = True
return qf
if 'MainData.data' in os.listdir():
with open('MainData.data', 'rb') as f:
qf = pickle.load(f)
else:
qf = self._load(queststr)
with open('MainData.data', 'wb') as f:
pickle.dump(qf, f)
return qf
class QuestFormExcelLoader
|
(QuestFormTextLoader):
'''QuestForm Loader for excel files. Requires `pandas` module.'''
def __init__(self, qcol, selcol=None, tacol=None, argcol={}):
'''
Parameters
----------
        qcol : column name holding the question text. necessary.
        selcol : column name(s) holding the selections.
        tacol : column name holding the true answer.
        argcol : dict with {'arg_name' : 'column_name'} sets.
'''
super(QuestFormExcelLoader, self).__init__(None, qcol, selcol, tacol,
argcol)
def _load(self, questdf):
import pandas as pd
if type(questdf) == str: questdf = pd.read_excel(questdf)
questform = QuestForm()
for q in range(len(questdf)):
quest = questdf.ix[q]
qitem = quest[self.qpattern]
selitem = quest[self.selpattern] if self.selpattern else None
taitem = quest[self.tapattern] if self.tapattern else None
argitem = {
pat: quest[self.argpattern[pat]]
for pat in self.argpattern
} if self.argpattern else {}
qitem = None if qitem is None else ([qitem] if isinstance(
qitem, str) else list(qitem))
selitem = None if selitem is None else ([selitem] if isinstance(
selitem, str) else list(selitem))
taitem = None if taitem is None else ([taitem] if isinstance(
taitem, str) else list(taitem))
questform = questform.append(
Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
return questform
class BeginQuestForm():
'''Class for rendering the exam.'''
def __init__(self,
qf,
arrange='qast',
no_score=False,
input_manner=None,
no_filter=False,
storage='l|w',
filenames=['Curdata.data', 'Wrongdata.data']):
'''
Parameters
----------
qf : QuestForm. The QuestForm that test on.
storage : str with several units separated by `|`.
            each unit contains one or more of the letters `t`, `w`, `o`, `l`.
            `t` indicates Quests that are marked as true.
            `w` indicates Quests that are marked as false.
            `o` indicates Quests that are marked as others.
            `l` indicates Quests that aren't marked.
        filenames : list where each element indicates the filename of
the output of `storage` option.
arrange : iterable. each element should be one argument in a `Quest` object.
`question` indicates the question text.
`args` indicates all args.
            `selections` indicates the selection texts.
`trueanswer` indicates the trueanswer text.
|
snyderks/advent-solutions
|
Day15.py
|
Python
|
mit
| 430
| 0
|
discs = []
# Create data
discs.append((13, 11))
discs.append((5, 0))
discs.append((17, 11))
discs.append((3, 0))
discs.append((7, 2))
discs.append((19, 17))
discs.append((11, 0))
done = False
t = 0
while d
|
one is False:
done = True
for i, disc in enumerate(discs):
if (t + i + 1 + disc[1]) % disc[0] is not
|
0:
done = False
break
if done:
print(str(t))
break
t += 1
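# Hedged worked example of the check above: the first disc has 13 positions and
# starts at position 11; a capsule dropped at time t reaches it at t + 1, so it
# passes through only when (t + 1 + 11) % 13 == 0.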
|
Gehn/JustAChatBot
|
plugin_utils.py
|
Python
|
mit
| 5,651
| 0.028491
|
import json
import datetime
import threading
from base_plugin import *
import base_plugin
#=============================================Messaging===================================
def send_message(recipient, message, mtype='chat'):
'''
Send a message to recipient.
:param recipient: The To field of your message.
:param message: the message string to send.
    :param mtype: The message type to send, supports public/private and xmpp style chat/groupchat.
'''
if mtype == 'private':
mtype = 'chat'
if mtype == 'public':
mtype = 'groupchat'
base_plugin.PluginContext.client.send_message(mto=recipient, mbody=message, mtype=mtype)
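# Hedged usage sketch (the room JID below is illustrative):
#   send_message('lobby@conference.example.org', 'hello', mtype='public')
#   # 'public' is mapped to the xmpp 'groupchat' type before sending.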
#=============================================FILTERS=====================================
#FIXME: this seems broken.
def self_message(event, plugin):
'''
filter for self generated events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if not self generated event, false otherwise.
'''
if msg.From_Nick != plugin.client.nick and plugin.client.nick in msg.Body:
return True
return False
def on_message(event, plugin):
'''
filter for group chat events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a group chat event, false otherwise.
'''
if event.Type in ["groupchat"]:
return True
return False
def on_private_message(event, plugin):
'''
filter for private message events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a private message event, false otherwise.
'''
if not event.Room:
return True
return False
def on_presence(event, plugin):
'''
filter for join/part type events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a presence event, false otherwise.
'''
if event.Type in ["available", "unavailable"]:
return True
return False
#=============================================FILE OPERATORS=====================================
def put_object_to_f
|
ile(item, path):
'''
Syntactic sugar, write jsonified object to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(json.dumps(item))
def get_object_from_file(path):
|
'''
Syntactic sugar, read jsonified object from file.
:param path: path to log file where item is stored.
Returns - json expanded item from log file.
'''
with open(path, 'r') as f:
item_str = f.read()
return json.loads(item_str)
def append_to_file(string, path):
'''
Syntactic sugar, append string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'a') as f:
f.write(string)
def write_to_file(string, path):
'''
Syntactic sugar, write string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(string)
def read_from_file(path):
'''
Syntactic sugar, read from file.
:param path: path to log file where item is stored.
Returns - string contents of log file.
'''
with open(path, 'r') as f:
return f.read()
def read_lines_from_file(path):
'''
    Read lines from file, as separated by newline/enter.
:param path: path to log file
Returns - list of lines
'''
return read_from_file(path).split('\n')
#===========================================TIMED EVENTS=====================================
def schedule_event_by_delay(delay, event, args=[]):
'''
Schedule an event by a delay in seconds.
:param delay: number of seconds until event triggers.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
threading.Timer(delay, call_function_with_variable_arguments, [event, args]).start()
def schedule_event(time, event, args=[]):
'''
Schedule an event by an absolute time
:param time: the datetime object representing the trigger time.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
def schedule_event(year, month, day, hour, minute, second, event, args=[]):
'''
Schedule an event by an absolute time
:param year: year of the event
:param month: month of the event
:param day: day of the event
:param hour: hour of the event
:param minute: minute of the event
:param second: second of the event
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
time = datetime.datetime(year, month, day, hour, minute, second)
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
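# Hedged usage sketch (recipient and message are illustrative; note that this
# second schedule_event definition shadows the datetime-based one above):
#   schedule_event_by_delay(60, send_message, ['user@example.org', 'ping'])
#   schedule_event(2024, 1, 1, 9, 0, 0, send_message,
#                  ['user@example.org', 'happy new year'])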
#==========================================HERE THERE BE DRAGONS=================================================
def call_function_with_variable_arguments(function, arguments):
'''
Takes functions, takes arguments, makes it fit.
:param function: The function to call
:param arguments: The argument list to make fit.
'''
iterator = len(arguments)
while True:
real_exception = None
try:
function(*(arguments[:iterator]))
return
except Exception as e:
if not real_exception or "takes exactly" not in str(e) or "arguments" not in str(e):
real_exception = e
iterator -= 1
if iterator < 0:
raise real_exception
|
datastreaming/mflow
|
examples/sender.py
|
Python
|
gpl-3.0
| 825
| 0.003636
|
import sys
import os
import time
|
try:
import mflow
except:
sys.path.append(os.environ["PWD"] + "/../")
import mflow
import logging
import numpy as np
logger = logging.getLogger("mflow.mflow")
logger.setLevel(logging.ERROR)
address = "tcp://127.0.0.1:40000"
stream = mflow.connect(address, conn_type=mflow.BIND, mode=mflow.PUSH, receive_timeout=1, queue_size=1)
for i in range(16):
try:
header
|
= '{"htype": "array-1.0", "type": "int32", "shape": [10], "frame": %d}' % i
data = np.zeros(10, dtype=np.int32) + i
stream.send(header.encode(), send_more=True, block=True)
stream.send(data.tobytes(), block=False)
print("Sending message %d" % i)
# Send out every 10ms
time.sleep(0.01)
except KeyboardInterrupt:
break
stream.disconnect()
|
JamesClough/networkx
|
networkx/readwrite/gml.py
|
Python
|
bsd-3-clause
| 23,163
| 0.000086
|
# encoding: utf-8
# Copyright (C) 2008-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg (hagberg@lanl.gov)
"""
Read graphs in GML format.
"GML, the G>raph Modelling Language, is our proposal for a portable
file format for graphs. GML's key features are portability, simple
syntax, extensibility and flexibility. A GML file consists of a
hierarchical key-value lists. Graphs can be annotated with arbitrary
data structures. The idea for a common file format was born at the
GD'95; this proposal is the outcome of many discussions. GML is the
standard file format in the Graphlet graph editor system. It has been
overtaken and adapted by several other systems for drawing graphs."
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Format
------
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
for format specification.
Example graphs in GML format
http://www-personal.umich.edu/~mejn/netdata/
"""
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ast import literal_eval
from collections import defaultdict
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
import re
try:
import htmlentitydefs
except ImportError:
# Python 3.x
import html.entities as htmlentitydefs
__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
try:
long
except NameError:
long = int
try:
unicode
except NameError:
unicode = str
try:
unichr
except NameError:
unichr = chr
try:
literal_eval(r"u'\u4444'")
except SyntaxError:
# Remove 'u' prefixes in unicode literals in Python 3
rtp_fix_unicode = lambda s: s[1:]
else:
rtp_fix_unicode = None
def escape(text):
"""Use XML character references to escape characters.
Use XML character references for unprintable or non-ASCII
characters, double quotes and ampersands in a string
"""
def fixup(m):
ch = m.group(0)
return '&#' + str(ord(ch)) + ';'
text = re.sub('[^ -~]|[&"]', fixup, text)
return text if isinstance(text, str) else str(text)
def unescape(text):
"""Replace XML character references with the referenced characters"""
def fixup(m):
text = m.group(0)
if text[1] == '#':
# Character reference
if text[2] == 'x':
code = int(text[3:-1], 16)
else:
code = int(text[2:-1])
else:
# Named entity
try:
code = htmlentitydefs.name2codepoint[text[1:-1]]
except KeyError:
return text # leave unchanged
try:
return chr(code) if code < 256 else unichr(code)
except (ValueError, OverflowError):
return text # leave unchanged
return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text)
def literal_destringizer(rep):
"""Convert a Python literal to the value it represents.
Parameters
----------
rep : string
A Python literal.
Returns
-------
value : object
The value of the Python literal.
Raises
------
ValueError
If `rep` is not a Python literal.
"""
if isinstance(rep, (str, unicode)):
orig_rep = rep
if rtp_fix_unicode is not None:
rep = rtp_fix_unicode
|
(rep)
try:
return literal_eval(rep)
except SyntaxError:
raise ValueError('%r is not a valid Python literal' % (orig_rep,))
e
|
lse:
raise ValueError('%r is not a string' % (rep,))
@open_file(0, mode='rb')
def read_gml(path, label='label', destringizer=None):
"""Read graph in GML format from path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value : None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, parse_gml
Notes
-----
The GML specification says that files should be ASCII encoded, with any
extended ASCII characters (iso8859-1) appearing as HTML character entities.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, 'test.gml')
>>> H = nx.read_gml('test.gml')
"""
def filter_lines(lines):
for line in lines:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
                line = str(line)
if line and line[-1] == '\n':
line = line[:-1]
yield line
G = parse_gml_lines(filter_lines(path), label, destringizer)
return G
def parse_gml(lines, label='label', destringizer=None):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable of strings
Data in GML format.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value : None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, read_gml
Notes
-----
This stores nested GML attributes as dictionaries in the
NetworkX graph, node, and edge attribute structures.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
"""
def decode_line(line):
if isinstance(line, bytes):
try:
line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
line = str(line)
return line
def filter_lines(lines):
if isinstance(lines, (str, unicode)):
lines = decode_line(lines)
lines = lines.splitlines()
for line in lines:
yield line
else:
for line in lines:
line = decode_line(line)
if line and line[-1] == '\n':
line = line[:-1]
if line.find('\n') != -1:
raise NetworkXError('input line contains newline')
yield line
G = parse_gml_lines(filter_lines(lines), label, destringizer)
return G
def parse_gml_lines(lines, label, destringizer):
"""Parse GML into a graph.
"""
def tokenize():
patterns = [
r'[A-Za-z][0-9A-Za-z_]*\b', # keys
r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?', # reals
r'[+-]?[0-9]+', # ints
r'".*?"', # strings
r'\[', # dict start
r'\]', # dict end
r'#.*$|\s+' # comments and whitespaces
]
tokens = re.compile(
'|'.join('(' + pattern + ')' for pattern in patterns))
lineno = 0
for line in lines:
length = len(line)
|
rtfd/readthedocs.org
|
readthedocs/core/migrations/0003_add_banned_status.py
|
Python
|
mit
| 406
| 0
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_make_userprofile_user_a_onetoonefield'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='banned',
field=m
|
odels.BooleanField(default=False, verbose_name='
|
Banned'),
),
]
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_get_operations.py
|
Python
|
mit
| 3,206
| 0.004055
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_operations.py
DESCRIPTION:
This sample demonstrates how to list/get all document model operations (succeeded, in-progress, failed)
associated with the Form Recognizer resource. Kinds of operations returned are "documentModelBuild",
"documentModelCompose", and "documentModelCopyTo". Note that operation information only persists for
24 hours. If the operation was successful, the document model can be accessed using get_model or list_models APIs.
USAGE:
python sample_get_operations.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
def sample_get_operations():
# [START list_operations]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document
|
_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
operations = list(document_model_admin_client.list_operations())
print("The following document model operations exist under my resource:")
for operation in operations:
print("\nOperation ID: {}".format(operation.operation_id))
print("Operation kind: {}".format(operation.kind))
print("Operation st
|
atus: {}".format(operation.status))
print("Operation percent completed: {}".format(operation.percent_completed))
print("Operation created on: {}".format(operation.created_on))
print("Operation last updated on: {}".format(operation.last_updated_on))
print("Resource location of successful operation: {}".format(operation.resource_location))
# [END list_operations]
# [START get_operation]
# Get an operation by ID
if operations:
print("\nGetting operation info by ID: {}".format(operations[0].operation_id))
operation_info = document_model_admin_client.get_operation(operations[0].operation_id)
if operation_info.status == "succeeded":
print("My {} operation is completed.".format(operation_info.kind))
result = operation_info.result
print("Model ID: {}".format(result.model_id))
elif operation_info.status == "failed":
print("My {} operation failed.".format(operation_info.kind))
error = operation_info.error
print("{}: {}".format(error.code, error.message))
else:
print("My operation status is {}".format(operation_info.status))
else:
print("No operations found.")
# [END get_operation]
if __name__ == '__main__':
sample_get_operations()
|
rosmo/aurora
|
src/main/python/apache/aurora/common/cluster.py
|
Python
|
apache-2.0
| 3,365
| 0.007727
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pystachio import Empty, Struct
from pystachio.composite import Structural
__all__ = ('Cluster',)
# TODO(wickman) It seems like some of this Trait/Mixin stuff should be a
# first-class construct in Pystachio. It could be a solution for extensible
# Job/Task definitions.
class Cluster(dict):
"""Cluster encapsulates a set of K/V attributes describing cluster configurations.
Given a cluster, attributes may be accessed directly on them, e.g.
cluster.name
cluster.scheduler_zk_path
In order to enforce particular "traits" of Cluster, use Cluster.Trait to construct
enforceable schemas, e.g.
class ResolverTrait(Cluster.Trait):
scheduler_zk_ensemble = Required(String)
scheduler_zk_path = Default(String, '/twitter/service/mesos/prod/scheduler')
cluster = Cluster(name = 'west', scheduler_zk_ensemble = 'zookeeper.west.twttr.net')
# Ensures that scheduler_zk_ensemble is defined in the cluster or it will raise a TypeError
cluster.with_trait(ResolverTrait).scheduler_zk_ensemble
# Will use the default if none is provided on Cluster.
cluster.with_trait(ResolverTrait).scheduler_zk_path
"""
Trait = Struct # noqa
def __init__(self, **kwargs):
self._traits = ()
super(Cluster, self).__init__(**kwargs)
def get_trait(self, trait):
"""Given a Cluster.Trait, extract that trait."""
if not issubclass(trait, Structural):
raise TypeError('provided t
|
rait must be a Cluster.Trait subclass, got %s' % type(trait))
# TODO(wickman) Expose this in pystachio as a non-private or add
|
a load method with strict=
return trait(trait._filter_against_schema(self))
def check_trait(self, trait):
"""Given a Cluster.Trait, typecheck that trait."""
trait_check = self.get_trait(trait).check()
if not trait_check.ok():
raise TypeError(trait_check.message())
def with_traits(self, *traits):
"""Return a cluster annotated with a set of traits."""
new_cluster = self.__class__(**self)
for trait in traits:
new_cluster.check_trait(trait)
new_cluster._traits = traits
return new_cluster
def with_trait(self, trait):
"""Return a cluster annotated with a single trait (helper for self.with_traits)."""
return self.with_traits(trait)
def __setitem__(self, key, value):
raise TypeError('Clusters are immutable.')
def __getattr__(self, attribute):
for trait in self._traits:
expressed_trait = self.get_trait(trait)
if hasattr(expressed_trait, attribute):
value = getattr(expressed_trait, attribute)()
return None if value is Empty else value.get()
try:
return self[attribute]
except KeyError:
return self.__getattribute__(attribute)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
|
landsat-pds/landsat_ingestor
|
ingestor/thumbnailer.py
|
Python
|
apache-2.0
| 3,430
| 0.005831
|
#!/usr/bin/env python
import sys
import os
import numpy
from osgeo import gdal, gdal_array
TAIL_TRIM = 0.01
def get_band(filename, target_percent):
ds = gdal.Open(filename)
xsize = int(ds.RasterXSize * target_percent / 100.0)
ysize = int(ds.RasterYSize * target_percent / 100.0)
image = ds.GetRasterBand(1).ReadAsArray(resample_alg = gdal.GRIORA_Average,
buf_xsize = xsize,
buf_ysize = ysize)
return image
def get_scale(image):
'''
Return the values at which to clip an image.
'''
histogram = numpy.histogram(image, 65536, (-0.5, 65535.5))[0]
# Clear the nodata:
histogram[:1] = 0
count = numpy.sum(histogram)
# Walk up the near-black side of the histogram until
# we reach the end of the first percentile:
counter = 0
scale_min = None
for i in range(len(histogram)):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_min = i
break
# Same, but moving left from the white end:
counter = 0
scale_max = None
for i in range(len(histogram)-1, 0, -1):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_max = i
break
return scale_min, scale_max
def scale_image(image, scale_min, scale_max):
'''
Take a (presumptively uint16) image and return it scaled into
a uint8 image stretched linearly so that scale_min is mapped
to 0 and scale_max is mapped to 255.
'''
image = image.astype('float32')
image = (255 * (image - scale_min) / (scale_max - scale_min))
image = numpy.maximum(0, numpy.minim
|
um(
|
255, image))
image = image.astype('uint8')
return image
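# Hedged worked example for scale_image: with scale_min=100 and scale_max=1100,
# a raw value of 600 maps to 255 * (600 - 100) / (1100 - 100) = 127.5, which is
# clipped to [0, 255] and then truncated to 127 by the uint8 cast.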
def thumbnail(root_scene, scene_dir, verbose=False):
red_file = '%s/%s_B4.TIF' % (scene_dir, root_scene)
grn_file = '%s/%s_B3.TIF' % (scene_dir, root_scene)
blu_file = '%s/%s_B2.TIF' % (scene_dir, root_scene)
if not os.path.exists(red_file) or not os.path.exists(grn_file) \
or not os.path.exists(blu_file):
print 'Missing one or more of %s, %s and %s, skip thumbnailing.' % (
red_file, grn_file, blu_file)
return
large_thumbnail = numpy.array([
get_band(red_file, 15),
get_band(grn_file, 15),
get_band(blu_file, 15)])
small_thumbnail = numpy.array([
get_band(red_file, 3),
get_band(grn_file, 3),
get_band(blu_file, 3)])
# Set the scale values for both images from the larger one:
scale_min, scale_max = get_scale(large_thumbnail)
large_thumbnail = scale_image(large_thumbnail, scale_min, scale_max)
small_thumbnail = scale_image(small_thumbnail, scale_min, scale_max)
# TODO: Georeference these jpegs
gdal_array.SaveArray(
large_thumbnail,
'%s/%s_thumb_large.jpg' % (scene_dir, root_scene),
format = 'JPEG')
gdal_array.SaveArray(
small_thumbnail,
'%s/%s_thumb_small.jpg' % (scene_dir, root_scene),
format = 'JPEG')
for filename in os.listdir(scene_dir):
if filename.endswith('.aux.xml'):
os.unlink(os.path.join(scene_dir,filename))
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Usage: thumbnailer.py <root_scene> <scene_dir_path>'
sys.exit(1)
thumbnail(sys.argv[1], sys.argv[2])
|
fake-name/ReadableWebProxy
|
common/memory.py
|
Python
|
bsd-3-clause
| 226
| 0.030973
|
import psutil
def is_low_mem():
v = psutil.virtual_memory()
threshold = v.total / 4
    # If we have less than 25% ram free, we should stop feeding the
|
job system.
if v.avai
|
lable < threshold:
return True
return False
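# Hedged worked example: on a machine with 16 GiB of total RAM the threshold is
# 4 GiB, so is_low_mem() returns True once available memory drops below 4 GiB.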
|
commonssibi/gdcmdtools
|
gdcmdtools/put.py
|
Python
|
bsd-2-clause
| 15,440
| 0.009456
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import sys
from apiclient.http import MediaFileUpload
import apiclient.errors
import urllib
import requests
import json
import pprint
import logging
logger = logging.getLogger("gdput")
#logger.setLevel(logging.ERROR)
import random
import os
import json
from gdcmdtools.base import GDBase
from gdcmdtools.perm import GDPerm
from gdcmdtools.auth import GDAuth
DICT_OF_CONVERTIBLE_FILE_TYPE = { \
'raw':[
"Raw file",
[]],
'ss':[
"Spreadsheet",
['xls', 'xlsx', 'ods', 'csv', 'tsv', 'tab']],
'ft':[
"Fusion Table",
['csv']],
'pt':[
"Presentation",
['ppt', 'pps', 'pptx']],
'dr':[
"Drawing",
['wmf']],
'ocr':[
"OCR",
['jpg', 'git', 'png', 'pdf']],
'doc':[
"Document",
['doc', 'docx', 'html', 'htm', 'txt', 'rtf']],
'gas':[
"GAS project",
['json']],
}
# FIXME: naming
class GDPut:
def __init__(
self,
source_file,
mime_type,
target_type,
folder_id,
title,
description,
location_column,
latlng_column,
permission,
csv_column_define):
logger.debug("source_file=%s, mime_type=%s, target_type=%s" %
(source_file, mime_type, target_type))
self.source_file = source_file
self.mime_type = mime_type
self.target_type = target_type
self.folder_id = folder_id
self.title = title
self.description = description
self.location_column = location_column
self.latlng_column = latlng_column
self.permission = permission
self.csv_column_define = csv_column_define
self.file_id = None
self.ft_headers = None
self.csv_latlng_suffix = "_latlng_%04x.csv" % random.getrandbits(16)
# base
auth = GDAuth()
creds = auth.get_credentials()
self.auth_user = creds.id_token.get("email",None)
if creds == None:
raise Exception("Failed to retrieve credentials")
self.http = auth.get_authorized_http()
base = GDBase()
self.service = base.get_drive_service(self.http)
self.root = base.get_root()
# ft service
if target_type == "ft":
self.ft_service = base.get_ft_service(self.http)
def if_folder_writable(self):
try:
permissions = self.service.permissions().list(fileId=self.folder_id).execute()
valid_roles = ["writer", "owner"]
logger.debug(pprint.pformat(permissions))
for p in permissions["items"]:
email = p.get("emailAddress",None).lower()
role = p.get("role",None).lower()
logger.debug("email: %s, role: %s" % (email, role))
if( email == self.auth_user ) and (role in valid_roles):
return True
except:
return False
return False
def run(self):
# check folder_id
if self.folder_id:
if self.if_folder_writable() == False:
raise Exception("folder_id doesn't exist or insufficient permission: %s" % self.folder_id)
try:
result = getattr(self, self.target_type+"_put")()
except AttributeError as e:
logger.error(e)
raise
except Exception, e:
logger.error(e)
raise
return result
def get_current_user(self):
pass
def raw_put(self):
return self.generic_put(False)
def check_gas(self):
# have "id",
# have at least one file
# the file should have type, id, name items.
with open(self.source_file, "rb") as f:
jsons = json.loads(f.read())
if_ok = False
if type(jsons) != dict:
return False
self.file_id = jsons["id"]
if jsons["id"] and (len(jsons["files"]) > 0):
for j in jsons["files"]:
if j["type"] and j["id"] and j["name"]:
if_ok = True
else:
return False
return if_ok
def gas_pack(self):
map_type_ext = {"server_js":"js", "html":"html"}
json_packed = {}
try:
with open(self.source_file, "rb") as fr1:
jsons = json.loads(fr1.read())
path = os.path.split(self.source_file)[0]
for j in jsons["files"]:
file_name = os.path.join(path, "%s.%s" % (j["name"], map_type_ext[j["type"]]))
with open(file_name, "rb") as fr2:
file_content = fr2.read()
j["source"] = file_content
new_json = "%s.packed" % self.source_file
with open(new_json, "wb+") as fw:
fw.write(json.dumps(jsons, indent=4))
except:
return False
else:
return True
def gas_put(self):
if not self.check_gas():
raise Exception("The target file is not a GAS project json, if you like to raw-upload a json, try '-t raw'")
if not self.gas_pack():
raise Exception("Failed to pack the GAS project files")
return self.generic_put(True, file_name = "%s.packed" % self.source_file)
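    # Verify that the source csv uses ',' as its delimiter by sniffing the first line.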
def check_csv(self):
self.csv_delimiter = ','
with open(self.source_file, 'rb') as csv_file:
try:
dialect = csv.Sniffer().sniff(csv_file.readline())
if dialect.delimiter == self.csv_delimiter:
return True
except:
logger.error("Failed at calling csv.Sniffer().sniff)")
return False
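    # Geocode the location column via ft_geocoding and write a copy of the csv with an extra
    # latlng column; returns the path of the new csv file.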
def csv_save_latlng(self):
rows = []
# read csv header
with open(self.source_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
self.ft_headers = csv_reader.next()
if self.location_column and self.latlng_column:
self.ft_headers.append(self.latlng_column)
rows.append(self.ft_headers)
# TODO: check if location in the list
index_latlng = self.ft_headers.index(self.latlng_column)
index_location = self.ft_headers.index(self.location_column)
for row in csv_reader:
latlng = self.ft_geocoding(row[index_location])
row.insert(index_latlng, latlng)
rows.append(row)
# logger.debug(rows)
# save new file
csv_file_dir = os.path.dirname(self.source_file)
csv_file_basename = os.path.basename(self.source_file)
csv_file_noextension = os.path.splitext(csv_file_basename)[0]
latlng_file = os.path.join(csv_file_dir, csv_file_noextension + self.csv_latlng_suffix)
# write csv header with latlng
with open(latlng_file, 'wb+') as csv_file:
csv_writer = csv.writer(csv_file, lineterminator='\n')
csv_writer.writerows(rows)
return latlng_file
def ss_put(self):
if not self.check_csv():
raise Exception("The delimiter of the source csv file is not '%s'" % self.csv_delimiter)
return self.generic_put(True)
def user_define_column(self, cols, csv_column_define):
return_cols = []
for (col,col_type) in zip(cols, self.csv_column_define):
d = {"type":col_type, "name":c
|
ol}
return_cols.append(d)
return return_cols
# read csv and convert to the fusion table
def create_ft(self, target_file):
table = {
"name":self.title,
"description":self.description,
"isExportable":True,
|
yujanshrestha/pre-trained-keras-example
|
cam_animation.py
|
Python
|
mit
| 3,666
| 0.004364
|
import sys
import os
import argparse
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras.models import load_model
from vis.visualization.saliency import visualize_cam
from train import DataGenerator
from visualize import plot_row_item
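# Load the stored pixels for one .npz sample and return the model's decoded class probabilities.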
def get_model_predictions_for_npz(model, data_generator, character_name, npz_name):
npz_file_path = os.path.join(data_generator.data_path, character_name, npz_name)
pixels = np.load(npz_file_path)['pixels']
predicted_labels = model.predict(np.array([pixels]), batch_size=1)
return data_generator.encoder.one_hot_decode(predicted_labels[0].astype(np.float64))
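# Weight the raw input pixels by the class-activation map (CAM) for the requested class index.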
def cam_weighted_image(model, image_path, character_idx):
pixels = np.load(image_path)['pixels']
cam = visualize_cam(model, layer_idx=-1,
filter_indices=[character_idx],
seed_input=pixels)
return np.uint8(pixels*np.dstack([cam]*3))
def make_cam_plot(model, weight, image_path, cam_path, data_generator):
path_head, npz_name = os.path.split(image_path)
_, character_name = os.path.split(path_head)
model_name = os.path.basename(os.path.dirname(weight))
character_idx = data_generator.encoder.one_hot_index(character_name)
cam = cam_weighted_image(model, image_path, character_idx)
fig = plt.figure()
inner = gridspec.GridSpec(2, 1, wspace=0.05, hspace=0, height_ratios=[5, 1.2])
image_ax = plt.Subplot(fig, inner[0])
labels_ax = plt.Subplot(fig, inner[1])
character_name_to_probability = get_model_predictions_for_npz(model,
data_generator,
character_name,
npz_name)
top_character_probability = sorted(character_name_to_probability.items(),
key=lambda item_tup: item_tup[1],
reverse=True)[:3]
    top_character_names, top_character_probabilities = zip(*top_character_probability)
plot_row_item(image_ax, labels_ax, cam, top_character_names, top_character_probabilities)
weight_idx = os.path.basename(weight).split('.')[1]
labels_ax.set_xlabel(npz_name)
    image_ax.set_title(model_name + ', epoch ' + weight_idx)
fig.add_subplot(image_ax)
fig.add_subplot(labels_ax)
plt.savefig(os.path.join(cam_path, 'cam_{}.png'.format(weight_idx)))
plt.close(fig)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate an animation of class-activation maps")
parser.add_argument('--weight-file', required=True,
help="Model weight file")
parser.add_argument('--data-directory', required=True,
help="Directory containing the input *.npz images")
parser.add_argument('--cam-path', required=True,
help="Directory for storing CAM plots.")
parser.add_argument('--images', required=True, nargs="+",
help="Images to plot CAM for.")
args = parser.parse_args(sys.argv[1:])
data_generator = DataGenerator(args.data_directory)
model = load_model(args.weight_file)
for image in tqdm.tqdm(args.images, unit="image"):
try:
image_cam_path = os.path.join(args.cam_path, os.path.basename(image))
os.makedirs(image_cam_path)
except OSError as err:
if err.errno != os.errno.EEXIST:
raise err
make_cam_plot(model, args.weight_file, image, image_cam_path, data_generator)
|
domenicosolazzo/jroc
|
jroc/tasks/sparql/dbpedia/EntityAnnotationTask.py
|
Python
|
gpl-3.0
| 5,822
| 0.00687
|
from . import BasicTask
from . import SPARQLAdapter
class EntityAnnotationTask(BasicTask):
"""
EntityAnnotationTask: Annotate a list of entities with information from a SPARQL Endpoint
"""
__kernel = None # Kernel for this loader
__inputKey = 'entities' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
assert(isinstance(input, dict))
super(EntityAnnotationTask, self).execute(input)
data = input.get(self.__inputKey, None)
if data is None or not isinstance(data, list):
raise Exception("Impossible to parse these entities. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = [{"entity": word, "metadata": self.__kernel.entityExtraction(word, advancedSearch=False)} for word in data]
self.finish(data=output, failed=False, error=None)
except:
output = "Error annotating the entities"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationURITask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationURITask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationURITask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the URI of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getUniqueURI(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the URI of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationTypesTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationTypesTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationTypesTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the types of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getEntityType(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Err
|
or retrieving the types of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationPropertiesTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
__withPropertyValues = True
__requestedProperties = []
    def __init__(self, name, initial_task=False, withPropertyValues=True, properties=[]):
super(EntityAnnotationPropertiesTask, self).__init__(name, initial_task)
self.__withPropertyValues = withPropertyValues
self.__requestedProperties = properties
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationPropertiesTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the properties of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = None
if not self.__requestedProperties is None and len(self.__requestedProperties) > 0:
output = {
# Property[0] => Property name
# Property[1] => Language
'properties': [self.__kernel.getProperty(data, property[0], property[1]) for property in self.__requestedProperties]
}
else:
output = self.__kernel.getProperties(data, fetchValues=self.__withPropertyValues)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the properties of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationThumbnailTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationThumbnailTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationThumbnailTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the thumbnail of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getThumbnail(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the thumbnail of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
|
danhuss/faker
|
setup.py
|
Python
|
mit
| 2,500
| 0
|
#!/usr/bin/env python
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
README = fp.read()
with open(os.path.join(here, 'VERSION')) as version_file:
VERSION = version_file.read().strip()
excluded_packages = ["docs", "tests", "tests.*"]
if not os.environ.get('READTHEDOCS', False):
excluded_packages += ["faker.sphinx", "faker.sphinx.*"]
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
import pkgutil
import zipimport
zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
except AttributeError:
zip_safe = False
setup(
name='Faker',
version=VERSION,
description="Faker is a Python package that generates fake data for you.",
long_description=README,
entry_points={
'console_scripts': ['faker=faker.cli:execute_from_command_line'],
'pytest11': ['faker = faker.contrib.pytest.plugin'],
},
classifiers=[
# See https://pypi.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
keywords='faker fixtures data test mock generator',
author='joke2k',
author_email='joke2k@gmail.com',
url='https://github.com/joke2k/faker',
license='MIT License',
packages=find_packages(exclude=excluded_packages),
platforms=["any"],
zip_safe=zip_safe,
python_requires=">=3.4",
install_requires=[
"python-dateutil>=2.4",
"text-unidecode==1.3",
],
)
|
cihangxie/cleverhans
|
tests_tf/test_mnist_tutorial_cw.py
|
Python
|
mit
| 2,033
| 0
|
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
class TestMNISTTutorialCW(CleverHansTest):
def test_mnist_tutorial_cw(self):
import tensorflow as tf
from cleverhans_tutorials import mnist_tutorial_cw
# Run the MNIST tutorial on a dataset of reduced size
# and disable visualization.
cw_tutorial_args = {'train_start': 0,
                            'train_end': 10000,
'test_start': 0,
'test_end': 1666,
'viz_enabled': False}
g = tf.Graph()
        with g.as_default():
np.random.seed(42)
report = mnist_tutorial_cw.mnist_tutorial_cw(**cw_tutorial_args)
# Check accuracy values contained in the AccuracyReport object
self.assertTrue(report.clean_train_clean_eval > 0.85)
self.assertTrue(report.clean_train_adv_eval == 0.00)
# There is no adversarial training in the CW tutorial
self.assertTrue(report.adv_train_clean_eval == 0.)
self.assertTrue(report.adv_train_adv_eval == 0.)
g = tf.Graph()
with g.as_default():
np.random.seed(42)
report_2 = mnist_tutorial_cw.mnist_tutorial_cw(**cw_tutorial_args)
atol_fac = 1e-6
self.assertClose(report.train_clean_train_clean_eval,
report_2.train_clean_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_clean_train_adv_eval,
report_2.train_clean_train_adv_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_clean_eval,
report_2.train_adv_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_adv_eval,
report_2.train_adv_train_adv_eval,
atol=atol_fac * 1)
if __name__ == '__main__':
unittest.main()
|
wellenreiter01/Feathercoin
|
contrib/seeds/makeseeds.py
|
Python
|
mit
| 5,457
| 0.003848
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Feathercoin:0.9.6.2/|/Feathercoin:0.13.(0|1|2|99)/|/Feathercoin:0.16.(0|99)/)$")
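# Parse one line of seeder output into a dict with network type, address, port, uptime,
# last-success timestamp, protocol version, user agent, service flags and block height.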
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
    # Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
sciCloud/OLiMS
|
models/instrumenttype.py
|
Python
|
agpl-3.0
| 794
| 0.013854
|
from openerp import fields, models, osv
from base_olims_model import BaseOLiMSModel
from openerp.tools.translate import _
from fields.string_field import StringField
from fields.text_field import TextField
from fields.widget.widget import TextAreaWidget
schema = (StringField('Title',
required=1,
),
TextField('Description',
widget=TextAreaWidget(
label=_('Description'),
description=_('Used in item listings and search results.')),
),
fields.One2many('olims.instrument',
'Type',
string='Type')
)
class InstrumentType(models.Model, BaseOLiMSModel):#(BaseContent):
_name = 'olims.instrument_type'
_rec_name = 'Title'
InstrumentType.initialze(schema)
|
tomfotherby/tower-cli
|
tests/test_commands_config.py
|
Python
|
apache-2.0
| 12,164
| 0
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import stat
import warnings
import click
from click.testing import CliRunner
from tower_cli.commands.config import config, echo_setting
from tower_cli.conf import settings, Parser
from tests.compat import unittest, mock
class ConfigTests(unittest.TestCase):
"""Establish that the `tower-cli config` command works in the way
that we expect.
"""
def setUp(self):
self.runner = CliRunner()
def test_no_arguments(self):
"""Establish that if `tower-cli config` is called with no arguments,
that we print out the current configuration.
"""
# Invoke the command.
with settings.runtime_values(username='meagan', verbose=False,
password='This is the best wine.'):
result = self.runner.invoke(config)
# Ensure that we got a 0 exit status
self.assertEqual(result.exit_code, 0)
# Ensure that the output looks correct.
self.assertIn('username: meagan', result.output)
self.assertIn('password: This is the best wine.', result.output)
self.assertIn('verbose: False', result.output)
def test_key_and_no_value(self):
"""Establish that if we are given a key and no value, that the
setting's value is printed.
"""
with settings.runtime_values(password='This is the best wine.'):
result = self.runner.invoke(config, ['password'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'password: This is the best wine.')
def test_write_setting(self):
"""Establish that if we attempt to write a valid setting, that
the parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
filename = os.path.expanduser('~/.tower_cli.cfg')
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
result = self.runner.invoke(config, ['username', 'luke'])
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call(os.path.expanduser('~/.tower_cli.cfg'), 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_permissions_warning(self):
"""Warn user if configuration file permissions can not be set
"""
# Try to set permissions on file that does not exist, expecting warning
mock_open = mock.mock_open()
filename = '.tower_cli.cfg'
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
chmod.side_effect = OSError
with mock.patch.object(warnings, 'warn') as warn:
result = self.runner.invoke(
config, ['username', 'luke', '--scope=local'])
warn.assert_called_once_with(mock.ANY, UserWarning)
chmod.assert_called_once_with(
filename, stat.S_IRUSR | stat.S_IWUSR)
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
def test_write_global_setting(self):
"""Establish that if we attempt to write a valid setting, that
the parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
filename = '/etc/tower/tower_cli.cfg'
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os.path, 'isdir') as isdir:
with mock.patch.object(os, 'chmod') as chmod:
isdir.return_value = True
result = self.runner.invoke(
config, ['username', 'luke', '--scope=global'],
)
isdir.assert_called_once_with('/etc/tower/')
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
                         'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call('/etc/tower/tower_cli.cfg', 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_write_local_setting(self):
"""Establish that if we attempt to write a valid setting locally, that
the correct parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
result = self.runner.invoke(
config, ['username', 'meagan', '--scope=local'],
)
filename = ".tower_cli.cfg"
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call('.tower_cli.cfg', 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = meagan\n'),
mock_open.mock_calls)
def test_unset(self):
"""Establish that calling `tower-cli config --unset` works in the
way that we expect.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod'):
result = self.runner.invoke(config, ['username', '--unset'])
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertNotIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_error_invalid_key(self):
"""Establish that if `tower-cli config` is sent an invalid key,
that we raise an exception.
"""
r
|
eharney/cinder
|
cinder/image/image_utils.py
|
Python
|
apache-2.0
| 26,645
| 0
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
This is essentially a copy from nova.virt.images.py
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo
"""
import contextlib
import errno
import math
import os
import re
import tempfile
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import imageutils
from oslo_utils import timeutils
from oslo_utils import units
import psutil
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
image_helper_opts = [cfg.StrOpt('image_conversion_dir',
default='$state_path/conversion',
help='Directory used for temporary storage '
'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opts)
QEMU_IMG_LIMITS = processutils.ProcessLimits(
cpu_time=8,
address_space=1 * units.Gi)
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2',
'vhd', 'vhdx', 'parallels')
QEMU_IMG_FORMAT_MAP = {
# Convert formats of Glance images to how they are processed with qemu-img.
'iso': 'raw',
'vhd': 'vpc',
}
def validate_disk_format(disk_format):
return disk_format in VALID_DISK_FORMATS
def fixup_disk_format(disk_format):
"""Return the format to be provided to qemu-img convert."""
return QEMU_IMG_FORMAT_MAP.get(disk_format, disk_format)
def qemu_img_info(path, run_as_root=True):
"""Return an object containing the parsed output from qemu-img info."""
cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', path]
if os.name == 'nt':
cmd = cmd[2:]
out, _err = utils.execute(*cmd, run_as_root=run_as_root,
prlimit=QEMU_IMG_LIMITS)
info = imageutils.QemuImgInfo(out)
# From Cinder's point of view, any 'luks' formatted images
# should be treated as 'raw'.
if info.file_format == 'luks':
info.file_format = 'raw'
return info
def get_qemu_img_version():
info = utils.execute('qemu-img', '--version', check_exit_code=False)[0]
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
LOG.warning("qemu-img is not installed.")
return None
return _get_version_from_string(version.groups()[0])
def _get_version_from_string(version_string):
return [int(x) for x in version_string.split('.')]
def check_qemu_img_version(minimum_version):
qemu_version = get_qemu_img_version()
if (qemu_version is None
or qemu_version < _get_version_from_string(minimum_version)):
if qemu_version:
current_version = '.'.join((str(element)
for element in qemu_version))
else:
current_version = None
_msg = _('qemu-img %(minimum_version)s or later is required by '
'this volume driver. Current qemu-img version: '
'%(current_version)s') % {'minimum_version': minimum_version,
'current_version': current_version}
raise exception.VolumeBackendAPIException(data=_msg)
def _convert_image(prefix, source, dest, out_format,
src_format=None, run_as_root=True):
"""Convert image to other format."""
cmd = prefix + ('qemu-img', 'convert',
'-O', out_format, source, dest)
    # Check whether O_DIRECT is supported and set '-t none' if it is
# This is needed to ensure that all data hit the device before
# it gets unmapped remotely from the host for some backends
# Reference Bug: #1363016
# NOTE(jdg): In the case of file devices qemu does the
# flush properly and more efficiently than would be done
# setting O_DIRECT, so check for that and skip the
# setting for non BLK devs
if (utils.is_blk_device(dest) and
volume_utils.check_for_odirect_support(source,
dest,
'oflag=direct')):
cmd = prefix + ('qemu-img', 'convert',
'-t', 'none')
        # AMI images can be raw or qcow2 but qemu-img doesn't accept "ami" as
        # an image format, so we use automatic detection.
        # TODO(geguileo): This fixes unencrypted AMI image case, but we need to
        # fix the encrypted case.
        if (src_format or '').lower() not in ('', 'ami'):
            cmd += ('-f', src_format)  # prevent detection of format
        cmd += ('-O', out_format, source, dest)
start_time = timeutils.utcnow()
utils.execute(*cmd, run_as_root=run_as_root)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
try:
image_size = qemu_img_info(source,
run_as_root=run_as_root).virtual_size
except ValueError as e:
msg = ("The image was successfully converted, but image size "
"is unavailable. src %(src)s, dest %(dest)s. %(error)s")
LOG.info(msg, {"src": source,
"dest": dest,
"error": e})
return
fsz_mb = image_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
"duration %(duration).2f sec, destination %(dest)s")
LOG.debug(msg, {"src": source,
"sz": fsz_mb,
"duration": duration,
"dest": dest})
msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s"
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def convert_image(source, dest, out_format, src_format=None,
run_as_root=True, throttle=None):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(source, dest) as throttle_cmd:
_convert_image(tuple(throttle_cmd['prefix']),
source, dest,
out_format,
src_format=src_format,
run_as_root=run_as_root)
def resize_image(source, size, run_as_root=False):
"""Changes the virtual size of the image."""
cmd = ('qemu-img', 'resize', source, '%sG' % size)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
start_time = timeutils.utcnow()
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
try:
image_service.download(context, image_id, image_file)
except IOError as e:
if e.errno == errno.ENOSPC:
params = {'path': os.path.dirname(path),
'image': im
|
StYaphet/firefox-ios
|
taskcluster/scripts/get-secret.py
|
Python
|
mpl-2.0
| 2,717
| 0.004417
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import base64
import errno
import json
import os
import taskcluster
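# Write one key of a Taskcluster secret to a file (path resolved relative to the repository root),
# optionally base64-decoding or JSON-serializing the value, and optionally appending with a prefix.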
def write_secret_to_file(path, data, key, base64decode=False, json_secret=False, append=False, prefix=''):
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../' + path))
try:
os.makedirs(os.path.dirname(path))
except OSError as error:
if error.errno != errno.EEXIST:
raise
print("Outputting secret to: {}".format(path))
with open(path, 'a' if append else 'w') as f:
value = data['secret'][key]
if base64decode:
value = base64.b64decode(value)
if json_secret:
            value = json.dumps(value)
f.write(prefix + value)
def fetch_secret_from_taskcluster(name):
try:
secrets = taskcluster.Secrets({
# BaseUrl is still needed for tasks that haven't migrated to taskgraph yet.
'baseUrl': 'http://taskcluster/secrets/v1',
})
except taskcluster.exceptions.TaskclusterFailure:
# taskcluster library >=5 errors out when `baseUrl` is used
secrets = taskcluster.Secrets({
'rootUrl': os.environ.get('TASKCLUSTER_PROXY_URL', 'https://taskcluster.net'),
})
return secrets.get(name)
def main():
parser = argparse.ArgumentParser(
description='Fetch a taskcluster secret value and save it to a file.')
parser.add_argument('-s', dest="secret", action="store", help="name of the secret")
parser.add_argument('-k', dest='key', action="store", help='key of the secret')
parser.add_argument('-f', dest="path", action="store", help='file to save secret to')
parser.add_argument('--decode', dest="decode", action="store_true", default=False, help='base64 decode secret before saving to file')
parser.add_argument('--json', dest="json", action="store_true", default=False, help='serializes the secret to JSON format')
parser.add_argument('--append', dest="append", action="store_true", default=False, help='append secret to existing file')
parser.add_argument('--prefix', dest="prefix", action="store", default="", help='add prefix when writing secret to file')
result = parser.parse_args()
secret = fetch_secret_from_taskcluster(result.secret)
write_secret_to_file(result.path, secret, result.key, result.decode, result.json, result.append, result.prefix)
if __name__ == "__main__":
main()
|
1995parham/Learning
|
ml/fasion-mnist/main.py
|
Python
|
gpl-2.0
| 1,163
| 0
|
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
print(keras.__version__)
fasion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fasion_mnist.load_data()
# pixel 0-255
# 28 x 28 images
print(X_train_full.shape)
print(X_train_full.dtype)
# create validation set
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
class_names = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
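# Simple fully connected classifier: flatten the 28x28 input, two hidden ReLU layers
# (300 and 100 units), and a softmax output over the 10 classes.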
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
# because we have 10 exclusive classes
model.add(keras.layers.Dense(10, activation="softmax"))
print(model.summary())
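# Labels are integer class ids (0-9), so sparse_categorical_crossentropy is used
# instead of one-hot categorical_crossentropy.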
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
history = model.fit(
X_train, y_train, epochs=30, validation_data=(X_valid, y_valid)
)
print(history)
|
loggerhead/Easy-Karabiner
|
tests/test_factory.py
|
Python
|
mit
| 11,690
| 0.000342
|
# -*- coding: utf-8 -*-
from easy_karabiner.factory import *
def test_create_keymap():
raw_keymap = [
'KeyToKey',
['ctrl'],
['f12'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::CONTROL_L,
KeyCode::F12
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['ctrl', 'U'],
['end', 'shift_r', 'home', 'del', 'del', 'norepeat'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::U, ModifierFlag::CONTROL_L, ModifierFlag::NONE,
KeyCode::END, KeyCode::HOME, ModifierFlag::SHIFT_R,
KeyCode::DELETE, KeyCode::DELETE, Option::NOREPEAT
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['alt', 'shift', ','],
['fn', 'left'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::COMMA, ModifierFlag::OPTION_L, ModifierFlag::SHIFT_L, ModifierFlag::NONE,
KeyCode::CURSOR_LEFT, ModifierFlag::FN
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DropAllKeys',
['ModifierFlag::MY_VI_MODE'],
['DROPALLKEYS_DROP_KEY', 'DROPALLKEYS_DROP_CONSUMERKEY', 'DROPALLKEYS_DROP_POINTINGBUTTON'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DropAllKeys__
ModifierFlag::MY_VI_MODE,
Option::DROPALLKEYS_DROP_KEY,
Option::DROPALLKEYS_DROP_CONSUMERKEY,
Option::DROPALLKEYS_DROP_POINTINGBUTTON
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'SimultaneousKeyPresses',
['9', '0', '9', 'shift'],
['shift', '0', 'left']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __SimultaneousKeyPresses__
@begin
KeyCode::KEY_9, KeyCode::KEY_0, KeyCode::KEY_9, ModifierFlag::SHIFT_L
@end
@begin
KeyCode::KEY_0, ModifierFlag::SHIFT_L, KeyCode::CURSOR_LEFT
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['fn'],
['cmd', 'alt', 'I'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::FN,
@begin
KeyCode::FN
@end
@begin
KeyCode::I, ModifierFlag::COMMAND_L, ModifierFlag::OPTION_L
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['fn'],
['F11'],
['F12'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::FN,
@begin
KeyCode::F11
@end
@begin
KeyCode::F12
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'HoldingKeyToKey',
['esc'],
['cmd_r', 'ctrl_r', 'alt_r', 'shift_r'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __HoldingKeyToKey__
KeyCode::ESCAPE,
@begin
KeyCode::ESCAPE
@end
@begin
KeyCode::COMMAND_R, ModifierFlag::CONTROL_R, ModifierFlag::OPTION_R, ModifierFlag::SHIFT_R
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyOverlaidModifier',
['caps'],
['ctrl'],
['esc'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyOverlaidModifier__
KeyCode::CAPSLOCK,
@begin
KeyCode::CONTROL_L
@end
@begin
KeyCode::ESCAPE
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyDownUpToKey',
['cmd', ','],
['cmd', 'shift', 'left'],
['cmd', 'left'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyDownUpToKey__
KeyCode::COMMA, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::CURSOR_LEFT, ModifierFlag::COMMAND_L, ModifierFlag::SHIFT_L
@end
@begin
KeyCode::CURSOR_LEFT, ModifierFlag::COMMAND_L
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'BlockUntilKeyUp',
['sp']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __BlockUntilKeyUp__
KeyCode::SPACE
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DropKeyAfterRemap',
['mission_control', 'MODIFIERFLAG_EITHER_LEFT_OR_RIGHT_SHIFT']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DropKeyAfterRemap__
KeyCode::MISSION_CONTROL,
MODIFIERFLAG_EITHER_LEFT_OR_RIGHT_SHIFT
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'PassThrough',
]
k = KeymapCreater.create(raw_keymap)
s = '<autogen> __PassThrough__ </autogen>'
util.assert_xml_equal(k, s)
raw_keymap = [
'double',
['cmd', 'K'],
['up'] * 6,
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::K, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::K, ModifierFlag::COMMAND_L
@end
@begin
KeyCode::CURSOR_UP, KeyCode::CURSOR_UP, KeyCode::CURSOR_UP,
KeyCode::CURSOR_UP, KeyCode::CURSOR_UP, KeyCode::CURSOR_UP
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['cmd', 'J'],
['down'] * 6,
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::J, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::J, ModifierFlag::COMMAND_L
@end
@begin
KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN,
KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['alt', 'E'],
['KeyCode::VK_OPEN_URL_FINDER'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::E, ModifierFlag::OPTION_L, ModifierFlag::NONE,
KeyCode::VK_OPEN_URL_FINDER
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'FlipScrollWheel',
['FLIPSCROLLWHEEL_HORIZONTAL', 'FLIPSCROLLWHEEL_VERTICAL'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __FlipScrollWheel__
Option::FLIPSCROLLWHEEL_HORIZONTAL,
Option::FLIPSCROLLWHEEL_VERTICAL
</autogen>'''
util.assert_xml_equal(k, s)
def test_create_definition():
d = DefinitionCreater.create('KINDLE', ['com.amazon.Kindle'])
s = '''
<appdef>
<appname>KINDLE</appname>
<equal>com.amazon.Kindle</equal>
</appdef>'''
util.assert_xml_equal(d[0], s)
d = DefinitionCreater.create('EMACS_IGNORE_APP', [
'ECLIPSE', 'EMACS', 'TERMINAL',
'REMOTEDESKTOPCONNECTION', 'VI', 'X11',
'VIRTUALMACHINE', 'TERMINAL', 'SUBLIMETEXT',
])
s = '''
<replacementdef>
<replacementname>EMACS_IGNORE_APP</replacementname>
<replacementvalue>
ECLIPSE, EMACS, TERMINAL,
REMOTEDESKTOPCONNECTION, VI, X11,
VIRTUALMACHINE, TERMINAL, SUBLIMETEXT
</replacementvalue>
</replacementdef>'''
util.assert_xml_equal(d[0], s)
d1, d2 = DefinitionCreater.create('CHERRY_3494', ['0x046a', '0x0011'])
s1 = '''
<devicevendordef>
<vendorname>CHERRY_3494_VENDOR</vendorname>
<vendorid>0x046a</vendorid>
</devicevendordef>
'''
s2 = '''
|
viktorTarasov/PyKMIP
|
kmip/demos/pie/create.py
|
Python
|
apache-2.0
| 1,766
| 0
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
if __name__ == '__main__':
    logger = utils.build_console_logger(logging.INFO)
# Build and parse arguments
parser = utils.build_cli_parser(enums.Operation.CREATE)
opts, args = parser.parse_args(sys.argv[1:])
config = opts.config
algorithm = opts.algorithm
length = opts.length
# Exit early if the arguments are not specified
if algorithm is None:
logger.error('No algorithm provided, exiting early from demo')
sys.exit()
if length is None:
logger.error("No key length provided, exiting e
|
arly from demo")
sys.exit()
algorithm = getattr(enums.CryptographicAlgorithm, algorithm, None)
# Build the client and connect to the server
with client.ProxyKmipClient(config=config) as client:
try:
uid = client.create(algorithm, length)
logger.info("Successfully created symmetric key with ID: "
"{0}".format(uid))
except Exception as e:
logger.error(e)
|