repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
0x19/werkzeug | setup.py | Python | bsd-3-clause | 2,732 | 0 | # -*- coding: utf-8 -*-
"""
Werkzeug
========
Werkzeug started as simple collection of various utilities for WSGI
applications and has become one of the most advanced WSGI utility
modules. It includes a powerful debugger, full featured request and
response objects, HTTP utilities to handle entity tags, cache control
headers, HTTP dates, cookie handling, file uploads, a powerful URL
routing system and a bunch of community contributed addon modules.
Werkzeug is unicode aware and doesn't enforce a specific template
engine, database adapter or anything else. It doesn't even enforce
a specific way of handling requests and leaves all that up to the
developer. It's most useful for end user applications which should work
on as many server environments as possible (such as blogs, wikis,
bulletin boards, etc.).
Details and example applications are available on the
`Werkzeug website <http://werkzeug.pocoo.org/>`_.
Features
--------
- unicode awareness
- request and response objects
- various utility functions for dealing with HTTP headers such as
`Accept` and `Cache-Control` headers.
- thread local objects with proper cleanup at request end
- an interactive debugger
- A simple WSGI server with support for threading and forking
with an automatic reloader.
- a flexible URL routing system with REST support.
- fully WSGI compatible
Development Version
-------------------
The Werkzeug development version can be installed by cloning the git
repository from `github`_::
git clone git@github.com:mitsuhiko/werkzeug.git
.. _github: http://github.com/mitsuhiko/werkzeug
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='Werkzeug',
version='0.10-dev',
url='http://werkzeug.pocoo.org/',
license='BSD',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
description='The Swiss Army knife of Python web development',
long_descript | ion=__doc__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
| ],
packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib',
'werkzeug.testsuite', 'werkzeug.testsuite.contrib'],
include_package_data=True,
test_suite='werkzeug.testsuite.suite',
zip_safe=False,
platforms='any'
)
|
elijahc/ml_v1 | load_data.py | Python | mit | 4,089 | 0.023233 | import scipy.io as sio
import numpy as np
import pandas as pd
from tqdm import tqdm
import argparse
def main():
parser = argparse.ArgumentParser(description='RNN for modeling neuron populations')
parser.add_argument('infile', metavar='infile', type=str,
help='Input data file path')
parser.add_argument('outfile', metavar='outfile', type=str,
help='Path to output data')
parser.add_argument('--data_structure', type=str, default='timeseries', choices=['timeseries','ledger'],
help='Structure to parse the data into default: ledger')
parser.add_argument('--format', type=str, default='mat', choices=['mat','hdf5','csv','pickle'],
help='File Format to save data default: mat')
FLAGS = parser.parse_args()
# Load files
print('loading...stim_sequence')
stim_sequence = sio.loadmat('data/stimulus_sequence.mat')['stimulus_sequence']
FILE = FLAGS.infile
print('loading...', FILE)
mat_file = sio.loadmat(FILE)
#%%
# Filter out poor quality neurons
mask = np.squeeze(mat_file['INDCENT']).astype(bool)
resp_train = mat_file['resp_train'][mask]
stim_len = np.size(resp_train,axis=-1)
resp_train_blk = mat_file['resp_train_blk'][mask]
blank_len = np.size(resp_train_blk,axis=-1)
# Shift by 50ms to account for response latency
latency = 50
resp = np.concatenate((resp_train,resp_train_blk), axis=3)
resp = np.roll(resp,-latency,3)[:,:,:,:-latency]
resp_mean, resp_std, resp_sem = trial_stats(resp[:,:,:,55:110])
#resp_nat_sm, resp_nat_lg = subdivide(resp[:,:,:,50:105])
stim, spike_train,ids,trial = mutate(resp,stim_len,blank_len,stim_sequence)
out_dict = dict(
timeseries=spike_train,
resp_mean=resp_mean,
resp_std=resp_std,
resp_sem=resp_sem,
#nat_resp_sm=resp_nat_sm,
#nat_resp_lg=resp_nat_lg,
stim=stim,
trial_num=trial,
image_id=ids)
outfile = FLAGS.outfile
print('writing ', outfile, '...')
sio.savemat(outfile, out_dict)
def trial_stats(resp):
t_win = np.size(resp, 3)
resp = resp.sum(axis=3)
resp_mean = resp.mean(axis=2)
resp_std = resp.std(axis=2)
resp_sem = resp_std/np.sqrt(20)
return (resp_mean, resp_std, resp_sem)
def subdivide(resp):
tmp = np.squeeze(resp[:,:(2*9*30),:])
tmp = tmp.reshape(np.size(resp,0),2,9,30,20,np.size(resp,-1))
resp_nat_sm = tmp[:,0,:,:,:].reshape(np.size(tmp,0),(9*30),20,np.size(tmp,-1))
resp_nat_lg = tmp[:,1,:,:,:].reshape(np.size(tmp,0),(9*30),20,np.size(tmp,-1))
return (resp_nat_sm, resp_nat_lg)
def mutate(resp,stim_len,blank_len,stim_sequence):
image_bin = []
spikes = []
image_ids = []
trial_ids = []
trials = np.size(resp,2)
num_neurons = np.size(resp,0)
num_images = np.size(resp, 1)
i = 0
for r in tqdm(np.arange(trials)):
for image_id in stim_sequence[:,r]:
index = {'i': i,
'trial': r,
| 'image': image_id-1
}
x_on = np.zeros(stim_len, dtype=np.uint8) + 1
x_off= np.zeros(blank_len, dtyp | e=np.uint8) + 0
x = np.concatenate((x_on, x_off))
trial_vec = np.zeros_like(x,dtype=np.uint8) + r
image_vec = np.zeros_like(x,dtype=np.uint8) + image_id-1
y = resp[:,image_id-1, r,:]
i = i+1
image_bin.extend([x])
image_ids.extend([image_vec])
trial_ids.extend([trial_vec])
spikes.extend([y])
#print(index)
#print(ms)
#print(index)
#print(x.shape)
#print(x)
#print(y.shape)
#print(y)
stim,spikes = ( np.concatenate( np.array(image_bin) ),np.concatenate(np.array(spikes), axis=1).swapaxes(0,1))
ids, trial = (np.concatenate(np.array(image_ids)),np.concatenate(np.array(trial_ids)))
return (stim,spikes,ids,trial)
if __name__ == '__main__':
main()
|
teatimesoft/sugarscape | sim/grids.py | Python | gpl-3.0 | 2,389 | 0.001674 | class SquareGrid:
N = (0, 1)
E = (1, 0)
S = (0, -1)
W = (-1, 0)
PRINCIPAL_DIRECTIONS = (N, E, S, W)
def __init__(self, length, height, wraparound=True):
self.length = length
self.height = height
self.wraparound = wraparound
def get_circle(self, center, radius, *, p):
# must avoid repeating the same point twice (not just performance, but semantics)
circle = set()
if p == 0:
circle.add(center)
for direction in self.PRINCIPAL_DIRECTIONS:
for d in range(1, radius+1):
point = self.move(center, direction, d)
circle.add(point)
else:
max_distance = radius ** p
for delta_x in range(-radius, radius+1):
for delta_y in range(-radius, radius+1):
if p != float('inf'):
distance = delta_x ** p + delta_y ** p
if distance > max_distance:
continue
direction = delta_x, delta_y
point = self.move(center, self.E, delta_x)
point = self.move(point, self.N, delta_y)
circle.add(point)
return circle
def distance(self, p1, p2, *, p=2):
return sum((c1 - c2) ** p for c1, c2 in zip(p1, p2)) ** (1/p)
def is_inside(self, coords):
x, y = coords
return 0 <= x < self.length and 0 <= y < self.height
def move(self, start_coords, direction, distance):
x, y = start_coords
dx, dy = direction
x += dx * distance
y += dy * distance
if self.wraparound:
return x % self.length, y % self.height
else:
if self.is_inside((x, y)):
return x, y
el | se:
return None
def get_all_points(self):
return ((x, y) for x in range(self.length) for y in range(self.height))
def __repr__(self):
return | self.__class__.__name__ + (' no' if not self.wraparound else '') + ' wraparound' + ': {} x {}'.format(self.length, self.height)
class CircularVision:
def __init__(self, distance, *, p):
self.distance = distance
self.p = p
def get_visible_points(self, grid, position):
return grid.get_circle(position, self.distance, p=self.p)
|
agry/NGECore2 | scripts/loot/lootItems/rarelootchest/corellian_seaside.py | Python | lgpl-3.0 | 196 | 0.05102 |
def itemTemplate():
return ['object/tangible/painting/shared_painting_corl_02.iff']
|
def STFparams():
return ['static_item_n','item_painting_corl_02','sta | tic_item_d','item_painting_corl_02'] |
allure-framework/allure-python | allure-pytest/test/acceptance/status/xfail_call_status_test.py | Python | apache-2.0 | 4,151 | 0.003854 | from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
def test_xfail(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.mark.xfail()
... def test_xfail_example():
... assert False
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_xfail_example",
with_status("skipped"),
has_status_details(with_message_contains("XFAIL"),
with_message_contains("AssertionError"),
with_trace_contains("def test_xfail_example():")
)
)
)
def test_xfail_with_reason_raise_mentioned_exception(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.mark.xfail(raises=AssertionError, reason='Some reason')
... def test_xfail_with_reason_raise_mentioned_exception_example():
... assert False
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_xfail_with_reason_raise_mentioned_exception_example",
with_status("skipped"),
has_status_details(with_message_contains("XFAIL Some reason"),
with_message_contains("AssertionError"),
with_trace_contains(
"def test_xfail_with_reason_raise_mentioned_exception_example():")
)
)
)
def test_xfail_raise_not_mentioned_exception(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.mark.xfail(raises=AssertionError)
... def test_xfail_raise_not_mentioned_exception_example():
... raise ZeroDivisionError
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_xfail_raise_not_mentioned_exception_example",
with_status("broken"),
has_status_details(with_message_contains("ZeroDivisionError"),
with_trace_contains(
"def test_xfail_raise_not_mentioned_exception_example():")
)
)
)
def test_xfail_do_not_raise_mentioned_exception(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.mark.xfail(raises=AssertionError)
| ... def test_xfail_do_not_raise_mentioned_exception_example():
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_xfail_ | do_not_raise_mentioned_exception_example",
with_status("passed"),
has_status_details(with_message_contains("XPASS"),
)
)
)
def test_xfail_with_reason_do_not_raise_mentioned_exception(executed_docstring_source):
"""
>>> import pytest
>>> @pytest.mark.xfail(raises=AssertionError, reason="Some reason")
... def test_xfail_with_reason_do_not_raise_mentioned_exception_example():
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_xfail_with_reason_do_not_raise_mentioned_exception_example",
with_status("passed"),
has_status_details(with_message_contains("XPASS Some reason"),
)
)
)
|
NiklasRosenstein/localimport | setup.py | Python | mit | 1,978 | 0.0091 | import io
import os
import re
import sys
from setuptools import setup
def restify():
if os.path.isfile('README.md'):
if os.system('pandoc -s README.md -o README.rst') != 0:
print('----------------------------------------------------------')
print('WARNING: pandoc command failed, could not restify README.md')
print('----------------------------------------------------------')
if sys.stdout.isatty():
if sys.version_info[0] >= 3:
input("Enter to continue... ")
else:
raw_input("Enter to continue... ")
else:
with io.open('README.rst', encoding='utf8') as fp:
text = fp.read()
# Remove ".. raw:: html\n\n ....\n" stuff, it results from using
# raw HTML in Markdown but can not be properly rendered in PyPI.
text = re.sub('..\s*raw::\s*html\s*\n\s*\n\s+[^\n]+\n', '', text, re.M)
with io.open('README.rst', 'w', encoding='utf8') as fp:
fp.write(text)
return text
setup(
name="localimport",
version="1.7.3",
description="Isolated import of Python Modules",
long_description=restify(),
author="Niklas Rosenstein",
author_email | ="rosensteinniklas@gmail.com",
url='https://github.com/NiklasRosenstein/localimport',
py_modules=["localimport"],
keywords=["import", "embedded", "modules", "packages"],
classifiers=[
'Development Status :: 5 - Production/Stab | le',
'Environment :: Other Environment', 'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
])
|
arnaudblois/to_do_list | to_do_list/users/apps.py | Python | mit | 276 | 0 | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'to_do_list.users'
verbose_name = "Users"
def ready(self):
"""Override this to put | in:
Users system checks
Users signal registration
"""
| pass
|
schilduil/suapp | suapp/jandw.py | Python | mit | 11,304 | 0.000885 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (C), 2013, The Schilduil Team. All rights reserved.
"""
import sys
import pony.orm
import suapp.orm
from suapp.logdecorator import loguse, logging
__all__ = ["Wooster", "Drone", "Jeeves"]
class FlowException(Exception):
pass
class ApplicationClosed(FlowException):
pass
class Wooster:
"""
A Wooster represents a UI window/page.
GENERALLY THESE THINGS ARE REUSED SO YOU NEED TO BE VERY CAREFUL ABOUT SIDE EFFECTS.
In case you have something that cannot be reused do something like:
1/ Create a new class instance of a subclass of Wooster
2/ Call inflow on that
"""
def lock(self):
pass
def unlock(self):
pass
def inflow(self, jeeves, drone):
# The only thing it does is store the Jeeves object.
self.jeeves = jeeves
# MODE: Modal=1, Replace=2, Both=3
# jeeves.drone(self, name, mode, dataobject)
def close(self):
pass
def toJSON(self):
return "Wooster %s" % (hex(self.__hash__()))
class Drone(object):
"""
A drone is the connection between two vertices.
"""
def __init__(self, name, tovertex):
self.name = name
self.tovertex = tovertex
@loguse
def get_new_instance_clone(self, dataobject, mode):
"""
Clone the drone and add the dataobject and mode.
"""
drone = Drone(self.name, self.tovertex)
drone.dataobject = dataobject
drone.mode = mode
return drone
def toJSON(self):
return "Drone %s > %s" % (self.name, self.tovertex)
class Jeeves(object):
"""
Jeeves is the controller that determins the flow.
It uses Drones to go from Wooster to Wooster.
"""
MODE_OPEN = 3
MODE_REPLACE = 2
MODE_MODAL = 1
@loguse
def __init__(self, app=None):
"""
Initializes the Jeeves with an empty flow and app name.
"""
self.flow = {"": {}}
self.app = app
self.views = {}
self.queries = {}
# TODO: I have no idea why I added ormscope: get rid of it?
self.ormscope = {}
def toJSON(self):
"""
Makes this object be made into json.
"""
return "Jeeves %s" % (hex(self.__hash__()))
@loguse
def whichDrone(self, fromname, outmessage, **kwargs):
"""
Finding the drone matching the outmessage.
"""
logging.getLogger(__name__).debug(
": Jeeves[%r].whichDrone : Flow: %s", self, self.flow
)
drone = None
try:
drone = self.flow[fromname][outmessage]
except:
try:
drone = self.flow[""][outmessage]
except:
# TODO: do something else then bluntly exiting.
logging.getLogger(__name__).error(
": Jeeves[%r].whichDrone : Not found '%s' - exiting.",
self,
outmessage,
)
if outmessage == "EXIT":
raise ApplicationClosed()
else:
raise FlowException("Unknown outmessage: %s" % (outmessage))
return drone
@loguse("@") # Not logging the return value.
def _do_query_str(self, query_template, scope, parameters):
"""
Execute a query that is a string.
DEPRECATED
"""
query = query_template % parameters
exec("result = %s" % (query), scope)
return scope["result"]
@loguse("@") # Not logging the return value.
def pre_query(self, name, scope=None, params=None):
"""
Returns the the query and parameters.
The query and the default parameters are looked up in self.queries.
The parameters are next updated with the passed params.
The self.queries is filled by moduleloader from the loaded modlib's
view_definitions() function.
"""
if scope is None:
scope = {}
query_template, defaults = self.queries[name]
# Start with the default defined.
parameters = defaults.copy()
parameters.update(params)
# Making sure the paging parameters are integers.
try:
parameters["pagenum"] = int(parameters["pagenum"])
except:
parameters["pagenum"] = 1
try:
parameters["pagesize"] = int(parameters["pagesize"])
except:
parameters["pagesize"] = 10
logging.getLogger(__name__).debug(
"Paging #%s (%s)", parameters["pagenum"], parameters["pagesize"]
)
return (query_template, parameters)
@loguse("@") # Not loggin the return value.
def do_query(self, name, scope=None, params=None):
"""
Executes a query by name and return the result.
The result is always a UiOrmObject by using UiOrmObject.uize on the
results of the query.
"""
query_template, parameters = self.pre_query(name, scope, params)
if callable(query_template):
# A callable, so just call it.
result = query_template(params=parameters)
else:
# DEPRECATED: python code as a string.
result = self._do_query_str(query_template, scope, parameters)
return (suapp.orm.UiOrmObject.uize(r) for r in result)
@loguse
def do_fetch_set(self, module, table, primarykey, link):
"""
Fetches the result from a foreign key that is a set.
This will return the list of objects representing the rows in the
database pointed to by the foreign key (which name should be passed in
link). The return type is either a list of suapp.orm.UiOrmObject's.
Usually you can follow the foreign key directly, but not in an
asynchronous target (UI) like the web where you need to fetch it anew.
For foreign keys that are not sets you can use do_fetch.
The module, table and pr | imarykey are those from the object having the
foreign key and be | have the same as with do_fetch. The extra parameter
link is the foreign key that is pointing to the set.
"""
origin = self.do_fetch(module, table, primarykey)
result = getattr(origin, link)
return (suapp.orm.UiOrmObject.uize(r) for r in result)
@loguse
def do_fetch(self, module, table, primarykey):
"""
Fetches a specific object from the database.
This will return the object representing a row in the
specified table from the database. The return type is
either a pony.orm.core.Entity or suapp.orm.UiOrmObject
subclass, depending on the class name specified in table.
Parameters:
- module: In what module the table is defined.
This should start with modlib.
- table: Class name of the object representing the table.
The class should be a subclass of either
- pony.orm.core.Entity
- suapp.orm.UiOrmObject
- primarykey: A string representing the primary key value
or a list of values (useful in case of a
multi variable primary key).
"""
if isinstance(primarykey, str):
primarykey = [primarykey]
module = sys.modules[module]
table_class = getattr(module, table)
params = {}
if issubclass(table_class, pony.orm.core.Entity):
pk_columns = table_class._pk_columns_
elif issubclass(table_class, suapp.orm.UiOrmObject):
pk_columns = table_class._ui_class._pk_columns_
else:
return None
if len(pk_columns) == 1:
if len(primarykey) == 1:
params[pk_columns[0]] = primarykey[0]
else:
i = 0
for column in pk_columns:
params[column] = primarykey[i]
i += 1
# Checking if the primary key is a foreign key.
for column in pk_columns:
logging.getLogger(__name__).debug(
|
fredsmith/will | will/plugins/web/__init__.py | Python | mit | 33 | 0 | MODULE_DESCRIPTION = "Web page | s"
| |
brakhane/panda3d | direct/src/distributed/DistributedCartesianGridAI.py | Python | bsd-3-clause | 5,309 | 0.003014 |
from pandac.PandaModules import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.task import Task
from .DistributedNodeAI import DistributedNodeAI
from .CartesianGridBase import CartesianGridBase
class DistributedCartesianGridAI(DistributedNodeAI, CartesianGridBase):
notify = directNotify.newCategory("DistributedCartesianGridAI")
RuleSeparator = ":"
def __init__(self, air, startingZone, gridSize, gridRadius, cellWidth,
style="Cartesian"):
DistributedNodeAI.__init__(self, air)
self.style = style
self.startingZone = startingZone
self.gridSize = gridSize
self.gridRadius = gridRadius
self.cellWidth = cellWidth
# Keep track of all AI objects added to the grid
self.gridObjects = {}
self.updateTaskStarted = 0
def delete(self):
DistributedNodeAI.delete(self)
self.stopUpdateGridTask()
def isGridParent(self):
# If this distributed object is a DistributedGrid return 1.
# 0 by default
return 1
def getCellWidth(self):
return self.cellWidth
def getParentingRules(self):
self.notify.debug("calling getter")
rule = ("%i%s%i%s%i" % (self.startingZone, self.RuleSeparator,
self.gridSize, self.RuleSeparator,
self.gridRadius))
return [self.style, rule]
# Reparent and setLocation on av to DistributedOceanGrid
def addObjectToGrid(self, av, useZoneId=-1, startAutoUpdate=True):
self.notify.debug("setting parent to grid %s" % self)
avId = av.doId
# Create a grid parent
#gridParent = self.attachNewNode("gridParent-%s" % avId)
#self.gridParents[avId] = gridParent
self.gridObjects[avId] = av
# Put the avatar on the grid
self.handleAvatarZoneChange(av, useZoneId)
if (not self.updateTaskStarted) and startAutoUpdate:
self.startUpdateGridTask()
def removeObjectFromGrid(self, av):
# TODO: WHAT LOCATION SHOULD WE SET THIS TO?
#av.wrtReparentTo(self.parentNP)
#av.setLocation(self.air.di | strictId, 1000)
# Remove grid parent for this av
avId = av.doId
if avId in self.gridObjects:
del self.gridObjects[avId]
# Stop task if there are no more av's being managed
if len(self.gridObjects) == 0:
self.stopUpdateGridTask()
############################################# | ########################
# updateGridTask
# This task is similar to the processVisibility task for the local client.
# A couple differences:
# - we are not doing setInterest on the AI (that is a local client
# specific call).
# - we assume that the moving objects on the grid are parented to a
# gridParent, and are broadcasting their position relative to that
# gridParent. This makes the task's math easy. Just check to see
# when our position goes out of the current grid cell. When it does,
# call handleAvatarZoneChange
def startUpdateGridTask(self):
self.stopUpdateGridTask()
self.updateTaskStarted = 1
taskMgr.add(self.updateGridTask, self.taskName("updateGridTask"))
def stopUpdateGridTask(self):
taskMgr.remove(self.taskName("updateGridTask"))
self.updateTaskStarted = 0
def updateGridTask(self, task=None):
# Run through all grid objects and update their parents if needed
missingObjs = []
for avId in self.gridObjects.keys():
av = self.gridObjects[avId]
# handle a missing object after it is already gone?
if (av.isEmpty()):
task.setDelay(1.0)
del self.gridObjects[avId]
continue
pos = av.getPos()
if ((pos[0] < 0 or pos[1] < 0) or
(pos[0] > self.cellWidth or pos[1] > self.cellWidth)):
# we are out of the bounds of this current cell
self.handleAvatarZoneChange(av)
# Do this every second, not every frame
if (task):
task.setDelay(1.0)
return Task.again
def handleAvatarZoneChange(self, av, useZoneId=-1):
# Calculate zone id
# Get position of av relative to this grid
if (useZoneId == -1):
pos = av.getPos(self)
zoneId = self.getZoneFromXYZ(pos)
else:
# zone already calculated, position of object might not
# give the correct zone
pos = None
zoneId = useZoneId
if not self.isValidZone(zoneId):
self.notify.warning(
"%s handleAvatarZoneChange %s: not a valid zone (%s) for pos %s" %(self.doId, av.doId, zoneId, pos))
return
# Set the location on the server.
# setLocation will update the gridParent
av.b_setLocation(self.doId, zoneId)
def handleSetLocation(self, av, parentId, zoneId):
pass
#if (av.parentId != parentId):
# parent changed, need to look up instance tree
# to see if avatar's named area location information
# changed
#av.requestRegionUpdateTask(regionegionUid)
|
wufangjie/leetcode | 745. Prefix and Suffix Search.py | Python | gpl-3.0 | 2,102 | 0.016175 | from utils import memo
import bisect
class WordFilter(object):
def __init__(self, words):
"""
:type words: List[str]
"""
self.n = len(words)
self.words = words
self.seq_w = sorted(range(self.n), key=lambda i: words[i])
self.seq = [words[i] for i in self.seq_w]
words_rev = [w[::-1] for w in words]
self.rev_w = sorted(range(self.n), key=lambda i: words_rev[i])
self.rev = [words_rev[i] for i in self.rev_w]
@memo
def f(self, prefix, suffix):
"""
:type prefix: str
:type suffix: str
:rtype: int
"""
if prefix:
lo_p = bisect.bisect_left(self.seq, prefix)
hi_p = bisect.bisect_left(self.seq, prefix + '{', lo=lo_p)
else:
lo_p, hi_p = 0, self.n
if suffix:
temp = suffix[::-1]
lo_s = bisect.bisect_left(self.rev, temp)
hi_s = bisect.bisect_left(self.rev, temp + '{', lo=lo_s)
else:
lo_s, hi_s = 0, self.n
weight = -1
if hi_p - lo_p > hi_s - lo_s:
for i in range(lo_s, hi_s):
if self.words[self.rev_w[i]].startswith(prefix):
if self.rev_w[i] > weight:
weight = self.rev_w[i]
else:
for i in range(lo_p, hi_p):
if self.seq[i].endswith(suffix):
if self.seq_w[i] > weight:
weight = self.seq_w[i]
return weight
# NOTE: words=[apple], prefix='appl', suffix='le' if valid
# NOTE: TLE 7/12 when I did not add memo, though I think it may not be the correct way
import time
tic = time.time()
obj = WordFilter(["cabaabaaaa","ccbcababac","bacaabccba","bcbbcbacaa","abcaccbcaa","accabaccaa","cabcbbbcca","ababccabcb","caccbbcbab","bccbacbcba"])
for p, s in [["bccbacbcba","a"],["ab", | "abcaccbcaa | "],["a","aa"],["cabaaba","abaaaa"],["cacc","accbbcbab"],["ccbcab","bac"],["bac","cba"],["ac","accabaccaa"],["bcbb","aa"],["ccbca","cbcababac"]]:
print(obj.f(p, s))
print(time.time() - tic)
|
meeb/txmailgunrelay | txmailgunrelay/__init__.py | Python | apache-2.0 | 882 | 0.002268 | # -*- coding: utf-8 -*-
'''
Copyright 2013 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtai | n a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.2'
__all__ = [
'mailgun',
'smtp',
'transport',
]
from txma | ilgunrelay.smtp import SmtpTransport
def smtp_relay(api_key, testing, sender_domains):
return SmtpTransport(api_key, testing, sender_domains)
'''
EOF
'''
|
knarfeh/print_logs | print_log.py | Python | mit | 2,118 | 0.000944 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
import sys
import logging
__author__ = "kanrfeh@outlook.com"
TOTAL = 0
MAXTIME = 0
MINTIME = 0
EXCEPT = 0
logger = logging.getLogger(name=__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("/var/log/mathilde/print_debug.log")
#ch = logging.StreamHandler()
logger.addHandler(fh)
#logger.addHandler(ch)
class PrintThread(threading.Thread):
def __init__(self, thread_name):
threading.Thread.__init__(self)
self.test_count = 0
def run(self):
self.test_performance()
def test_performance(self):
global TOTAL
global MAXTIME
global MINTIME
global EXCEPT
try:
start_time = time.time()
logger.debug("lalalala")
sys.stdout.flush()
time_span = time.time() - start_time
self.maxtime(time_span)
self.mintime(time_span)
exce | pt Exception as e:
print("Error: {}".format(e))
EXCEPT += 1
finally:
TOTAL += 1
| def maxtime(self, time_span):
global MAXTIME
if time_span > MAXTIME:
MAXTIME = time_span
def mintime(self, time_span):
global MINTIME
if time_span < MINTIME:
MINTIME = time_span
def test(thread_count):
global TOTAL
global MAXTIME
global MINTIME
global EXCEPT
TOTAL = 0
MAXTIME = 0
MINTIME = 0
EXCEPT = 0
print("----------------task start------------------")
start_time = time.time()
i = 0
TOTAL = 0
while i < thread_count:
t = PrintThread("thread" + str(i))
t.start()
i += 1
print("----------------task end--------------------")
print("\n\n\n")
print("total time: {}".format(time.time()-start_time))
print("thread_count: {}".format(thread_count))
print("total: {}, maxtime: {}, mintime: {}, EXCEPT: {}\
".format(TOTAL, MAXTIME, MINTIME, EXCEPT))
if __name__ == "__main__":
while True:
test(1)
time.sleep(10)
print("\n\n\n")
|
dimitdim/GetARoom | Main/flask/Scripts/easy_install-2.7-script.py | Python | gpl-2.0 | 346 | 0.00578 | #!C:\Users\Mitko\Docum | ents\GitHub\GetARoom\Main\flask\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.6c11','console_scripts','easy_install-2.7'
__requires__ = 'setuptools==0.6c11'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('setuptools==0.6c11', 'console_scr | ipts', 'easy_install-2.7')()
)
|
CyberLabs-BR/face_detect | pyimagesearch/utils/agegenderhelper.py | Python | mit | 5,618 | 0.028124 | # import the necessary packages
import numpy as np
import glob
import cv2
import os
class AgeGenderHelper:
def __init__(self, config):
# store the configuration object and build the age bins used
# for constructing class labels
self.config = config
self.ageBins = self.buildAgeBins()
def buildAgeBins(self):
# initialize the list of age bins based on the Adience
# dataset
ageBins = [(0, 2), (4, 6), (8, 13), (15, 20), (25, 32),
(38, 43), (48, 53), (60, np.inf)]
# return the age bins
return ageBins
def toLabel(self, age, gender):
# check to see if we should determine the age label
if self.config.DATASET_TYPE == "age":
return self.toAgeLabel(age)
# otherwise, assume we are determining the gender label
return self.toGenderLabel(gender)
def toAgeLabel(self, age):
# initialize the label
label = None
# break the age tuple into integers
age = age.replace("(", "").replace(")", "").split(", ")
(ageLower, ageUpper) = np.array(age, dtype="int")
# loop over the age bins
for (lower, upper) in self.ageBins:
# determine if the age falls into the current bin
if ageLower >= lower and ageUpper <= upper:
label = "{}_{}".format(lower, upper)
break
# return the label
return label
def toGenderLabel(self, gender):
# return 0 if the gender is male, 1 if the gender is female
return 0 if gender == "m" else 1
def buildOneOffMappings(self, le):
# sort the class labels in ascending order (according to age)
# and initialize the one-off mappings for computing accuracy
classes = sorted(le.classes_, key=lambda x:
int(x.decode("utf-8").split("_")[0]))
oneOff = {}
# loop over the index and name of the (sorted) class labels
for (i, name) in enumerate(classes):
# determine the index of the *current* class label name
# in the *label encoder* (unordered) list, then
# initialize the index of the previous and next age
# groups adjacent to the current label
current = np.where(le.classes_ == name)[0][0]
prev = -1
next = -1
# check to see if we should compute previous adjacent
# age group
if i > 0:
prev = np.where(le.classes_ == classes[i - 1])[0][0]
# check to see if we should compute the next adjacent
# age group
if i < len(classes) - 1:
next = np.where(le.classes_ == classes[i + 1])[0][0]
# construct a tuple that consists of the current age
# bracket, the previous age bracket, and the next age
# bracket
oneOff[current] = (current, prev, next)
# return the one-off mappings
return oneOff
def buildPathsAndLabels(self):
    """Scan the Adience folds files and return (imagePaths, labels).

    Samples with a malformed age (not starting with "(") or an unknown
    gender are skipped, as are ages outside every configured bracket.
    """
    paths = []
    labels = []

    # grab the paths to the folds files
    foldPaths = os.path.sep.join([self.config.LABELS_PATH, "*.txt"])
    foldPaths = glob.glob(foldPaths)

    for foldPath in foldPaths:
        # read the fold file, skipping the header row; use a context
        # manager so the handle is closed (the original leaked it)
        with open(foldPath) as f:
            rows = f.read().strip().split("\n")[1:]

        for row in rows:
            # unpack the needed components of the row
            row = row.split("\t")
            (userID, imagePath, faceID, age, gender) = row[:5]

            # if the age or gender is invalid, ignore the sample
            if age[0] != "(" or gender not in ("m", "f"):
                continue

            # construct the path to the input image and build the label
            p = "landmark_aligned_face.{}.{}".format(faceID, imagePath)
            p = os.path.sep.join([self.config.IMAGES_PATH, userID, p])
            label = self.toLabel(age, gender)

            # a None label means the age fits no bracket -- skip
            if label is None:
                continue

            paths.append(p)
            labels.append(label)

    return (paths, labels)
@staticmethod
def visualizeAge(agePreds, le):
    """Render a probability-bar canvas for the age predictions.

    Bars are drawn most-probable first; an encoded label such as
    "25_32" is shown as "25-32" and the open-ended bracket as "60+".
    """
    # initialize the canvas and sort predictions by descending probability
    canvas = np.zeros((250, 310, 3), dtype="uint8")
    idxs = np.argsort(agePreds)[::-1]

    for (i, j) in enumerate(idxs):
        # construct the human readable text for the prediction
        ageLabel = le.inverse_transform(j).decode("utf-8")
        ageLabel = ageLabel.replace("_", "-")
        ageLabel = ageLabel.replace("-inf", "+")
        text = "{}: {:.2f}%".format(ageLabel, agePreds[j] * 100)

        # draw the label + probability bar on the canvas
        w = int(agePreds[j] * 300) + 5
        cv2.rectangle(canvas, (5, (i * 35) + 5),
                      (w, (i * 35) + 35), (0, 0, 255), -1)
        cv2.putText(canvas, text, (10, (i * 35) + 23),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)

    # return the visualization (the extracted source truncated this
    # statement at "return c")
    return canvas
@staticmethod
def visualizeGender(genderPreds, le):
    """Render a probability-bar canvas for the gender predictions."""
    # initialize the canvas and sort predictions by descending probability
    canvas = np.zeros((100, 310, 3), dtype="uint8")
    idxs = np.argsort(genderPreds)[::-1]

    for (i, j) in enumerate(idxs):
        # label 0 encodes male, anything else female (the "Female"
        # literal was garbled in the extracted source)
        gender = le.inverse_transform(j)
        gender = "Male" if gender == 0 else "Female"
        text = "{}: {:.2f}%".format(gender, genderPreds[j] * 100)

        # draw the label + probability bar on the canvas
        w = int(genderPreds[j] * 300) + 5
        cv2.rectangle(canvas, (5, (i * 35) + 5),
                      (w, (i * 35) + 35), (0, 0, 255), -1)
        cv2.putText(canvas, text, (10, (i * 35) + 23),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)

    # return the canvas
    return canvas
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/__pkginfo__.py | Python | apache-2.0 | 2,173 | 0.001841 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid packaging information"""
from sys import version_info as py_version
from pkg_resources import parse_version
from setuptools import __version__ as setuptools_version
# Package identity and version.
distname = 'astroid'
modname = 'astroid'

version = '1.5.3'
numversion = tuple(map(int, version.split('.')))

extras_require = {}
install_requires = ['lazy_object_proxy', 'six', 'wrapt']


def has_environment_marker_range_operators_support():
    """Code extracted from 'pytest/setup.py'
    https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31

    The first known release to support environment marker with range operators
    it is 17.1, see: https://setuptools.readthedocs.io/en/latest/history.html#id113
    """
    return parse_version(setuptools_version) >= parse_version('17.1')


# Declare Python-version-dependent backport requirements, either as
# environment markers (modern setuptools) or by inspecting the running
# interpreter (older setuptools).
if has_environment_marker_range_operators_support():
    extras_require[':python_version<"3.4"'] = ['enum34>=1.1.3', 'singledispatch']
    extras_require[':python_version<"3.3"'] = ['backports.functools_lru_cache']
else:
    if py_version < (3, 4):
        install_requires.extend(['enum34', 'singledispatch'])
    if py_version < (3, 3):
        install_requires.append('backports.functools_lru_cache')

# pylint: disable=redefined-builtin; why license is a builtin anyway?
license = 'LGPL'

author = 'Python Code Quality Authority'
author_email = 'code-quality@python.org'
mailinglist = "mailto://%s" % author_email
web = 'https://github.com/PyCQA/astroid'

description = "A abstract syntax tree for Python with inference support."

classifiers = ["Topic :: Software Development :: Libraries :: Python Modules",
               "Topic :: Software Development :: Quality Assurance",
               "Programming Language :: Python",
               "Programming Language :: Python :: 2",
               "Programming Language :: Python :: 3",
               ]
|
a-ro/preimage | preimage/learners/structured_krr.py | Python | bsd-2-clause | 3,990 | 0.002005 | __author__ = 'amelie'
import numpy
from scipy import linalg
from sklearn.base import BaseEstimator
class StructuredKernelRidgeRegression(BaseEstimator):
    """Structured Kernel Ridge Regression.

    Attributes
    ----------
    alpha : float
        Regularization term.
    kernel : Callable
        Kernel function that computes the similarity between the samples.
    inference_model : Model
        Inference model used to solve the pre-image problem.
    weights_ : array, shape=[n_samples, n_samples]
        Learned weights, where n_samples is the number of training samples.
    X_train_ : array, shape=[n_samples, n_features]
        Training samples.
    """

    def __init__(self, alpha, kernel, inference_model):
        self.alpha = alpha
        self.kernel = kernel
        self.inference_model = inference_model
        self.weights_ = None
        self.X_train_ = None

    def fit(self, X, Y, y_lengths=None):
        """Learn the weights.

        Parameters
        ----------
        X : array, shape=[n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features in X.
        Y : array, shape=[n_samples, ]
            Target strings, where n_samples is the number of training samples.
        y_lengths : array, shape=[n_samples]
            Length of the training strings.

        Returns
        -------
        self : StructuredKernelRidgeRegression
            This fitted estimator.  (The previous docstring incorrectly
            documented a gram-matrix return value.)
        """
        gram_matrix = self.kernel(X, X)
        self.weights_ = self._solve(gram_matrix)
        self.X_train_ = X
        inference_parameters = InferenceFitParameters(self.weights_, gram_matrix, Y, y_lengths)
        self.inference_model.fit(inference_parameters)
        return self

    def _solve(self, gram_matrix):
        # Invert (K + alpha * I).  The diagonal is regularized in place and
        # restored afterwards so the caller's gram matrix is unchanged.
        diagonal = numpy.copy(gram_matrix.diagonal())
        numpy.fill_diagonal(gram_matrix, diagonal + self.alpha)
        weights = linalg.inv(gram_matrix)
        numpy.fill_diagonal(gram_matrix, diagonal)
        return weights

    def predict(self, X, y_lengths=None):
        """Predict the target strings.

        Parameters
        ----------
        X : array, shape=[n_samples, n_features]
            Testing vectors, where n_samples is the number of samples and
            n_features is the number of features in X.
        y_lengths : array, shape=[n_samples]
            Length of the strings to predict.

        Returns
        -------
        Y_predicted : array, shape = [n_samples]
            Predicted strings.

        Raises
        ------
        ValueError
            If called before :meth:`fit`.
        """
        if self.weights_ is None:
            raise ValueError("The fit function must be called before predict")
        gram_matrix = self.kernel(self.X_train_, X)
        Y_weights = numpy.dot(self.weights_, gram_matrix).T
        Y_predicted = self.inference_model.predict(Y_weights, y_lengths)
        return Y_predicted
class InferenceFitParameters:
    """Parameters for the inference model.

    That way inference_model.fit(parameters) doesn't have unused parameters
    but only accesses the ones it needs.

    Attributes
    ----------
    weights : array, shape = [n_samples, n_samples]
        Learned weights, where n_samples is the number of training samples.
    gram_matrix : array, shape = [n_samples, n_samples]
        Gram matrix of the training samples.
    Y_train : array, shape = [n_samples, ]
        Training strings.
    y_lengths : array, shape = [n_samples]
        Length of each training string in Y_train.
    """

    def __init__(self, weights, gram_matrix, Y, y_lengths):
        self.weights = weights
        self.gram_matrix = gram_matrix
        self.Y_train = Y
        self.y_lengths = y_lengths
shinymud/ShinyMUD | src/shinymud/models/shiny_types.py | Python | mit | 2,852 | 0.01087 | from shinymud.lib.world import World
import json
world = World.get_world()
def to_bool(val):
    """Convert a string representation of true or false to a boolean.

    Returns a boolean value, or None if no corresponding boolean value
    exists.  Booleans pass through unchanged -- including False, which the
    original implementation incorrectly mapped to None because the
    emptiness check ran before the isinstance check.
    """
    bool_states = {'true': True, 'false': False, '0': False, '1': True}
    if isinstance(val, bool):
        return val
    if not val:
        return None
    return bool_states.get(str(val).strip().lower())
def read_dict(val):
    """Parse "foo=bar,name=fred" into {'foo': 'bar', 'name': 'fred'}."""
    pairs = (item.split('=') for item in val.split(','))
    return dict(pairs)
def write_dict(val):
    """Serialize a dict to "k1=v1,k2=v2" form (values coerced to str)."""
    return ','.join('%s=%s' % (key, value) for key, value in val.items())
def copy_dict(val):
    """Return a shallow copy of a dict."""
    return dict(val)
def read_list(val):
    """Deserialize a comma separated string into a list.

    Lists pass through unchanged; empty or None input yields [].  (The
    final statement was garbled to "val.s | plit" in the extracted source
    and is reconstructed here.)
    """
    if isinstance(val, list):
        return val
    if not val:
        return []
    return val.split(',')
def write_list(val):
    """Serialize a list to a comma separated string; empty/None -> None.

    (The early return was garbled to "ret | urn None" in the extracted
    source and is reconstructed here.)
    """
    if not val:
        return None
    return ','.join(map(str, val))
def copy_list(val):
    """Return a shallow copy of a list."""
    return list(val)
def read_area(val):
    # Resolve an area name (string) to its Area object via the world
    # singleton; an already-resolved Area passes through unchanged.
    # NOTE(review): `basestring` implies Python 2 -- confirm target runtime.
    if isinstance(val, basestring):
        return world.get_area(val)
    return val
def write_area(val):
    # Serialize an Area to its name; a plain string passes through.
    if isinstance(val, basestring):
        return val
    return val.name
def read_merchandise(val):
    """Parse a '<>'-separated sequence of serialized dicts into a list."""
    return [read_dict(chunk) for chunk in val.split('<>')]
def write_merchandise(val):
    """Serialize a list of merchandise dicts, dropping any 'keywords' entry.

    Works on copies so the caller's dicts are no longer mutated (the
    original deleted 'keywords' from the caller's dicts in place).
    """
    serialized = []
    for item in val:
        item = dict(item)
        if item.get('keywords'):
            del item['keywords']
        serialized.append(write_dict(item))
    return '<>'.join(serialized)
def read_json(val):
    # Deserialize a JSON string into Python objects.
    return json.loads(val)

def write_json(val):
    # Serialize Python objects to a JSON string.
    return json.dumps(val)
def write_model(val):
    """Return a model's database id; plain integer ids pass through."""
    return val if isinstance(val, int) else val.dbid
def read_int_dict(val):
    """Parse "a=1,b=2" into {'a': 1, 'b': 2}; empty/None input -> {}."""
    if not val:
        return {}
    return {key: int(num)
            for key, num in (pair.split('=') for pair in val.split(','))}
def write_int_dict(val):
    """Serialize {'a': 1} to "a=1"; empty/None input -> ""."""
    if not val:
        return ''
    return ','.join('%s=%s' % (str(k), str(v)) for k, v in val.items())
def read_damage(val):
    # Deserialize a '|'-separated string into a list of Damage objects.
    # NOTE(review): Damage is not present in this module's visible imports
    # -- confirm where it is defined before relying on this function.
    dmg = []
    if val:
        for d in val.split('|'):
            dmg.append(Damage(d))
    return dmg
def write_damage(val):
    """Serialize a list of damage objects to a '|'-separated string."""
    return '|'.join(str(d) for d in val)
def read_channels(val):
    """Parse "name=true,other=false" into {name: bool} using to_bool."""
    channels = {}
    for pair in val.split(','):
        name, state = pair.split('=')
        channels[name] = to_bool(state)
    return channels
def read_location(val):
    # val is "area_name,id" -- resolve through the world singleton.
    loc = val.split(',')
    return world.get_location(loc[0], loc[1])
def write_location(val):
    """Serialize a location to "area_name,id"; falsy input -> None."""
    if not val:
        return None
    return '%s,%s' % (val.area.name, val.id)
def read_int(val):
    """Coerce val to int, returning 0 when it cannot be parsed.

    Also catches TypeError so read_int(None) returns 0 instead of raising
    (the original only caught ValueError).
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        return 0
def read_float(val):
    """Coerce val to float, returning 0.0 when it cannot be parsed.

    Catches TypeError as well, mirroring read_int, so None input returns
    0.0 instead of raising.
    """
    try:
        return float(val)
    except (ValueError, TypeError):
        return 0.0
|
zerolab/wagtail | wagtail/sites/views.py | Python | bsd-3-clause | 1,799 | 0.001112 | from django.utils.translation import gettext_lazy as _
from wagtail.admin.ui.tables import Column, StatusFlagColumn, TitleColumn
from wagtail.admin.views import generic
from wagtail.admin.viewsets.model import ModelViewSet
from wagtail.core.models import Site
from wagtail.core.permissions import site_permission_policy
from wagtail.sites.forms import SiteForm
class IndexView(generic.IndexView):
    # Listing of all configured sites, ordered by hostname by default.
    page_title = _("Sites")
    add_item_label = _("Add a site")
    context_object_name = 'sites'
    default_ordering = 'hostname'
    # Columns shown in the sites table; hostname links to the edit view.
    columns = [
        TitleColumn('hostname', label=_("Site"), sort_key='hostname', url_name='wagtailsites:edit'),
        Column('port', sort_key='port'),
        Column('site_name'),
        Column('root_page'),
        StatusFlagColumn('is_default_site', label=_("Default?"), true_label=_("Default")),
    ]
class CreateView(generic.CreateView):
    # "Add site" form view.
    page_title = _("Add site")
    success_message = _("Site '{0}' created.")
    template_name = 'wagtailsites/create.html'
class EditView(generic.EditView):
    # Edit form for an existing site.  The context_object_name assignment
    # was garbled ("context_obj | ect_name") in the extracted source and is
    # reconstructed here.
    success_message = _("Site '{0}' updated.")
    error_message = _("The site could not be saved due to errors.")
    delete_item_label = _("Delete site")
    context_object_name = 'site'
    template_name = 'wagtailsites/edit.html'
class DeleteView(generic.DeleteView):
    # Confirmation + deletion view for a site.
    success_message = _("Site '{0}' deleted.")
    page_title = _("Delete site")
    confirmation_message = _("Are you sure you want to delete this site?")
class SiteViewSet(ModelViewSet):
    # Viewset wiring the Site CRUD views together.  The permission_policy
    # assignment was garbled ("site_permissio | n_policy") in the extracted
    # source and is reconstructed here.
    icon = 'site'
    model = Site
    permission_policy = site_permission_policy

    index_view_class = IndexView
    add_view_class = CreateView
    edit_view_class = EditView
    delete_view_class = DeleteView

    def get_form_class(self, for_update=False):
        return SiteForm
|
lynxis/pkpgcounter | setup.py | Python | gpl-3.0 | 2,361 | 0.015248 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# pkpgcounter : a generic Page Description Language parser
#
# (c) 2003-2009 Jerome Alet <alet@librelogiciel.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
#
# Build-script imports.
import sys
import glob
import os
import shutil

# distutils is required to run setup(); abort with a hint when missing.
try :
    from distutils.core import setup
except ImportError as msg :
    sys.stderr.write("%s\n" % msg)
    sys.stderr.write("You need the DistUtils Python module.\nunder Debian, you may have to install the python-dev package.\nOf course, YMMV.\n")
    sys.exit(-1)

# PIL is required by pkpgcounter at runtime; check for it up front.
try :
    from PIL import Image
except ImportError :
    sys.stderr.write("You need the Python Imaging Library (aka PIL).\nYou can grab it from http://www.pythonware.com\n")
    sys.exit(-1)

# Make the package importable so its version/doc strings can be read.
sys.path.insert(0, "pkpgpdls")
from pkpgpdls.version import __version__, __doc__

data_files = []

# Install compiled message catalogs under share/locale/<lang>/LC_MESSAGES.
mofiles = glob.glob(os.sep.join(["po", "*", "*.mo"]))
for mofile in mofiles :
    lang = mofile.split(os.sep)[1]
    directory = os.sep.join(["share", "locale", lang, "LC_MESSAGES"])
    data_files.append((directory, [ mofile ]))

# Documentation files.
docdir = "share/doc/pkpgcounter"
docfiles = ["README", "COPYING", "BUGS", "CREDITS", "AUTHORS", "TODO"]
data_files.append((docdir, docfiles))
if os.path.exists("ChangeLog") :
    data_files.append((docdir, ["ChangeLog"]))

# Manual pages.
directory = os.sep.join(["share", "man", "man1"])
manpages = glob.glob(os.sep.join(["man", "*.1"]))
data_files.append((directory, manpages))

setup(name = "pkpgcounter", version = __version__,
      license = "GNU GPL",
      description = __doc__,
      author = "Jerome Alet",
      author_email = "alet@librelogiciel.com",
      url = "http://www.pykota.com/software/pkpgcounter/",
      packages = [ "pkpgpdls" ],
      scripts = [ "bin/pkpgcounter" ],
      data_files = data_files)
|
rtevans/tacc_stats_old | analyze/process_pickles/htrate.py | Python | lgpl-2.1 | 2,109 | 0.045993 | #!/usr/bin/env python
import sys
sys.path.append('../../monitor')
import datetime, glob, job_stats, os, subprocess, time
import matplotlib
if not 'matplotlib.pyplot' in sys.modules:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils
class Colors:
    """Cycle endlessly through a fixed matplotlib color palette."""

    def __init__(self):
        self.colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
        self.loc = 0

    def next(self):
        """Return the next color, wrapping around at the end of the palette."""
        if self.loc >= len(self.colors):
            self.loc = 0
        color = self.colors[self.loc]
        self.loc += 1
        return color
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', help='Set full mode', action='store_true')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
n=parser.parse_args()
filelis | t=tspl_utils.getfilelist(n.filearg)
for file in filelist:
try:
full=''
ts=tspl.TSPLBase(file,['amd64_sock', 'amd64_sock', 'amd64_sock'],
['HT0', 'HT1', 'HT2'])
except tspl.TSPLException as e:
continue
if not tspl_utils.checkjob(ts,3600,16): # 1 hour, 16way only
continue
eli | f ts.numhosts < 2: # At least 2 hosts
print ts.j.id + ': 1 host'
continue
print ts.j.id
tmid=(ts.t[:-1]+ts.t[1:])/2.0
dt=numpy.diff(ts.t)
fig,ax=plt.subplots(1,1,figsize=(8,6),dpi=80)
ax.hold=True
xmin,xmax=[0.,0.]
c=Colors()
for k in ts.j.hosts.keys():
h=ts.j.hosts[k]
col=c.next()
for i in range(3):
for j in range(4):
rate=numpy.divide(numpy.diff(ts.data[i][k][j]),dt)
xmin,xmax=[min(xmin,min(rate)),max(xmax,max(rate))]
ax.plot(tmid/3600,rate,'-'+col)
if xmax > 2.0e9:
print ts.j.id + ' over limit: %(v)8.3f' % {'v' : xmax}
else:
plt.close()
continue
plt.suptitle(ts.title)
xmin,xmax=tspl_utils.expand_range(xmin,xmax,.1)
ax.set_ylim(bottom=xmin,top=xmax)
fname='_'.join(['graph',ts.j.id,'HT_rates'])
fig.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
|
NORDUnet/opennsa | opennsa/backends/force10.py | Python | bsd-3-clause | 10,163 | 0.009544 | """
Force10 Backend.
This backend will only work with SSH version 2 capable Force10 switches.
This excludes most, if not all, of the etherscale series.
The backend has been developed for the E series.
The backend has been developed and tested on a Terascale E300 switch.
The switch (or router, depending on your level off pedanticness) is configured
by the backend logging via ssh, requesting a cli, and firing the necessary
command for configuring a VLAN. This approach was choosen over netconf / XML,
as a fairly reliable source said that not all the necessary functionality
needed was available via the previously mentioned interfaces.
Currently the backend does support VLAN rewriting, and I am not sure if/how it
is supported.
Configuration:
To setup a VLAN connection:
configure
interface vlan $vlan_id
name $name
description $description
no shut
tagged $source_port
tagged $dest_port
end
Teardown:
configure
no interface vlan $vlan_id
end
Ensure that the interfaces are configure to be layer 2.
Ralph developed a backend for etherscale, where a lot of the input from this
backend comes from.
Authors: Henrik Thostrup Jensen <htj@nordu.net>
Ralph Koning <R.Koning@uva.nl>
Copyright: NORDUnet (2011-2013)
"""
import string
import random
import os
from twisted.python import log
from twisted.internet import defer
from twisted.conch.ssh import session
from opennsa import constants as cnt, config
from opennsa.backends.common import ssh, genericbackend
# Identifier used in log messages emitted by this backend.
LOG_SYSTEM = 'Force10'

# FTOS CLI mode-switching commands.
COMMAND_ENABLE = 'enable'
COMMAND_CONFIGURE = 'configure'
COMMAND_END = 'end'
COMMAND_EXIT = 'exit'
COMMAND_WRITE = 'write' # writes config

# FTOS CLI command templates (filled in with %-style dict formatting).
COMMAND_INTERFACE_VLAN = 'interface vlan %(vlan)i'
COMMAND_NAME = 'name %(name)s'
COMMAND_NO_SHUTDOWN = 'no shutdown'
COMMAND_TAGGED = 'tagged %(interface)s'
COMMAND_NO_INTERFACE = 'no interface vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
interface, vlan = nrm_port.rsplit('.')
vlan = int(vlan)
return interface, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
    """Build the FTOS CLI command sequence that provisions the VLAN
    connecting the two given NRM ports."""
    src_if, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    dst_if, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'

    return [
        COMMAND_INTERFACE_VLAN % {'vlan': src_vlan},
        COMMAND_NAME % {'name': 'opennsa-%i' % src_vlan},
        COMMAND_TAGGED % {'interface': src_if},
        COMMAND_TAGGED % {'interface': dst_if},
        COMMAND_NO_SHUTDOWN,
        COMMAND_END,
    ]
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
    """Build the FTOS CLI command sequence that removes the VLAN."""
    _, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    _, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'

    return [COMMAND_NO_INTERFACE % {'vlan': src_vlan}, COMMAND_END]
class SSHChannel(ssh.SSHChannel):
    # SSH session channel that drives the FTOS CLI: requests a pty and a
    # shell, then feeds commands one at a time, waiting for the expected
    # prompt substring between each write.
    name = 'session'

    def __init__(self, conn):
        ssh.SSHChannel.__init__(self, conn=conn)
        self.data = ''          # bytes received since the last prompt match
        self.wait_defer = None  # deferred fired when wait_data is observed
        self.wait_data = None   # prompt substring currently waited for

    @defer.inlineCallbacks
    def sendCommands(self, commands, enable_password):
        """Enter enabled + configure mode, send the commands, write the
        configuration, then exit and close the channel."""
        LT = '\r' # line termination
        try:
            log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)

            term = os.environ.get('TERM', 'xterm')
            winSize = (25,80,0,0)
            ptyReqData = session.packRequest_pty_req(term, winSize, '')
            yield self.conn.sendRequest(self, 'pty-req', ptyReqData, wantReply=1)
            yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
            log.msg('Got shell', system=LOG_SYSTEM, debug=True)

            d = self.waitForData('>')
            yield d
            log.msg('Got shell ready', system=LOG_SYSTEM, debug=True)

            # so far so good
            # NOTE: each wait is registered *before* the corresponding
            # write, so the reply cannot be missed.
            d = self.waitForData(':')
            self.write(COMMAND_ENABLE + LT) # This one fails for some reason
            yield d
            log.msg('Got enable password prompt', system=LOG_SYSTEM, debug=True)

            d = self.waitForData('#')
            self.write(enable_password + LT)
            yield d
            log.msg('Entered enabled mode', debug=True, system=LOG_SYSTEM)

            d = self.waitForData('#')
            self.write(COMMAND_CONFIGURE + LT) # This one fails for some reason
            yield d
            log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)

            for cmd in commands:
                log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
                d = self.waitForData('#')
                self.write(cmd + LT)
                yield d

            # Superfluous COMMAND_END has been removed by hopet
            log.msg('Configuration done, writing configuration.', debug=True, system=LOG_SYSTEM)
            d = self.waitForData('#')
            self.write(COMMAND_WRITE + LT)
            yield d

            log.msg('Configuration written. Exiting.', debug=True, system=LOG_SYSTEM)
            self.write(COMMAND_EXIT + LT)
            # Waiting for the prompt removed by hopet - we could wait forever here! :(

        except Exception as e:
            log.msg('Error sending commands: %s' % str(e))
            raise e

        log.msg('Commands successfully send', system=LOG_SYSTEM)
        self.sendEOF()
        self.closeIt()

    def waitForData(self, data):
        # Register interest in a prompt substring; the returned deferred
        # fires from dataReceived once the substring has been seen.
        self.wait_data = data
        self.wait_defer = defer.Deferred()
        return self.wait_defer

    def dataReceived(self, data):
        log.msg("DATA:" + data, system=LOG_SYSTEM, debug=True)
        if len(data) == 0:
            pass
        else:
            self.data += data
            # fire (and clear) the pending wait when the prompt shows up
            if self.wait_data and self.wait_data in self.data:
                d = self.wait_defer
                self.data = ''
                self.wait_data = None
                self.wait_defer = None
                d.callback(self)
class Force10CommandSender:
    """Opens an SSH connection per request and sends a CLI command batch.

    The constructor's second parameter name was garbled
    ("enable_pas | sword") in the extracted source and is reconstructed.
    """

    def __init__(self, ssh_connection_creator, enable_password):
        self.ssh_connection_creator = ssh_connection_creator
        self.enable_password = enable_password

    @defer.inlineCallbacks
    def sendCommands(self, commands):
        # Note: FTOS does not allow multiple channels in an SSH connection,
        # so we open a connection for each request. Party like it is 1988.
        # The "correct" solution for this would be to create a connection pool,
        # but that won't happen just now.
        log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
        ssh_connection = yield self.ssh_connection_creator.getSSHConnection()

        try:
            channel = SSHChannel(conn=ssh_connection)
            ssh_connection.openChannel(channel)

            log.msg("Opening channel", system=LOG_SYSTEM, debug=True)
            yield channel.channel_open
            log.msg("Channel open, sending commands", system=LOG_SYSTEM, debug=True)
            yield channel.sendCommands(commands, self.enable_password)
        finally:
            ssh_connection.transport.loseConnection()
class Force10ConnectionManager:
def __init__(self, log_system, port_map, cfg):
self.log_system = log_system
self.port_map = port_map
host = cfg[config.FORCE10_HOST]
port = cfg.get(config.FORCE10_PORT, 22)
host_fingerprint = cfg[config.FORCE10_HOST_FINGERPRINT]
user = cfg[config.FORCE10_USER]
if config.FORCE10_PASSWORD in cfg:
password = cfg[config.FORCE10_PASSWORD]
ssh_connection_creator = ssh.SSHConnectionCreator(host, port, [ host_fingerprint ], user, password=password)
else:
ssh_public_key = cfg[config.FORCE10_SSH_PUBLIC_KEY]
ssh_private_key = cfg[config.FORCE10_SSH_PRIVATE_KEY]
ssh_connec |
ProgressiveFX/pyblish-pfx | pyblish_pfx/plugins/maya/modeling/_validate_displaylayer.py | Python | lgpl-3.0 | 692 | 0 | import pyblish.api
import maya.cmds as cmds
import pymel
class ValidateDisplaylayer(pyblish.api.Validator):
    """Ensure no user-created display layers exist in the scene.

    (The original docstring was copy-pasted from a construction-history
    validator; this class checks display layers.)
    """

    families = ['scene']
    optional = True
    label = 'Modeling - Display Layers'

    def process(self, instance):
        """Collect non-default display layers and fail if any exist."""
        layers = []
        # garbled "cmds. | ls" in the extracted source reconstructed
        for layer in cmds.ls(type='displayLayer'):
            # skipping references
            # NOTE(review): `return` aborts the whole validation on the
            # first referenced layer; `continue` may have been intended.
            if pymel.core.PyNode(layer).isReferenced():
                return

            if layer != 'defaultLayer':
                layers.append(layer)

        assert not layers, 'Scene has displayLayers: %s' % layers
|
vwc/agita | src/vwcollective.simplecontact/vwcollective/simplecontact/tests/test_contactsportlet.py | Python | mit | 3,908 | 0.003582 | from zope.component import getUtility, getMultiAdapter
from plone.portlets.interfaces import IPortletType
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignment
from plone.portlets.interfaces import IPortletDataProvider
from plone.portlets.interfaces import IPortletRenderer
from plone.app.portlets.storage import PortletAssignmentMapping
from vwcollective.simplecontact.portlets import contactsportlet
from vwcollective.simplecontact.portlets.tests.base_contactsportlet import TestCase
class TestPortlet(TestCase):
    # Registration / add / edit plumbing tests for the contacts portlet.

    def afterSetUp(self):
        self.setRoles(('Manager',))

    def test_portlet_type_registered(self):
        portlet = getUtility(IPortletType, name='vwcollective.simplecontact.portlets.ContactsPortlet')
        self.assertEquals(portlet.addview, 'vwcollective.simplecontact.portlets.ContactsPortlet')

    def test_interfaces(self):
        # TODO: Pass any keyword arguments to the Assignment constructor
        portlet = contactsportlet.Assignment()
        self.failUnless(IPortletAssignment.providedBy(portlet))
        self.failUnless(IPortletDataProvider.providedBy(portlet.data))

    def test_invoke_add_view(self):
        portlet = getUtility(IPortletType, name='vwcollective.simplecontact.portlets.ContactsPortlet')
        mapping = self.portal.restrictedTraverse('++contextportlets++plone.leftcolumn')
        for m in mapping.keys():
            del mapping[m]
        addview = mapping.restrictedTraverse('+/' + portlet.addview)

        # TODO: Pass a dictionary containing dummy form inputs from the add form
        addview.createAndAdd(data={})

        self.assertEquals(len(mapping), 1)
        self.failUnless(isinstance(mapping.values()[0], contactsportlet.Assignment))

    # NOTE: This test can be removed if the portlet has no edit form
    def test_invoke_edit_view(self):
        mapping = PortletAssignmentMapping()
        request = self.folder.REQUEST

        mapping['foo'] = contactsportlet.Assignment()
        editview = getMultiAdapter((mapping['foo'], request), name='edit')
        self.failUnless(isinstance(editview, contactsportlet.EditForm))

    def test_obtain_renderer(self):
        context = self.folder
        request = self.folder.REQUEST
        view = self.folder.restrictedTraverse('@@plone')
        manager = getUtility(IPortletManager, name='plone.rightcolumn', context=self.portal)

        # TODO: Pass any keyword arguments to the Assignment constructor
        assignment = contactsportlet.Assignment()

        renderer = getMultiAdapter((context, request, view, manager, assignment), IPortletRenderer)
        self.failUnless(isinstance(renderer, contactsportlet.Renderer))
class TestRenderer(TestCase):
    # Rendering tests for the contacts portlet.

    def afterSetUp(self):
        self.setRoles(('Manager',))

    def renderer(self, context=None, request=None, view=None, manager=None, assignment=None):
        # Helper: build a portlet renderer, defaulting every collaborator.
        context = context or self.folder
        request = request or self.folder.REQUEST
        view = view or self.folder.restrictedTraverse('@@plone')
        manager = manager or getUtility(IPortletManager, name='plone.rightcolumn', context=self.portal)

        # TODO: Pass any default keyword arguments to the Assignment constructor
        assignment = assignment or contactsportlet.Assignment()
        return getMultiAdapter((context, request, view, manager, assignment), IPortletRenderer)

    def test_render(self):
        # TODO: Pass any keyword arguments to the Assignment constructor
        r = self.renderer(context=self.portal, assignment=contactsportlet.Assignment())
        r = r.__of__(self.folder)
        r.update()
        output = r.render()
        # TODO: Test output
def test_suite():
    """Assemble the unittest suite for this portlet's test cases."""
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    for case in (TestPortlet, TestRenderer):
        suite.addTest(makeSuite(case))
    return suite
|
mambocab/python-driver | tests/unit/cython/test_types.py | Python | apache-2.0 | 1,042 | 0 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.unit.cython.utils import cyimport, cythontest
# Compile-and-import the Cython test helper module.  (The import line was
# garbled to "utils | import" in the extracted source.)
types_testhelper = cyimport('tests.unit.cython.types_testhelper')
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
class TypesTest(unittest.TestCase):
    # Thin drivers delegating to the compiled Cython test helpers; each
    # test is skipped (via @cythontest) when Cython support is absent.

    @cythontest
    def test_datetype(self):
        types_testhelper.test_datetype(self.assertEqual)

    @cythontest
    def test_date_side_by_side(self):
        types_testhelper.test_date_side_by_side(self.assertEqual)
|
obnam-mirror/cliapp | cliapp/pluginmgr_tests.py | Python | gpl-2.0 | 3,602 | 0 | # Copyright (C) 2009-2012 Lars Wirzenius
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import unittest
from cliapp import PluginManager
class PluginManagerInitialStateTests(unittest.TestCase):
    # A freshly constructed PluginManager must start out empty.

    def setUp(self):
        self.pm = PluginManager()

    def test_locations_is_empty_list(self):
        self.assertEqual(self.pm.locations, [])

    def test_plugins_is_empty_list(self):
        self.assertEqual(self.pm.plugins, [])

    def test_application_version_is_zeroes(self):
        self.assertEqual(self.pm.application_version, '0.0.0')

    def test_plugin_files_is_empty(self):
        self.assertEqual(self.pm.plugin_files, [])

    def test_plugin_arguments_is_empty(self):
        self.assertEqual(self.pm.plugin_arguments, [])

    def test_plugin_keyword_arguments_is_empty(self):
        self.assertEqual(self.pm.plugin_keyword_arguments, {})
class PluginManagerTests(unittest.TestCase):
    # Discovery and loading against the on-disk 'test-plugins' fixture.

    def setUp(self):
        self.pm = PluginManager()
        self.pm.locations = ['test-plugins', 'not-exist']
        self.pm.plugin_arguments = ('fooarg',)
        self.pm.plugin_keyword_arguments = {'bar': 'bararg'}
        # expected plugin files, sorted as find_plugin_files returns them
        self.files = sorted(['test-plugins/hello_plugin.py',
                             'test-plugins/aaa_hello_plugin.py',
                             'test-plugins/oldhello_plugin.py',
                             'test-plugins/wrongversion_plugin.py'])

    def test_finds_the_right_plugin_files(self):
        self.assertEqual(self.pm.find_plugin_files(), self.files)

    def test_plugin_files_attribute_implicitly_searches(self):
        self.assertEqual(self.pm.plugin_files, self.files)

    def test_loads_hello_plugin(self):
        # only the compatible, new-style plugin should actually load
        plugins = self.pm.load_plugins()
        self.assertEqual(len(plugins), 1)
        self.assertEqual(plugins[0].name, 'Hello')

    def test_plugins_attribute_implicitly_searches(self):
        self.assertEqual(len(self.pm.plugins), 1)
        self.assertEqual(self.pm.plugins[0].name, 'Hello')

    def test_initializes_hello_with_correct_args(self):
        plugin = self.pm['Hello']
        self.assertEqual(plugin.foo, 'fooarg')
        self.assertEqual(plugin.bar, 'bararg')

    def test_raises_keyerror_for_unknown_plugin(self):
        self.assertRaises(KeyError, self.pm.__getitem__, 'Hithere')
class PluginManagerCompatibleApplicationVersionTests(unittest.TestCase):
    # Version-compatibility checks against application version 1.2.3.
    # The last two test methods were garbled ("compati | ble_version" and
    # "self.ass | ertTrue") in the extracted source and are reconstructed.

    def setUp(self):
        self.pm = PluginManager()
        self.pm.application_version = '1.2.3'

    def test_rejects_zero(self):
        self.assertFalse(self.pm.compatible_version('0'))

    def test_rejects_two(self):
        self.assertFalse(self.pm.compatible_version('2'))

    def test_rejects_one_two_four(self):
        self.assertFalse(self.pm.compatible_version('1.2.4'))

    def test_accepts_one(self):
        self.assertTrue(self.pm.compatible_version('1'))

    def test_accepts_one_two_three(self):
        self.assertTrue(self.pm.compatible_version('1.2.3'))
|
Fenugreek/tamarind | functions.py | Python | gpl-3.0 | 4,617 | 0.006065 | """
Some useful utility functions missing from numpy/scipy.
Copyright 2016 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
import numpy as np
def dir_clip(data, clips):
    """
    'Directional' clip. *data* and *clips* must have the same shape.

    Each value is reduced to the portion of it lying between 0 and the
    corresponding clip: values whose sign differs from their clip become
    0, values overshooting their clip are replaced by the clip, and a NaN
    in either array leaves the value untouched. A new array is returned.
    """
    out = data.copy() if isinstance(data, np.ndarray) else np.array(data)
    both_known = ~np.isnan(data) & ~np.isnan(clips)
    opposite = both_known & (np.sign(data) != np.sign(clips))
    out[opposite] = 0.0
    overshoot = ~opposite & (abs(data) > abs(clips))
    out[overshoot] = clips[overshoot]
    return out
def toward_zero(data, value):
    """
    Shrink each element of *data* toward zero by *value* without crossing it:
    positives lose *value*, negatives gain it, and anything that would flip
    sign is pinned at 0. Returns a new array; *data* is untouched.
    """
    shrunk = data.copy()
    pos = data > 0
    neg = data < 0
    shrunk[pos] -= value
    shrunk[neg] += value
    shrunk[pos & (shrunk < 0)] = 0.0
    shrunk[neg & (shrunk > 0)] = 0.0
    return shrunk
def per_clip(data, caps):
    """
    Clip *data* between the caps[0]-th and caps[1]-th percentile values.
    A scalar *caps* caps only the upper end (values above that percentile).
    """
    if np.isscalar(caps):
        ceiling = np.percentile(data, caps)
        return np.fmin(data, ceiling)
    lo, hi = np.percentile(data, caps)
    return np.clip(data, lo, hi)
def scale2unit(data, eps=.1, dtype=None, soft_clip=99.99):
    """
    Scale values to between -1.0 and +1.0 strictly, and less strictly between
    -1.0 + <eps> or 1.0 - <eps>.

    More precisely, amplitude is scaled such that <large_value> is set to
    -1.0 + <eps> or 1.0 - <eps>, where <large_value> is
    the soft_clip %le value of abs(data) when soft_clip is set, else
    the max of abs(data).

    Result is returned as type <dtype>, which defaults to
    float32 when data.dtype is an integer type, else data.dtype.

    (Repairs a dataset separator character that split ``dtype`` mid-token.)
    """
    if dtype is None:
        dtype = data.dtype
        # Integer input cannot hold values in [-1, 1]; promote to float32.
        if 'int' in str(dtype): dtype = np.float32
    data = data / (np.percentile(abs(data), soft_clip) if soft_clip
                   else np.max(abs(data)))
    if eps: data *= 1. - eps
    # Soft clipping can leave values outside the unit range; clamp them.
    if soft_clip: data = np.clip(data, -1.0, 1.0)
    return data.astype(dtype, copy=False)
def softmax(data, axis=None):
    """Scale exp(data) to sum to unit along axis (over everything if None).

    Fixes the original's hand-rolled broadcasting
    (``[:, None].swapaxes(-1, axis)``), which raised for the default
    ``axis=None`` and for 1-D input; ``keepdims=True`` produces the same
    divisor shape for the cases that worked and handles the rest.
    """
    edata = np.exp(data)
    return edata / np.sum(edata, axis=axis, keepdims=True)
def sigmoid(data):
    """Sigmoid (logistic) activation: maps real values into (0, 1)."""
    z = np.exp(-data)
    return 1.0 / (1.0 + z)
def logit(data, eps=1e-8):
    """Inverse of the sigmoid function; eps guards division by zero and log(0)."""
    ratio = 1 / (data + eps) - 1 + eps
    return -np.log(ratio)
def elu(data, alpha=1.0, copy=True):
    """Exponential LU activation: identity for x >= 0, alpha*(exp(x)-1) below.

    With copy=False the input array is modified in place and returned.
    """
    out = data.copy() if copy else data
    neg = data < 0
    out[neg] = alpha * (np.exp(data[neg]) - 1.0)
    return out
def celu(data, alpha, copy=True):
    """Continuously differentiable ELU: alpha*(exp(x/alpha)-1) for x < 0.

    With copy=False the input array is modified in place and returned.
    """
    out = data.copy() if copy else data
    neg = data < 0
    out[neg] = alpha * (np.exp(data[neg] / alpha) - 1.0)
    return out
def ielu(data, copy=True, eps=1e-20):
    """Inverse exponential LU: log(x + 1) on the negative part, identity above.

    eps keeps log() defined at x == -1; copy=False mutates data in place.
    """
    out = data.copy() if copy else data
    neg = data < 0
    out[neg] = np.log(data[neg] + 1.0 + eps)
    return out
def llu(data, copy=True):
    """
    Linear-log activation: identity inside [-1, 1], shifted log outside,
    so the function stays continuous at +/-1. copy=False mutates in place.
    """
    out = data.copy() if copy else data
    high = data > 1.0
    out[high] = np.log(data[high]) + 1.0
    low = data < -1.0
    out[low] = -np.log(-data[low]) - 1.0
    return out
def illu(data, copy=True):
    """Inverse of llu: shifted exp outside [-1, 1], identity inside."""
    out = data.copy() if copy else data
    high = data > 1.0
    out[high] = np.exp(data[high] - 1.0)
    low = data < -1.0
    out[low] = -np.exp(-data[low] - 1.0)
    return out
def sroot(data, power=0.5):
    """
    'Signed' root: raise abs(data) to *power* (default square root),
    then restore each element's original sign.
    """
    return np.sign(data) * np.abs(data) ** power
|
Gabotero/GNURadioNext | gr-digital/python/qa_ofdm_txrx.py | Python | gpl-3.0 | 5,471 | 0.004387 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import scipy
import random
from gnuradio import gr, gr_unittest
import blocks_swig as blocks
import digital_swig as digital
import channels_swig as channels
from ofdm_txrx import ofdm_tx, ofdm_rx
from utils import tagged_streams
# Set this to true if you need to write out data
LOG_DEBUG_INFO=False
class ofdm_tx_fg (gr.top_block):
    """Flowgraph wrapping ofdm_tx: tagged byte vector source -> modulator -> sink."""
    def __init__(self, data, len_tag_key):
        gr.top_block.__init__(self, "ofdm_tx")
        # Build length tags so the Tx knows the packet boundary.
        tx_data, tags = tagged_streams.packets_to_vectors((data,), len_tag_key)
        src = blocks.vector_source_b(data, False, 1, tags)
        self.tx = ofdm_tx(packet_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
        self.sink = blocks.vector_sink_c()
        self.connect(src, self.tx, self.sink)
    def get_tx_samples(self):
        """Return the complex baseband samples produced by the Tx."""
        return self.sink.data()
class ofdm_rx_fg (gr.top_block):
    """Flowgraph wrapping ofdm_rx: sample source (optionally through a
    channel model) -> demodulator -> byte sink.

    Repairs stray dataset separator characters that corrupted two lines
    of the original body.
    """
    def __init__(self, samples, len_tag_key, channel=None, prepend_zeros=100):
        gr.top_block.__init__(self, "ofdm_rx")
        if prepend_zeros:
            # Leading silence so the receiver has to find the burst start.
            samples = (0,) * prepend_zeros + tuple(samples)
        # Trailing zeros flush the receiver's pipeline.
        src = blocks.vector_source_c(tuple(samples) + (0,) * 1000)
        self.rx = ofdm_rx(frame_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
        if channel is not None:
            self.connect(src, channel, self.rx)
        else:
            self.connect(src, self.rx)
        self.sink = blocks.vector_sink_b()
        self.connect(self.rx, self.sink)
    def get_rx_bytes(self):
        """Return the decoded payload bytes."""
        return self.sink.data()
class test_ofdm_txrx (gr_unittest.TestCase):
    """End-to-end tests for the OFDM Tx/Rx pair: sample-count sanity,
    noise-only robustness, and single-packet loopback with small and
    large frequency offsets."""
    def setUp (self):
        self.tb = gr.top_block ()
    def tearDown (self):
        self.tb = None
    def test_001_tx (self):
        """ Just make sure the Tx works in general """
        len_tag_key = 'frame_len'
        n_bytes = 52
        # Expected sample count derived from payload size, header, and symbol length.
        n_samples_expected = (numpy.ceil(1.0 * (n_bytes + 4) / 6) + 3) * 80
        test_data = [random.randint(0, 255) for x in range(n_bytes)]
        tx_data, tags = tagged_streams.packets_to_vectors((test_data,), len_tag_key)
        src = blocks.vector_source_b(test_data, False, 1, tags)
        tx = ofdm_tx(packet_length_tag_key=len_tag_key)
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        self.assertEqual(len(tx_fg.get_tx_samples()), n_samples_expected)
    def test_002_rx_only_noise(self):
        """ Run the RX with only noise, check it doesn't crash
        or return a burst. """
        len_tag_key = 'frame_len'
        samples = (0,) * 1000
        channel = channels.channel_model(0.1)
        rx_fg = ofdm_rx_fg(samples, len_tag_key, channel)
        rx_fg.run()
        self.assertEqual(len(rx_fg.get_rx_bytes()), 0)
    def test_003_tx1packet(self):
        """ Transmit one packet, with slight AWGN and slight frequency + timing offset.
        Check packet is received and no bit errors have occurred. """
        len_tag_key = 'frame_len'
        n_bytes = 21
        fft_len = 64
        test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
        # 1.0/fft_len is one sub-carrier, a fine freq offset stays below that
        freq_offset = 1.0 / fft_len * 0.7
        #channel = channels.channel_model(0.01, freq_offset)
        channel = None
        # Tx
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        tx_samples = tx_fg.get_tx_samples()
        # Rx
        rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
        rx_fg.run()
        rx_data = rx_fg.get_rx_bytes()
        self.assertEqual(tuple(tx_fg.tx.sync_word1), tuple(rx_fg.rx.sync_word1))
        self.assertEqual(tuple(tx_fg.tx.sync_word2), tuple(rx_fg.rx.sync_word2))
        self.assertEqual(test_data, rx_data)
    def test_004_tx1packet_large_fO(self):
        """ Transmit one packet, with slight AWGN and large frequency offset.
        Check packet is received and no bit errors have occurred. """
        fft_len = 64
        len_tag_key = 'frame_len'
        n_bytes = 21
        test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
        #test_data = tuple([255 for x in range(n_bytes)])
        # 1.0/fft_len is one sub-carrier
        frequency_offset = 1.0 / fft_len * 2.5
        channel = channels.channel_model(0.00001, frequency_offset)
        # Tx
        tx_fg = ofdm_tx_fg(test_data, len_tag_key)
        tx_fg.run()
        tx_samples = tx_fg.get_tx_samples()
        # Rx
        rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
        rx_fg.run()
        rx_data = rx_fg.get_rx_bytes()
        self.assertEqual(test_data, rx_data)
if __name__ == '__main__':
gr_unittest.run(test_ofdm_txrx, "test_ofdm_txrx.xml")
|
thinkopensolutions/odoo-saas-tools | oauth_provider/models/oauth_provider.py | Python | lgpl-3.0 | 3,090 | 0.002265 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from datetime import datetime, timedelta
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
try:
from oauthlib import common as oauthlib_common
except:
pass
import uuid
class OauthApplication(models.Model):
    """Odoo model for a registered OAuth client application."""
    # Characters permitted in a client id (informational constant).
    CLIENT_ID_CHARACTER_SET = r'_-.:;=?!@0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    _name = 'oauth.application'
    _rec_name = 'client_id'
    def generate_client_id(self):
        """Default client_id: a time-based UUID string."""
        return str(uuid.uuid1())
    client_id = fields.Char('Client ID', index=True, required=True, default=generate_client_id)
    token_ids = fields.One2many('oauth.access_token', 'application_id', 'Tokens')
    _sql_constraints = [
        ('client_id_uniq', 'unique (client_id)', 'client_id should be unique!'),
    ]
    @api.multi
    def _get_access_token(self, user_id=None, create=False):
        """Return a valid token string for *user_id* (current user by default).

        Reuses the newest non-expired token for this application/user pair;
        when none exists and *create* is set, issues a fresh one-hour token.
        Returns None when no valid token exists and create is False.
        """
        self.ensure_one()
        if not user_id:
            user_id = self.env.user.id
        access_token = self.env['oauth.access_token'].sudo().search([('application_id', '=', self.id), ('user_id', '=', user_id)], order='id DESC', limit=1)
        if access_token:
            access_token = access_token[0]
            if access_token.is_expired():
                # Expired: fall through to (optionally) create a new one.
                access_token = None
        if not access_token and create:
            expires = datetime.now() + timedelta(seconds=60 * 60)
            vals = {
                'user_id': user_id,
                'scope': 'userinfo',
                'expires': expires.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'token': oauthlib_common.generate_token(),
                'application_id': self.id,
            }
            access_token = self.env['oauth.access_token'].create(vals)
            # we have to commit now, because /oauth2/tokeninfo could
            # be called before we finish current transaction.
            self._cr.commit()
        if not access_token:
            return None
        return access_token.token
class OauthAccessToken(models.Model):
    """Odoo model storing issued OAuth access tokens.

    Repairs stray dataset separator characters that corrupted the
    ``user_id`` field declaration and the expiry comparison.
    """
    _name = 'oauth.access_token'
    application_id = fields.Many2one('oauth.application', string='Application')
    token = fields.Char('Access Token', required=True)
    user_id = fields.Many2one('res.users', string='User', required=True)
    expires = fields.Datetime('Expires', required=True)
    scope = fields.Char('Scope')
    @api.multi
    def is_valid(self, scopes=None):
        """
        Checks if the access token is valid.

        :param scopes: An iterable containing the scopes to check or None
        """
        self.ensure_one()
        return not self.is_expired() and self._allow_scopes(scopes)
    @api.multi
    def is_expired(self):
        """Return True when the token's expiry timestamp has passed."""
        self.ensure_one()
        return datetime.now() > datetime.strptime(self.expires, DEFAULT_SERVER_DATETIME_FORMAT)
    @api.multi
    def _allow_scopes(self, scopes):
        """True when every requested scope appears in the token's scope field."""
        self.ensure_one()
        if not scopes:
            return True
        provided_scopes = set(self.scope.split())
        resource_scopes = set(scopes)
        return resource_scopes.issubset(provided_scopes)
|
alexshepard/aledison | text.py | Python | mit | 877 | 0.011403 | #!/usr/bin/python
import yaml
config = yaml.safe_load(open("config.yml"))
twilio_account_sid = config["twilio"]["account_sid"]
twilio_auth_token = config["twilio"]["auth_token"]
twilio_from_number = config["t | wilio"]["from_number"] |
from twilio.rest import TwilioRestClient
twilio_client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
from contacts import Contacts, Contact
c = Contacts()
# syntax: text.py <contact> <message>
import sys
script_name = sys.argv.pop(0)
name = sys.argv.pop(0)
msg = " ".join([str(x) for x in sys.argv])
contact = c.find_contact_by_name(name)
if contact and msg:
print("from " + str(twilio_from_number))
message = twilio_client.messages.create(
body=msg,
from_=twilio_from_number,
to=contact.number
)
print("message is " + message.sid)
else:
print("couldn't find contact '" + name + "' or empty message")
|
LaurentClaessens/mazhe | src_yanntricks/yanntricksASHYooUVHkak.py | Python | gpl-3.0 | 1,071 | 0.063492 | # -*- coding: utf8 -*-
from yanntricks import *
def ASHYooUVHkak():
pspict,fig = SinglePicture("ASHYooUVHkak")
pspict.dilatation_X(1)
pspict.dilatation_Y(1)
delta=1.2
t1=2
t2=-1.5
s1=Segment( Point(t2-0.4,delta),Point(t1+1,delta) )
s1.parameters.style="dashed"
P=Point(0,delta) |
P.put_m | ark(0.2,45,"\( \delta\)",pspict=pspict,position="corner")
v=Vector(0.3,-0.2)
Q=Point(t1,delta)
R=Point(t2,delta)
m1=Segment( P.translate(-v),P.translate(v) )
m2=Segment( Q.translate(-v),Q.translate(v) )
m3=Segment( R.translate(-v),R.translate(v) )
T=Point(t1,0)
T.put_mark(0.2,text="\( t_1\)",pspict=pspict,position="N")
U=Point(t2,0)
U.put_mark(0.2,text="\( t_2\)",pspict=pspict,position="N")
vert1=Segment(T,Q)
vert1.parameters.style="dotted"
vert2=Segment(U,R)
vert2.parameters.style="dotted"
pspict.DrawGraphs(s1,P,m1,m2,T,U,vert1,vert2,m3)
pspict.axes.no_graduation()
pspict.DrawDefaultAxes()
fig.no_figure()
fig.conclude()
fig.write_the_file()
|
auvsi-suas/interop | server/auvsi_suas/models/access_log.py | Python | apache-2.0 | 4,701 | 0.000851 | """Model for an access log."""
import functools
import logging
import numpy as np
from django.conf import settings
from django.db import models
from django.utils import timezone
logger = logging.getLogger(__name__)
class AccessLogMixin(models.Model):
    """Base class which logs access of information."""
    # The user which accessed the data.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             db_index=True,
                             on_delete=models.CASCADE)
    # Timestamp of the access.
    timestamp = models.DateTimeField(db_index=True)
    class Meta:
        abstract = True
        index_together = (('user', 'timestamp'), )
    def __init__(self, *args, **kwargs):
        super(AccessLogMixin, self).__init__(*args, **kwargs)
        # Default the timestamp at construction so callers need not supply it.
        if self.timestamp is None:
            self.timestamp = timezone.now()
    @classmethod
    def by_user(cls, user, start_time=None, end_time=None):
        """Gets the time-sorted list of access log for the given user.
        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            A list of access log objects for the given user sorted by timestamp.
        """
        query = cls.objects.filter(user_id=user.pk)
        if start_time:
            query = query.filter(timestamp__gte=start_time)
        if end_time:
            query = query.filter(timestamp__lt=end_time)
        return query.order_by('timestamp')
    @classmethod
    def last_for_user(cls, user, start_time=None, end_time=None):
        """Gets the last access log for the user.
        Args:
            user: The user to get the access log for.
            start_time: Optional. Inclusive start time.
            end_time: Optional. Exclusive end time.
        Returns:
            The last access log for the user.
        """
        return cls.by_user(user, start_time, end_time).last()
    @classmethod
    def by_time_period(cls, user, time_periods):
        """Gets a list of time-sorted lists of access logs for each time period.
        The method returns the full sets of AccessLogMixins for each TimePeriod. If
        overlapping TimePeriods are provided, the results may contain duplicate
        logs.
        Args:
            user: The user to get the access log for.
            time_periods: A list of TimePeriod objects.
        Returns:
            A list of AccessLogMixin lists, where each AccessLogMixin list contains all
            AccessLogMixins corresponding to the related TimePeriod.
        """
        return [cls.by_user(user, p.start, p.end) for p in time_periods]
    @classmethod
    def rates(cls, user, time_periods, time_period_logs=None):
        """Gets the access log rates.
        Args:
            user: The user to get the access log rates for.
            time_periods: A list of TimePeriod objects. Note: to avoid
                computing rates with duplicate logs, ensure that all
                time periods are non-overlapping.
            time_period_logs: Optional. A sequence of AccessLogMixin sequences,
                where each AccessLogMixin sequence contains all AccessLogMixins
                corresponding to the related TimePeriod. If None, will obtain
                by calling by_time_period().
        Returns:
            A (max, avg) tuple. The max is the max time between logs, and avg
            is the avg time between logs.
        """
        # Check that time periods were provided.
        if not time_periods:
            return (None, None)
        # Check that all time periods are closed.
        for time_period in time_periods:
            if time_period.duration() is None:
                return (None, None)
        # If logs were not provided, obtain.
        if not time_period_logs:
            time_period_logs = cls.by_time_period(user, time_periods)
        # Utility generator for time durations.
        def time_between_logs(time_periods, time_period_logs):
            for ix, period in enumerate(time_periods):
                prev_time = period.start
                for log in time_period_logs[ix]:
                    yield (log.timestamp - prev_time).total_seconds()
                    prev_time = log.timestamp
                yield (period.end - prev_time).total_seconds()
        # Calculate max, sum, count for time durations.
        (m, s, c) = functools.reduce(
            lambda r, d: (max(r[0], d), r[1] + d, r[2] + 1),
            time_between_logs(time_periods, time_period_logs), (0.0, 0.0, 0))
        # Convert to max and average.
        return (m, s / c)
|
saicoco/leetcode | array/414_Third_Max_Num.py | Python | gpl-3.0 | 325 | 0.003077 | class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
new_nums = set(nums)
if len(new_nums)<3:
return max(new_nums)
new_nums.remove(max(new_nums))
new_nums.remove(max(new_nums)) |
return max(new_nums)
| |
luogangyi/bcec-nova | nova/virt/libvirt/driver.py | Python | apache-2.0 | 229,211 | 0.000358 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:driver_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm).
:connection_uri: Override for the default libvirt URI (depends on
driver_type).
:disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import mmap
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.objects import block_device as block_device_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import rpc
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import cpu
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image',
deprecated_group='DEFAULT'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image',
| deprecated_group='DEFAULT'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image',
deprecated_group='DEFAULT'),
cfg.StrOpt('virt_type',
default='kvm',
| help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)',
deprecated_group='DEFAULT',
deprecated_name='libvirt_type'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)',
deprecated_group='DEFAULT',
deprecated_name='libvirt_uri'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.',
deprecated_name='libvirt_inject_password',
deprecated_group='DEFAULT'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time',
deprecated_name='libvirt_inject_key',
deprecated_group='DEFAULT'),
cfg.IntOpt('inject_partition',
default=-2,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number',
deprecated_name='libvirt_inject_partition',
deprecated_group='DEFAULT'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs',
deprecated_group='DEFAULT'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)',
deprecated_group='DEFAULT'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration',
deprecated_group='DEFAULT'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration',
deprecated_group='DEFAULT'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps',
deprecated_group='DEFAULT'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image',
deprecated_group='DEFAULT'),
cfg.StrOpt('vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='DEPRECATED. The libvirt VIF driver to configure the VIFs.'
'This option is deprecated and will be removed in the '
'Juno release.',
deprecated_name='libvirt_vif_driver',
deprecated_group='DEFAULT'),
cfg.ListOpt('volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
|
sergiomt/centorion | vagrant-setup/cassandra/cqlshlib/cqlhandling.py | Python | apache-2.0 | 27,443 | 0.004409 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code for dealing with CQL's syntax, rules, interpretation
# i.e., stuff that's not necessarily cqlsh-specific
import re
from . import pylexotron
from itertools import izip
Hint = pylexotron.Hint
columnfamily_options = (
# (CQL option name, Thrift option name (or None if same))
('comment', None),
('comparator', 'comparator_type'),
('row_cache_provider', None),
('key_cache_size', None),
('row_cache_size', None),
('read_repair_chance', None),
('gc_grace_seconds', None),
('default_validation', 'default_validation_class'),
('min_compaction_threshold', None),
('max_compaction_threshold', None),
('row_cache_save_period_in_seconds', None),
('key_cache_save_period_in_seconds', None),
('replicate_on_write', None)
)
cql_type_to_apache_class = {
'ascii': 'AsciiType',
'bigint': 'LongType',
'blob': 'BytesType',
'boolean': 'BooleanType',
'counter': 'CounterColumnType',
'decimal': 'DecimalType',
'double': 'DoubleType',
'float': 'FloatType',
'int': 'Int32Type',
'text': 'UTF8Type',
'timestamp': 'DateType',
'uuid': 'UUIDType',
'varchar': 'UTF8Type',
'varint': 'IntegerType'
}
apache_class_to_cql_type = dict((v,k) for (k,v) in cql_type_to_apache_class.items())
cql_types = sorted(cql_type_to_apache_class.keys())
def find_validator_class(cqlname):
    """Map a CQL type name (e.g. 'text') to its Apache validator class name.

    Raises KeyError for unknown type names.
    """
    return cql_type_to_apache_class[cqlname]
replication_strategies = (
'SimpleStrategy',
'OldNetworkTopologyStrategy',
'NetworkTopologyStrategy'
)
consistency_levels = (
'ANY',
'ONE',
'QUORUM',
'ALL',
'LOCAL_QUORUM',
'EACH_QUORUM'
)
# Terms matching this pattern need no quoting to be valid CQL: either an
# identifier (letter then letters/digits/underscores) or a numeric literal.
valid_cql_word_re = re.compile(r"^(?:[a-z][a-z0-9_]*|-?[0-9][0-9.]*)$", re.I)

def is_valid_cql_word(s):
    """Return True if *s* can appear unquoted in valid CQL."""
    return bool(valid_cql_word_re.match(s))
def tokenize_cql(cql_text):
    """Lex *cql_text* with CqlLexotron; returns the first element of its
    scan() result (the token list)."""
    return CqlLexotron.scan(cql_text)[0]
def cql_detokenize(toklist):
    """Join the text of (type, text) tokens back into one space-separated string."""
    return ' '.join(tok[1] for tok in toklist)
# note: commands_end_with_newline may be extended by an importing module.
commands_end_with_newline = set()
def token_dequote(tok):
    """Return the semantic text of a (type, text) token, undoing CQL quoting."""
    toktype, toktext = tok
    if toktype == 'stringLiteral':
        # drop the surrounding quotes and unescape doubled quotes
        return toktext[1:-1].replace("''", "'")
    if toktype == 'unclosedString':
        # only an opening quote to drop
        return toktext[1:].replace("''", "'")
    if toktype == 'unclosedComment':
        return ''
    return toktext
def cql_dequote(cqlword):
    """Strip whitespace and, for quoted words, remove CQL string quoting."""
    word = cqlword.strip()
    if word.startswith("'"):
        return word[1:-1].replace("''", "'")
    return word
def token_is_word(tok):
    """True when the token is a bare identifier."""
    toktype = tok[0]
    return toktype == 'identifier'
def cql_escape(value):
    """Render a Python value as a CQL literal; strings get quoted/escaped."""
    if value is None:
        return 'NULL'  # NB: original note says CQL won't actually accept this
    if isinstance(value, float):
        return '%f' % value
    if isinstance(value, int):
        return str(value)
    escaped = value.replace("'", "''")
    return "'" + escaped + "'"
def maybe_cql_escape(value):
    """Quote *value* for CQL unless it is already a bare valid term."""
    return value if is_valid_cql_word(value) else cql_escape(value)
def cql_typename(classname):
    """Map an Apache validator class name (optionally fully qualified) to its
    CQL type name; unknown classes come back as a quoted CQL literal."""
    marshal_pkg = 'org.apache.cassandra.db.marshal.'
    if classname.startswith(marshal_pkg):
        classname = classname[len(marshal_pkg):]
    if classname in apache_class_to_cql_type:
        return apache_class_to_cql_type[classname]
    return cql_escape(classname)
special_completers = []
def completer_for(rulename, symname):
    """Decorator factory: register the decorated function as the completer
    for symbol *symname* of grammar rule *rulename*.

    The wrapper returns () when no 'cassandra_conn' binding is present in
    the completion context; otherwise it calls the completer with the
    context and the connection. (Repairs stray dataset separator
    characters in the original ``def`` and ``return`` lines.)
    """
    def registrator(f):
        def completerwrapper(ctxt):
            cass = ctxt.get_binding('cassandra_conn', None)
            if cass is None:
                return ()
            return f(ctxt, cass)
        completerwrapper.func_name = 'completerwrapper_on_' + f.func_name
        special_completers.append((rulename, symname, completerwrapper))
        return completerwrapper
    return registrator
def explain_completion(rulename, symname, explanation=None):
    """Register a completer that only shows a hint string for the symbol
    (defaults to '<symname>')."""
    if explanation is None:
        explanation = '<%s>' % (symname,)
    @completer_for(rulename, symname)
    def explainer(ctxt, cass):
        return [Hint(explanation)]
    return explainer
def is_counter_col(cfdef, colname):
    """True when colname's validation class in cfdef maps to the CQL
    'counter' type."""
    col_info = [cm for cm in cfdef.column_metadata if cm.name == colname]
    return bool(col_info and cql_typename(col_info[0].validation_class) == 'counter')
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<integer> ::= /-?[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<identifier> ::= /[a-z][a-z0-9_:]*/ ;
<star> ::= "*" ;
<range> ::= ".." ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedComment> ::= /[/][*][^\n]*$/ ;
<symbol> ::= <star>
| <range>
| <op>
| <cmp>
;
<name> ::= <identifier>
| <stringLiteral>
| <integer>
;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
;
<colname> ::= <term>
| <identifier>
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <alterTableStatement>
;
<consistencylevel> ::= cl=<identifier> ;
<storageType> ::= typename=( <identifier> | <stringLiteral> );
'''
@completer_for('consistencylevel', 'cl')
def cl_completer(ctxt, cass):
    """Offer the known consistency-level names for the <cl> symbol."""
    return consistency_levels
@completer_for('storageType', 'typename')
def storagetype_completer(ctxt, cass):
    """Offer the known CQL type names for <storageType>."""
    return cql_types
syntax_rules += r'''
<useStatement> ::= "USE" ksname=<name>
;
'''
@completer_for('useStatement', 'ksname')
def use_ks_completer(ctxt, cass):
    """Offer existing keyspace names (quoted when needed) after USE."""
    return map(maybe_cql_escape, cass.get_keyspace_names())
syntax_rules += r'''
<selectStatement> ::= "SELECT" <whatToSelect>
"FROM" ( selectks=<name> "." )? selectsource=<name>
("USING" "CONSISTENCY" <consistencylevel>)?
("WHERE" <selectWhereClause>)?
("LIMIT" <integer>)?
;
<selectWhereClause> ::= <relation> ("AND" <relation>)*
| keyname=<colname> "IN" "(" <term> ("," <term>)* ")"
;
<relation> ::= [rel_lhs]=<colname> ("=" | "<" | ">" | "<=" | ">=") <colname>
;
<whatToSelect> ::= colname=<colname> |
jeremyherbert/TumblrServ | tumblrserv.py | Python | gpl-2.0 | 3,373 | 0.013638 | #!/usr/bin/env python
## tumblrserv.py implements a Tumblr (http://www.tumblr.com) markup parsing
## engine and compatible webserver.
##
## Version: 0.2 final
##
## Copyright (C) 2009 Jeremy Herbert
## Contact mailto:jeremy@jeremyherbert.net
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
import os, sys, ftplib, yaml, cherrypy, re, urllib2
from src.post_classes import *
from src import json
from src.constants import *
from src.support import *
from src.net import *
from src.server import *
post_types = ['Regular', 'Photo', 'Quote', 'Link', 'Conversation', 'Video', 'Audio', 'Conversation']
args_dict = {
'autoreload': 0, # Whether to add the meta refresh tag
'publish': False, # Whether to push the new theme data to tumblr
'data_source': DATA_LOCAL, # Whether to use local data in the theme
}
########################################
# take the arguments and place them in a mutable list
arguments = sys.argv
# if the script has been run with the interpreter prefix, get rid of it
if arguments[0] == 'python' or arguments[0] == 'ipython' \
or arguments[0] == 'python2.5':
arguments.pop(0)
# pop off the script name
arguments.pop(0)
# load the configuration file
config_path = 'data/config.yml'
if contains(arguments, '--config'):
if os.path.exists(next_arg(arguments, '--config')):
config_path = next_arg(arguments, '--config')
config = get_config(config_path)
# now we check if there are any data processing flags
if contains(arguments, '--pull-data'):
# call pull_data with the argument afte | r the flag
pull_data( next_arg(arguments, '--pull-data') )
if contains(arguments, '--theme'):
if not os.path.exists("themes/" + next_arg(arguments, '--theme') + '.thtml'):
err_exit("The theme file %s.thtml does not exist in the themes\
direct | ory." % next_arg(arguments, '--theme'))
config['defaults']['theme_name'] = next_arg(arguments, '--theme')
if contains(arguments, '--publish'):
if not has_keys(config['publishing_info'], \
( 'url', 'username', 'password' )):
err_exit('The configuration file is missing some critical publishing\
information. Please make sure you have specified your url, username and\
password.')
publish_theme(config['publishing_info']['url'],\
config['publishing_info']['username'],\
config['publishing_info']['password'],\
get_markup('themes/%s.thtml' % config['defaults']['theme_name']))
if contains(arguments, '--do-nothing'):
config['optimisations']['do_nothing'] = True
# start the server up
cherrypy.config.update('data/cherrypy.conf')
cherrypy.quickstart(TumblrServ(config), '/') |
ohmu/poni | poni/errors.py | Python | apache-2.0 | 1,028 | 0.016537 | """
error types
Copyright (c) 2010-2012 Mika Eloranta
See LICENSE for details.
"""
class Error(Exception):
"""error"""
class InvalidProperty(Error):
"""invalid property"""
class MissingProperty(Error):
"""missing property"""
class UserError(Error):
"""user error"""
class InvalidRange(Error):
""" | invalid range"""
class SettingsError(Error):
"""settings error"""
class VerifyError(Error):
"""verify error"""
class TemplateError(Error):
"""template rendering error"""
class CloudError(Error):
"""cloud error"""
class RemoteError(Error):
"""remote error"""
class RemoteFileDoesNotExist(RemoteError):
"""remote file does not exist"""
class RepoError(Error):
"""repository error"""
class ImporterError(Error):
"""importer error"""
class MissingLibraryError(Error):
| """missing library error"""
class RequirementError(Error):
"""requirement error"""
class ControlError(Error):
"""control error"""
class OperationError(Error):
"""operation error"""
|
mozilla-services/FunkLoad | src/funkload/Recorder.py | Python | gpl-2.0 | 15,749 | 0.000889 | # (C) Copyright 2005 Nuxeo SAS <http://nuxeo.com>
# Author: bdelbosc@nuxeo.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
"""TCPWatch FunkLoad Test Recorder.
Requires tcpwatch-httpproxy or tcpwatch.py available at:
* http://hathawaymix.org/Software/TCPWatch/tcpwatch-1.3.tar.gz
Credits goes to Ian Bicking for parsing tcpwatch files.
$Id$
"""
import os
import sys
import re
from cStringIO import StringIO
from optparse import OptionParser, TitledHelpFormatter
from tempfile import mkdtemp
import rfc822
from cgi import FieldStorage
from urlparse import urlsplit
from utils import truncate, trace, get_version, Data
def get_null_file():
if sys.platform.lower().startswith('win'):
return "NUL"
else:
return "/dev/null"
class Request:
"""Store a tcpwatch reques | t."""
def __init__(self, file_path):
"""Load a tcpwatch request file."""
self.file_path = file_path
f = open(file_path, 'rb')
line = f.readline().split(None, 2)
if not line:
trace('# Warning: empty first line on %s\n' % self.file_path)
line = f.readline().split(None, 2)
self.method = line[0]
url = line[1]
scheme, host, path, query, fragment = urlsplit(url)
self.host = scheme + '://' | + host
self.rurl = url[len(self.host):]
self.url = url
self.path = path
self.version = line[2].strip()
self.headers = dict(rfc822.Message(f).items())
self.body = f.read()
f.close()
def extractParam(self):
"""Turn muti part encoded form into params."""
params = []
try:
environ = {
'CONTENT_TYPE': self.headers['content-type'],
'CONTENT_LENGTH': self.headers['content-length'],
'REQUEST_METHOD': 'POST',
}
except KeyError:
trace('# Warning: missing header content-type or content-length'
' in file: %s not an http request ?\n' % self.file_path)
return params
form = FieldStorage(fp=StringIO(self.body),
environ=environ,
keep_blank_values=True)
try:
keys = form.keys()
except TypeError:
trace('# Using custom data for request: %s ' % self.file_path)
params = Data(self.headers['content-type'], self.body)
return params
for item in form.list:
key = item.name
value = item.value
filename = item.filename
if filename is None:
params.append([key, value])
else:
# got a file upload
filename = filename or ''
params.append([key, 'Upload("%s")' % filename])
if filename:
if os.path.exists(filename):
trace('# Warning: uploaded file: %s already'
' exists, keep it.\n' % filename)
else:
trace('# Saving uploaded file: %s\n' % filename)
f = open(filename, 'w')
f.write(str(value))
f.close()
return params
def __repr__(self):
params = ''
if self.body:
params = self.extractParam()
return '<request method="%s" url="%s" %s/>' % (
self.method, self.url, str(params))
class Response:
"""Store a tcpwatch response."""
def __init__(self, file_path):
"""Load a tcpwatch response file."""
self.file_path = file_path
f = open(file_path, 'rb')
line = f.readline().split(None, 2)
self.version = line[0]
self.status_code = line[1].strip()
if len(line) > 2:
self.status_message = line[2].strip()
else:
self.status_message = ''
self.headers = dict(rfc822.Message(f).items())
self.body = f.read()
f.close()
def __repr__(self):
return '<response code="%s" type="%s" status="%s" />' % (
self.status_code, self.headers.get('content-type'),
self.status_message)
class RecorderProgram:
"""A tcpwatch to funkload recorder."""
tcpwatch_cmd = ['tcpwatch-httpproxy', 'tcpwatch.py', 'tcpwatch']
MYFACES_STATE = 'org.apache.myfaces.trinidad.faces.STATE'
MYFACES_FORM = 'org.apache.myfaces.trinidad.faces.FORM'
USAGE = """%prog [options] [test_name]
%prog launch a TCPWatch proxy and record activities, then output
a FunkLoad script or generates a FunkLoad unit test if test_name is specified.
The default proxy port is 8090.
Note that tcpwatch-httpproxy or tcpwatch.py executable must be accessible from your env.
See http://funkload.nuxeo.org/ for more information.
Examples
========
%prog foo_bar
Run a proxy and create a FunkLoad test case,
generates test_FooBar.py and FooBar.conf file.
To test it: fl-run-test -dV test_FooBar.py
%prog -p 9090
Run a proxy on port 9090, output script to stdout.
%prog -i /tmp/tcpwatch
Convert a tcpwatch capture into a script.
"""
def __init__(self, argv=None):
if argv is None:
argv = sys.argv[1:]
self.verbose = False
self.tcpwatch_path = None
self.prefix = 'watch'
self.port = "8090"
self.server_url = None
self.class_name = None
self.test_name = None
self.loop = 1
self.script_path = None
self.configuration_path = None
self.use_myfaces = False
self.parseArgs(argv)
def getTcpWatchCmd(self):
"""Return the tcpwatch cmd to use."""
tcpwatch_cmd = self.tcpwatch_cmd[:]
if os.getenv("TCPWATCH"):
tcpwatch_cmd.insert(0, os.getenv("TCPWATCH"))
for cmd in tcpwatch_cmd:
ret = os.system(cmd + ' -h 2> %s' % get_null_file())
if ret == 0:
return cmd
raise RuntimeError('Tcpwatch is not installed no %s found. '
'Visit http://funkload.nuxeo.org/INSTALL.html' %
str(self.tcpwatch_cmd))
def parseArgs(self, argv):
"""Parse programs args."""
parser = OptionParser(self.USAGE, formatter=TitledHelpFormatter(),
version="FunkLoad %s" % get_version())
parser.add_option("-v", "--verbose", action="store_true",
help="Verbose output")
parser.add_option("-p", "--port", type="string", dest="port",
default=self.port, help="The proxy port.")
parser.add_option("-i", "--tcp-watch-input", type="string",
dest="tcpwatch_path", default=None,
help="Path to an existing tcpwatch capture.")
parser.add_option("-l", "--loop", type="int",
dest="loop", default=1,
help="Loop mode.")
options, args = parser.parse_args(argv)
if len(args) == 1:
test_name = args[0]
else:
test_name = None
self.verbose = options.verbose
self.tcpwatch_path = options.tcpwatch_path
self.port = options.port
if not test_name and not self.tcpwatch_path:
self.loop = options.loop
if test_name:
test_name = test_name.replace('-', '_')
class_ |
yosshy/nova | nova/tests/functional/api_samples_test_base.py | Python | apache-2.0 | 15,165 | 0.001912 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo_serialization import jsonutils
import six
from nova import test
from nova.tests.functional import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
sample_dir = None
request_api_version = None
_use_common_server_api_samples = False
def _pretty_data(self, data):
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
@classmethod
def _get_sample_path(cls, name, dirname, suffix='', api_version=None):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
# Note(gmann): if _use_common_server_api_samples is set to True
# then common server sample files present in 'servers' directory
# will be used. As of now it is being used for server POST request
# to avoid duplicate copy of server req and resp sample files.
# Example - ServersSampleBase's _post_server method.
elif cls._use_common_server_api_samples:
parts.append('servers')
else:
if cls.sample_dir:
parts.append(cls.sample_dir)
elif cls.extension_name:
parts.append(cls.extension_name)
if api_version:
parts.append('v' + api_version)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"../../../doc"))
return cls._get_sample_path(name, dirname, api_version=api_version)
@classmethod
def _get_template(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"./api_sample_tests"))
return cls._get_sample_path(name, dirname, suffix='.tpl',
api_version=api_version)
def _read_template(self, name):
template = self._get_template(name, self.request_api_version)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name,
self.request_api_version), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(
name, self.request_api_version), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch('%(result_str)s: %(result)s is not a dict.'
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
'Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
'%(result_str)s: %(result)s is not a list.' %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
| except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append('Extra list items in template:')
error.extend([repr(o) for o in expected])
if extra:
error.append('Extra list items in %(result_str)s:' %
{'result_str': result | _str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s' %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
if isinstance(result, six.string_types):
result = result.strip()
if expected != result:
# NOTE(tdurakov):this attempt to parse string as JSON
# is needed for correct comparison of hypervisor.cpu_info,
# which is stringified JSON object
#
# TODO(tdurakov): remove this check as soon as
|
fifoforlifo/pynja | test2/code/java1/java1.py | Python | apache-2.0 | 331 | 0.003021 | import os
import pynja
import repo
@pynja.project
class java | 1(repo.JavaProject):
def emit(self):
sources = [
"com/java1/Counte | r.java",
]
with self.java_compile_ex(sources) as task:
task.workingDir = os.path.join(self.projectDir, "source")
self.jar_create("java1.jar")
|
pburdet/hyperspy | hyperspy/tests/signal/test_kramers_kronig_transform.py | Python | gpl-3.0 | 6,051 | 0.000331 | # Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
from nose.tools import assert_true, assert_equal, assert_not_equal, raises
from hyperspy.signals import EELSSpectrum
from hyperspy.components import VolumePlasmonDrude, Lorentzian
from hyperspy.misc.eels.tools import eels_constant
from hyperspy.hspy import *
class Test2D:
def setUp(self):
"""To test the kramers_kronig_analysis we will generate 3
EELSSpectrum instances. First a model energy loss function(ELF),
in our case following the Drude bulk plasmon peak. Second, we
simulate the inelastic scattering to generate a model scattering
distribution (SPC). Finally, we use a lorentzian peak with
integral equal to 1 to simulate a ZLP.
"""
# Parameters
i0 = 1.
t = signals.Signal(np.arange(10, 70, 10).reshape((2, 3))) # thickness
t.axes_manager.set_signal_dimension(0)
scale = 0.02
# Create an 3x2x2048 spectrum with Drude plasmon
s = EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
s.set_microscope_parameters(
beam_energy=300.0,
convergence_angle=5,
collection_angle=10.0)
s.axes_manager.signal_axes[0].scale = scale
k = eels_constant(s, i0, t)
vpm = VolumePlasmonDrude()
m = create_model(s, auto_background=False)
m.append(vpm)
vpm.intensity.map['values'][:] = 1
vpm.plasmon_energy.map['values'] = np.array([[8., 18.4, 15.8],
[16.6, 4.3, 3.7]])
vpm.fwhm.map['values'] = np.array([[2.3, 4.8, 0.53],
[3.7, 0.3, 0.3]])
vpm.intensity.map['is_set'][:] = True
vpm.plasmon_energy.map['is_set'][:] = True
vpm.fwhm.map['is_set'][:] = True
s.data = (m.as_signal() * k).data
# Create ZLP
z = s.deepcopy()
z.axes_manager.signal_axes[0].scale = scale
z.axes_manager.signal_axes[0].offset = -10
zlp = Lorentzian()
zlp.A.value = i0
zlp.gamma.value = 0.2
zlp.centre.value = 0.0
z.data[:] = zlp.function(z.axes_manager[-1].axis).reshape((1, 1, -1))
z.data *= scale
self.s = s
self.thickness = t
self.k = k
self.zlp = z
def test_df_given_n(self):
"""The kramers kronig analysis method applied to the signal we
have just designed above will return the CDF for the Drude bulk
plasmon. Hopefully, we recover the signal by inverting the CDF.
"""
# i use n=1000 to simulate a metal (enormous n)
cdf = self.s.kramers_kronig_analysis(zlp=self.zlp,
iterations=1,
n=1000.)
s = cdf.get_electron_energy_loss_spectrum(self.zlp, self.thickness)
assert_true(np.allclose(s.data,
self.s.data[..., 1:],
rtol=0.01))
def test_df_given_thickness(self):
"""The kramers kronig analysis method applied to the signal we
have just designed above will return the CDF for the Drude bulk
plasmon. Hopefully, we recover the signal by inverting the CDF.
"""
cdf = self.s.kramers_kronig_analysis(zlp=self.zlp,
iterations=1,
t=self.thickness)
s = cdf.get_electron_energy_loss_spectrum(self.zlp, self.thickness)
assert_true(np.allclose(s.data,
self.s.data[..., 1:],
rtol=0.01))
def test_bethe_sum_rule(self):
df = self.s.kramers_kronig_analysis(zlp=self.zlp,
iterations=1,
n=1000.)
neff1, neff2 = df.get_number_of_effective_electrons(nat=50e27,
cumulative=False)
assert_true(np.allclose(neff1.data,
np.array([[0.91187657, 4.72490711, 3.60594653],
[3.88077047, 0.26759741, 0.19813647]])))
assert_true(np.allclose(neff2.data,
np.array([[0.91299039, 4.37469112, 3.41580094],
[3.64866394, 0.15693674, 0.11146413]])))
def test_thickness_estimation(self):
"""Kramers kronig analysis gives a rough estimation of sample
thickness. As we have predefined sample thickness for our
scattering distribution, we can use it for testing putposes.
"""
cdf, output = self.s.kramers_kronig_analysis(zlp=self.zlp,
iterations=1,
| n=10 | 00.,
full_output=True)
assert_true(np.allclose(self.thickness.data,
output['thickness'].data, rtol=0.01))
@raises(ValueError)
def test_thicness_input_array(self):
cdf = self.s.kramers_kronig_analysis(zlp=self.zlp,
iterations=1,
t=self.thickness.data)
|
pmghalvorsen/gramps_branch | gramps/gen/filters/rules/person/_hasidof.py | Python | gpl-2.0 | 1,631 | 0.005518 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Pu | blic License for more details.
#
# You should have received a copy of the GNU Gener | al Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import HasGrampsId
#-------------------------------------------------------------------------
#
# HasIdOf
#
#-------------------------------------------------------------------------
class HasIdOf(HasGrampsId):
"""Rule that checks for a person with a specific GRAMPS ID"""
name = _('Person with <Id>')
description = _("Matches person with a specified Gramps ID")
|
jtopjian/st2 | st2common/tests/unit/test_isotime.py | Python | apache-2.0 | 5,334 | 0.001687 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from st2common.util import isotime
class TestTimeUtil(unittest.TestCase):
def test_add_utc_tz_info(self):
dt = datetime.datetime.utcnow()
self.assertIsNone(dt.tzinfo)
dt = isotime.add_utc_tz(dt)
self.assertIsNotNone(dt.tzinfo)
self.assertEqual(dt.tzinfo.tzname(None), 'UTC')
def test_validate(self):
self.assertTrue(isotime.validate('2000-01-01 12:00:00Z'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00+00'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00+0000'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00+00:00'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000Z'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+00'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+0000'))
self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+00:00'))
self.assertTrue(isotime.validate('2000-01-01T12:00:00Z'))
self.assertTrue(isotime.validate('2000-01-01T12:00:00.000000Z'))
self.assertTrue(isotime.validate('2000-01-01T12:00:00+00:00'))
self.assertTrue(isotime.validate('2000-01-01T12:00:00.000000+00:00'))
self.assertTrue(isotime.validate('2015-02-10T21:21:53.399Z'))
self.assertFalse(isotime.validate('2000-01-01', raise_exception=False))
self.assertFalse(isotime.validate('2000-01-01T12:00:00', raise_exception=False))
self.assertFalse(isotime.validate('2000-01-01T12:00:00+00:00Z', raise_exception=False))
self.assertFalse(isotime.validate('2000-01-01T12:00:00.000000', raise_exception=False))
self.assertFalse(isotime.validate('Epic!', raise_exception=False))
self.assertFalse(isotime.validate(object(), raise_exception=False))
self.assertRaises(ValueError, isotime.validate, 'Epic!', True)
def test_parse(self):
dt = isotime.add_utc_tz(datetime.datetime(2000, 1, 1, 12))
self.assertEqual(isotime.parse('2000-01-01 12:00:00Z'), dt)
self.assertEqual(isotime.parse('2000-01-01 12:00:00+00'), dt)
| self.assertEqual(isotime.parse('2000-01-01 12:00:00+0000'), dt)
self.assertEqual(isotime.parse('2000-01-01 12:00:00+00:00'), dt)
self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000Z'), dt)
self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000+00'), dt)
self.assertEqual(isot | ime.parse('2000-01-01 12:00:00.000000+0000'), dt)
self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000+00:00'), dt)
self.assertEqual(isotime.parse('2000-01-01T12:00:00Z'), dt)
self.assertEqual(isotime.parse('2000-01-01T12:00:00+00:00'), dt)
self.assertEqual(isotime.parse('2000-01-01T12:00:00.000000Z'), dt)
self.assertEqual(isotime.parse('2000-01-01T12:00:00.000000+00:00'), dt)
self.assertEqual(isotime.parse('2000-01-01T12:00:00.000Z'), dt)
def test_format(self):
dt = isotime.add_utc_tz(datetime.datetime(2000, 1, 1, 12))
dt_str_usec_offset = '2000-01-01T12:00:00.000000+00:00'
dt_str_usec = '2000-01-01T12:00:00.000000Z'
dt_str_offset = '2000-01-01T12:00:00+00:00'
dt_str = '2000-01-01T12:00:00Z'
dt_unicode = u'2000-01-01T12:00:00Z'
self.assertEqual(isotime.format(dt, usec=True, offset=True), dt_str_usec_offset)
self.assertEqual(isotime.format(dt, usec=True, offset=False), dt_str_usec)
self.assertEqual(isotime.format(dt, usec=False, offset=True), dt_str_offset)
self.assertEqual(isotime.format(dt, usec=False, offset=False), dt_str)
self.assertEqual(isotime.format(dt_str, usec=False, offset=False), dt_str)
self.assertEqual(isotime.format(dt_unicode, usec=False, offset=False), dt_unicode)
def test_format_tz_naive(self):
dt1 = datetime.datetime.utcnow()
dt2 = isotime.parse(isotime.format(dt1, usec=True))
self.assertEqual(dt2, isotime.add_utc_tz(dt1))
def test_format_tz_aware(self):
dt1 = isotime.add_utc_tz(datetime.datetime.utcnow())
dt2 = isotime.parse(isotime.format(dt1, usec=True))
self.assertEqual(dt2, dt1)
def test_format_sec_truncated(self):
dt1 = isotime.add_utc_tz(datetime.datetime.utcnow())
dt2 = isotime.parse(isotime.format(dt1, usec=False))
dt3 = datetime.datetime(dt1.year, dt1.month, dt1.day, dt1.hour, dt1.minute, dt1.second)
self.assertLess(dt2, dt1)
self.assertEqual(dt2, isotime.add_utc_tz(dt3))
|
eadgarchen/tensorflow | tensorflow/contrib/gan/python/features/python/conditioning_utils_impl.py | Python | apache-2.0 | 3,816 | 0.005241 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellanous utilities for TFGAN code and examples.
Includes:
1) Conditioning the value of a Tensor, based on t | echniques from
https://arxiv.org/abs/1609.03499.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import | layers
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
__all__ = [
'condition_tensor',
'condition_tensor_from_onehot',
]
def _get_shape(tensor):
tensor_shape = array_ops.shape(tensor)
static_tensor_shape = tensor_util.constant_value(tensor_shape)
return (static_tensor_shape if static_tensor_shape is not None else
tensor_shape)
def condition_tensor(tensor, conditioning):
"""Condition the value of a tensor.
Conditioning scheme based on https://arxiv.org/abs/1609.03499.
Args:
tensor: A minibatch tensor to be conditioned.
conditioning: A minibatch Tensor of to condition on. Must be 2D, with first
dimension the same as `tensor`.
Returns:
`tensor` conditioned on `conditioning`.
Raises:
ValueError: If the non-batch dimensions of `tensor` aren't fully defined.
ValueError: If `conditioning` isn't at least 2D.
ValueError: If the batch dimension for the input Tensors don't match.
"""
tensor.shape[1:].assert_is_fully_defined()
num_features = tensor.shape[1:].num_elements()
mapped_conditioning = layers.linear(
layers.flatten(conditioning), num_features)
if not mapped_conditioning.shape.is_compatible_with(tensor.shape):
mapped_conditioning = array_ops.reshape(
mapped_conditioning, _get_shape(tensor))
return tensor + mapped_conditioning
def _one_hot_to_embedding(one_hot, embedding_size):
"""Get a dense embedding vector from a one-hot encoding."""
num_tokens = one_hot.shape[1]
label_id = math_ops.argmax(one_hot, axis=1)
embedding = variable_scope.get_variable(
'embedding', [num_tokens, embedding_size])
return embedding_ops.embedding_lookup(
embedding, label_id, name='token_to_embedding')
def _validate_onehot(one_hot_labels):
one_hot_labels.shape.assert_has_rank(2)
one_hot_labels.shape[1:].assert_is_fully_defined()
def condition_tensor_from_onehot(tensor, one_hot_labels, embedding_size=256):
  """Condition a tensor based on a one-hot tensor.

  Conditioning scheme based on https://arxiv.org/abs/1609.03499.

  Args:
    tensor: Tensor to be conditioned.
    one_hot_labels: A Tensor of one-hot labels. Shape is
      [batch_size, num_classes].
    embedding_size: The size of the class embedding.

  Returns:
    `tensor` conditioned on `one_hot_labels`.

  Raises:
    ValueError: `one_hot_labels` isn't 2D, if non-batch dimensions aren't
      fully defined, or if batch sizes don't match.
  """
  _validate_onehot(one_hot_labels)
  # Map each one-hot label to a learned dense embedding, then condition on it.
  class_embedding = _one_hot_to_embedding(one_hot_labels, embedding_size)
  return condition_tensor(tensor, class_embedding)
|
rfadams/python-cvxopt | examples/book/chap6/huber.py | Python | gpl-3.0 | 1,874 | 0.017076 | # Figure 6.5, page 300.
# Robust regression.

from cvxopt import solvers, lapack, matrix, spmatrix
from pickle import load

#solvers.options['show_progress'] = 0

# Load the data set; u are the regressors, v the responses.
# (Fixed: file handle is now closed via a context manager.)
with open('huber.bin', 'rb') as f:
    data = load(f)
u, v = data['u'], data['v']
m, n = len(u), 2

# Design matrix [1, u] for the affine model v ~ x[0] + x[1]*u.
A = matrix( [m*[1.0], [u]] )
b = +v

# Least squares solution.
xls = +b
lapack.gels(+A, xls)
xls = xls[:2]

# Robust least squares.
#
# minimize  sum( h( A*x-b ))
#
# where h(u) = u^2           if |u| <= 1.0
#            = 2*(|u| - 1.0) if |u| > 1.0.
#
# Solve as a QP (see exercise 4.5):
#
# minimize    (1/2) * u'*u + 1'*v
# subject to  -u - v <= A*x-b <= u + v
#             0 <= u <= 1
#             v >= 0
#
# Variables  x (n), u (m), v(m)

novars = n+2*m
P = spmatrix([],[],[], (novars, novars))
P[n:n+m,n:n+m] = spmatrix(1.0, range(m), range(m))
q = matrix(0.0, (novars,1))
q[-m:] = 1.0

G = spmatrix([], [], [], (5*m, novars))
h = matrix(0.0, (5*m,1))

# A*x - b <= u+v
G[:m,:n] = A
G[:m,n:n+m] = spmatrix(-1.0, range(m), range(m))
G[:m,n+m:] = spmatrix(-1.0, range(m), range(m))
h[:m] = b

# -u - v <= A*x - b
G[m:2*m,:n] = -A
G[m:2*m,n:n+m] = spmatrix(-1.0, range(m), range(m))
G[m:2*m,n+m:] = spmatrix(-1.0, range(m), range(m))
h[m:2*m] = -b

# u >= 0
G[2*m:3*m,n:n+m] = spmatrix(-1.0, range(m), range(m))

# u <= 1
G[3*m:4*m,n:n+m] = spmatrix(1.0, range(m), range(m))
h[3*m:4*m] = 1.0

# v >= 0
G[4*m:,n+m:] = spmatrix(-1.0, range(m), range(m))

xh = solvers.qp(P, q, G, h)['x'][:n]

try: import pylab
except ImportError: pass
else:
    pylab.figure(1,facecolor='w')
    pylab.plot(u, v,'o',
        [-11,11], [xh[0]-11*xh[1], xh[0]+11*xh[1]], '-g',
        [-11,11], [xls[0]-11*xls[1], xls[0]+11*xls[1]], '--r',
        markerfacecolor='w', markeredgecolor='b')
    pylab.axis([-11, 11, -20, 25])
    pylab.xlabel('t')
    pylab.ylabel('f(t)')
    pylab.title('Robust regression (fig. 6.5)')
    pylab.show()
|
django-bmf/django-bmf | djangobmf/migrations/0003_auto_20160511_1609.py | Python | bsd-3-clause | 4,122 | 0.005822 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import djangobmf.fields.file
class Migration(migrations.Migration):
    """Add the PDFRenderer and Report models (replacing the old report)."""

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('djangobmf', '0002_remove_old_report'),
    ]

    operations = [
        migrations.CreateModel(
            name='PDFRenderer',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('name', models.CharField(verbose_name='Name', max_length=20)),
                ('size', models.CharField(verbose_name='Size', max_length=10, default='A4')),
                # NOTE(review): verbose_name 'Size' is duplicated from the
                # field above (probably meant 'Form'); left unchanged so the
                # recorded migration state stays identical.
                ('form', models.CharField(verbose_name='Size', max_length=10, default='A')),
                ('template_extends', models.CharField(null=True, verbose_name='Template Extends', blank=True, max_length=40)),
                ('letter', models.BooleanField(verbose_name='Letter', default=True)),
                ('letter_margin_top', models.PositiveIntegerField(verbose_name='Letter margin top', blank=True, null=True)),
                ('letter_margin_right', models.PositiveIntegerField(verbose_name='Letter margin right', default=40)),
                ('letter_margin_bottom', models.PositiveIntegerField(verbose_name='Letter margin bottom', default=10)),
                ('letter_margin_left', models.PositiveIntegerField(verbose_name='Letter margin left', blank=True, null=True)),
                ('page_margin_top', models.PositiveIntegerField(verbose_name='Page margin top', default=20)),
                ('page_margin_right', models.PositiveIntegerField(verbose_name='Page margin right', default=40)),
                ('page_margin_bottom', models.PositiveIntegerField(verbose_name='Page margin bottom', default=10)),
                ('page_margin_left', models.PositiveIntegerField(verbose_name='Page margin left', blank=True, null=True)),
                ('modified', models.DateTimeField(verbose_name='Modified', auto_now=True)),
                ('letter_background', djangobmf.fields.file.FileField(verbose_name='Letter background', blank=True, to='djangobmf.Document', related_name='+', null=True)),
                ('page_background', djangobmf.fields.file.FileField(verbose_name='Page background', blank=True, to='djangobmf.Document', related_name='+', null=True)),
            ],
            options={
                'get_latest_by': 'modified',
                'abstract': False,
                'verbose_name': 'PDF Renderer',
                'verbose_name_plural': 'PDF Renderer',
            },
        ),
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('name', models.CharField(verbose_name='Name', max_length=120)),
                ('slug', models.CharField(verbose_name='Slug', max_length=120)),
                ('renderer_pk', models.PositiveIntegerField(null=True, blank=True)),
                ('renderer_view', models.CharField(max_length=254)),
                ('has_object', models.NullBooleanField()),
                ('enabled', models.BooleanField(default=False)),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Modified', null=True)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created', null=True)),
                ('contenttype', models.ForeignKey(null=True, blank=True, to='contenttypes.ContentType', related_name='bmf_report')),
                ('renderer_ct', models.ForeignKey(null=True, blank=True, to='contenttypes.ContentType', related_name='+')),
            ],
            options={
                'get_latest_by': 'modified',
                'abstract': False,
                'verbose_name': 'Report',
                'verbose_name_plural': 'Reports',
            },
        ),
        migrations.AlterUniqueTogether(
            name='report',
            unique_together=set([('slug', 'contenttype')]),
        ),
    ]
|
clara-labs/imaplib3 | imaplib3/response.py | Python | mit | 513 | 0 | import re
# Matches one IMAP LIST response line: (attributes) "delimiter" "name".
list_re = re.compile(r'\((.*)\) \"(.*)\" \"(.*)\"')


class Response(object):
    """IMAP server completion result codes."""
    # There are three possible server completion responses
    OK = "OK"    # indicates success
    NO = "NO"    # indicates failure
    BAD = "BAD"  # indicates a protocol error


class ListResponse(object):
    """Parsed form of a single IMAP LIST response line."""

    def __init__(self, list_response):
        match = list_re.match(list_response)
        self.attributes = match.group(1).split()
        self.hierarchy_delimiter = match.group(2)
        self.name = match.group(3)
|
dante092/Mega-Bus-Web-Crawler | megabus_request.py | Python | mit | 8,077 | 0.0026 | import webtools
import re
class Trip():
    """Models a megabus trip scraped from a results page."""

    def __init__(self, data, number, mode, crawling_day):
        """Initializes basic trip data.

        :param data: raw text scraped for this trip
        :param number: zero-based index of the trip in the results page
        :param mode: 'inbound' or 'outbound'
        :param crawling_day: the date the trip was crawled for
        """
        self.data = data
        self.trip_number = number
        self.mode = mode
        self.day = crawling_day

    def price(self, verbose=True):
        """
        Returns the price of the trip, and prints the price if verbose is set to True
        :return: price = int
        """
        # Dollar sign followed by two digits. NOTE(review): only the first
        # two digits are captured, so $100+ fares would be truncated --
        # confirm fares never exceed $99.
        price_regex = re.compile(r"\$\d\d")
        price = price_regex.findall(self.data)[0]
        if verbose:
            print('Price: ', price)
        # Clean up data, so it can be converted to int easier.
        return int(price.replace('$', ''))

    def departure_time(self):
        """Gets & Prints the departure time, :Returns: departure_time = str """
        # Anchored at the start: 'Departs' + time + am/pm marker chars.
        departure_regex = re.compile(r"^(Departs\d+:\d\d...)")
        departure_time = departure_regex.findall(self.data)[0].replace('Departs', '')
        print('Departing: ', departure_time)
        return departure_time

    def arrival_time(self):
        """Gets & Prints the arrival time, :Returns: arrival_time = str """
        arrival_regex = re.compile(r"(Arrives\d+:\d\d...)")
        arrival_time = arrival_regex.findall(self.data)[0].replace('Arrives', '')
        print('Arriving: ', arrival_time)
        return arrival_time

    def display_trip(self):
        """Displays some of the current trip attributes."""
        print('\n')
        # NOTE(review): the printed label is the opposite of `mode`;
        # confirm whether this inversion is intentional.
        if self.mode == 'inbound':
            print(' Outbound Trip {0} '.center(50, '=').format(self.trip_number + 1))
        if self.mode == 'outbound':
            print(' Inbound Trip {0} '.center(50, '=').format(self.trip_number + 1))
        self.price()
        self.departure_time()
        self.arrival_time()
def generate_city_code(citi):
    """
    A dictionary of city codes used by megabus to identify each city.

    The lookup is case-insensitive and ignores surrounding whitespace.
    Raises KeyError if the city is not present in the table.
    :return: The proper city code, string.
    """
    citi = citi.strip()  # Strips the city provided of any extra spaces
    citi = citi.upper()
    # NOTE(review): 'HOLYOKE, CT' and 'NEW HAVEN, CT' share state codes that
    # look suspect (Holyoke is in MA) -- confirm against megabus's own table.
    citi_codes = {
        'ALBANY, NY': '89',
        'AMHERST, MA': '90',
        'ANN ARBOR, MI': '91',
        'ATLANTIC CITY, NJ': '92',
        'BINGHAMTON, NY': '93',
        'BOSTON, MA': '94',
        'BUFFALO, NY': '95',
        'BURLINGTON, VT': '96',
        'CAMDEN': '97',
        'CHAMPAIGN, IL': '98',
        'CHARLOTTE, NC': '99',
        'CHICAGO, IL': '100',
        'CHRISTIANSBURG, VA': '101',
        'CINCINNATI, OH': '102',
        'CLEVELAND, OH': '103',
        'COLUMBIA, MO': '104',
        'COLUMBUS, OH': '105',
        'DES MOINES, IA': '106',
        'DETROIT, MI': '107',
        'ERIE, PA': '108',
        'FREDERICK, MD': '109',
        'HAMPTON, VA': '110',
        'HARRISBURG, PA': '111',
        'HARTFORD, CT': '112',
        'HOLYOKE, CT': '113',
        'HYANNIS, MA': '114',
        'INDIANAPOLIS, IN': '115',
        'IOWA CITY, IA': '116',
        'KANSAS CITY, MO': '117',
        'KNOXVILLE, TN': '118',
        'MADISON, WI': '119',
        'MEMPHIS, TN': '120',
        'MILWAUKEE, WI': '121',
        'NEW HAVEN, CT': '122',
        'NEW YORK, NY': '123',
        'NIAGARA FALLS, ON': '124',
        'NORMAL, IL': '125',
        'OMAHA, NE': '126',
        'PHILADELPHIA, PA': '127',
        'PITTSBURGH, PA': '128',
        'PORTLAND, ME': '129',
        'PROVIDENCE, RI': '130',
        'DURHAM, NC': '131',
        'RICHMOND, VA': '132',
        'RIDGEWOOD, NJ': '133',
        'ROCHESTER, NY': '134',
        'SECAUCUS, NJ': '135',
        'ST LOUIS, MO': '136',
        'STATE COLLEGE, PA': '137',
        'STORRS, CT': '138',
        'SYRACUSE, NY': '139',
        'TOLEDO, OH': '140',
    }
    return citi_codes[citi]  # Returns the city code to be formatted into an URL.
def generate_date(date):
    """
    Format a 'MM/DD/YYYY' date as 'MM%2fDD%2fYYYY' for megabus URLs.

    '%2f' is the URL-encoded form of '/'. Returns: String
    """
    # Split on the slashes (via newline substitution, as the site code did).
    month, day, year = date.replace('/', '\n').split()
    return month + '%2f' + day + '%2f' + year
def format(origin, destination, crawling_date, passengers='2'):
    """Formats a Megabus URL with the destination information.

    NOTE(review): shadows the builtin `format`; name kept for caller
    compatibility.
    """
    base = 'http://us.megabus.com/JourneyResults.aspx?'
    origincode = 'originCode=' + generate_city_code(origin)
    destinationcode = '&destinationCode=' + generate_city_code(destination)  # Crawling date is provided twice
    departuredate = '&outboundDepartureDate=' + generate_date(crawling_date)  # This is done to get both outgoing
    coming_back = '&inboundDepartureDate=' + generate_date(crawling_date)  # and ingoing trips with the same URL
    passengers = '&passengerCount=' + passengers
    rest_of_url = '&transportType=0&concessionCount=0&nusCount=0&outboundWheelchairSeated=0&outboundOtherDisabilityCount=0&inboundWheelchairSeated=0&inboundOtherDisabilityCount=0&outboundPcaCount=0&inboundPcaCount=0&promotionCode=&withReturn=1'
    url = base + origincode + destinationcode + departuredate + coming_back + passengers + rest_of_url
    return url
def params_message(soup):
    """Print a concise summary of the search parameters found in `soup`."""
    # The message text lives under div tags of class "search_params".
    print('|SEARCHING FOR TRIP TO|')
    for node in soup.findAll('div', {"class": "search_params"}):
        # Strip tabs and newlines before printing.
        text = node.getText().replace('\t', '').replace('\n', '')
        print(text)
def format_trip_id(number, mode):
    """Format the HTML element id used to locate trip `number` on the page.

    `mode` is 'inbound' or 'outbound'. The template's trailing counter is
    substituted with the stringified trip number: two-digit numbers replace
    the full two-digit placeholder, single digits only its last digit.
    Prints a warning and returns None for any other mode.
    """
    if mode == 'inbound':
        template = 'JourneyResylts_InboundList_GridViewResults_ctl07_row_item'
        if number > 9:
            return template.replace('07', str(number))
        return template.replace('7', str(number))
    if mode == 'outbound':
        template = 'JourneyResylts_OutboundList_GridViewResults_ctl09_row_item'
        if number > 9:
            return template.replace('09', str(number))
        return template.replace('9', str(number))
    print("Something is wrong with Mode")
def download_trips(url, id, mode):
    """Return a list of cleaned text blobs for the trip identified by `id`."""
    identification = format_trip_id(id, mode)
    html = webtools.DownloadData(url)
    raw = [node.getText() for node in html.findAll('ul', id=identification)]
    trip = []
    for text in raw:
        # Strip whitespace control characters from the scraped text.
        for ch in ('\t', '\n', '\r'):
            text = text.replace(ch, '')
        trip.append(text)
    return trip
|
hhstore/learning-notes | python/src/project/py27/flask_base/flask_online_calculator/server/base.py | Python | mit | 165 | 0.007092 | # -*- coding:utf-8 | -*-
from flask import Flask
app = Flask(__name__)
app.config.from_object('config')  # tell Flask to read config.py and apply its settings
|
imgmix/django-avatar | avatar/migrations/__init__.py | Python | bsd-3-clause | 608 | 0 | """
Django migrations for django-avatar app
This package does not contain South migrations. South migrations can be found
in the ``south_migrations`` package.
"""
SOUTH_ERROR_MESSAGE = """\n
For South support, customize the SOUTH_MIGRATION_MODULES setting like so:

    SOUTH_MIGRATION_MODULES = {
        'django-avatar': 'avatar.south_migrations',
    }
"""

# Ensure the user is not using Django 1.6 or below with South
try:
    from django.db import migrations  # noqa
except ImportError:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured(SOUTH_ERROR_MESSAGE)
|
cartwheelweb/packaginator | apps/apiv1/__init__.py | Python | mit | 448 | 0 | """Restful api for the packaginator, based on ``django-tastypie``
(`docs <http://django-tastypie.readthedocs.org/>`_
|
`pypi <http://pypi.python.org/pypi/django-tastypie/>`_
|
`repo <https://github.com/toastdriven/django-tastypie/>`_).
This module consists of two components - module :mod:`~apps.apiv1.api`
and the resource definition module :mod:`~apps.apiv1.resources`.
The api urls are exposed in project's main :mod:`urls.py <urls>` file
"""
|
WarwickFilmSoc/WWW-V4 | config.py | Python | gpl-3.0 | 799 | 0.007509 | import os
class Config(object):
    """Base configuration shared by all environments."""

    # Where to find the templates to build the pages from
    TEMPLATE_ROOT = 'templates/default/'  # folder containing current template relative to app folder

    DEBUG = True
    DEVELOPMENT = True

    # Secret key: regenerated on every process start, which invalidates
    # existing sessions across restarts -- TODO confirm that is acceptable.
    SECRET_KEY = os.urandom(24)

    # Flask-SSO Configuration: SSO attribute -> (required, local name).
    SSO_ATTRIBUTE_MAP = {
        'pid': (True, 'pid'),
        'givenName': (True, 'givenName'),
        'surname': (True, 'surname'),
        'username': (True, 'username'),
        'photo_id': (False, 'photo_id'),
        'mail': (True, 'mail'),
        'web_id': (True, 'web_id')
    }
    SSO_LOGIN_URL = '/login'
    SSO_LOGIN_ENDPOINT = '/login/sso'
class ProductionConfig(Config):
    """Production settings: debugging off, secret key loaded from disk."""
    DEVELOPMENT = False
    DEBUG = False
    # NOTE(review): read at import time; the handle is never closed and any
    # trailing newline is kept -- confirm the /opt/secretkey file format.
    SECRET_KEY = open('/opt/secretkey').read()
ganeshnalawade/ansible | lib/ansible/modules/yum.py | Python | gpl-3.0 | 68,822 | 0.00279 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Copyright: (c) 2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
- Installs, upgrade, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(ansible.builtin.dnf) module.
options:
use_backend:
description:
- This module supports C(yum) (as it always has), this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
"new yum" and it has an C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
default: "auto"
choices: [ auto, yum, yum4, dnf ]
type: str
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
type: list
elements: str
exclude:
description:
- Package name(s) to exclude when state=present, or latest
type: list
elements: str
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list --show-duplicates <package> against. In addition to listing packages,
use can also list the following: C(installed), C(updates), C(available) and C(repos)."
- This parameter is mutually exclusive with C(name).
type: str
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
type: str
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
type: list
elements: str
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
type: list
elements: str
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
type: str
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
- Skip packages with broken dependencies(devsolve) and are causing problems.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
type: str
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
default: "no"
type: bool
version_added: "2.6"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
type: list
elements: str
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyo | nd the transaction.
type: list
elements: str
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
type: str
version_added: "2.7"
autoremove:
description:
- If C(yes), removes all "leaf" packages f | rom the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: "no"
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
type: str
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the yum lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
|
trachelr/mne-python | mne/surface.py | Python | bsd-3-clause | 39,201 | 0.000204 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
from os import path as op
import sys
from struct import pack
from glob import glob
import numpy as np
from scipy import sparse
from .bem import read_bem_surfaces
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tree import dir_tree_find
from .io.tag import find_tag
from .io.write import (write_int, start_file, end_block,
start_block, end_file, write_string,
write_float_sparse_rcs)
from .channels.channels import _get_meg_system
from .transforms import transform_surface_to
from .utils import logger, verbose, get_subjects_dir
from .externals.six import string_types
###############################################################################
# AUTOMATED SURFACE FINDING
def get_head_surf(subject, source=('bem', 'head'), subjects_dir=None):
    """Load the subject head surface

    Parameters
    ----------
    subject : str
        Subject name.
    source : str | list of str
        Type to load. Common choices would be `'bem'` or `'head'`. We first
        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory by going
        through all files matching the pattern. The head surface will be read
        from the first file containing a head surface. Can also be a list
        to try multiple strings.
    subjects_dir : str, or None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.

    Returns
    -------
    surf : dict
        The head surface.
    """
    # Load the head surface from the BEM
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # use realpath to allow for linked surfaces (c.f. MNE manual 196-197)
    if isinstance(source, string_types):
        source = [source]
    surf = None
    for this_source in source:
        this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
                                        '%s-%s.fif' % (subject, this_source)))
        if op.exists(this_head):
            surf = read_bem_surfaces(this_head, True,
                                     FIFF.FIFFV_BEM_SURF_ID_HEAD)
        else:
            # let's do a more sophisticated search
            path = op.join(subjects_dir, subject, 'bem')
            if not op.isdir(path):
                raise IOError('Subject bem directory "%s" does not exist'
                              % path)
            files = sorted(glob(op.join(path, '%s*%s.fif'
                                        % (subject, this_source))))
            for this_head in files:
                try:
                    surf = read_bem_surfaces(this_head, True,
                                             FIFF.FIFFV_BEM_SURF_ID_HEAD)
                except ValueError:
                    # file did not contain a head surface; keep looking
                    pass
                else:
                    break
        if surf is not None:
            break

    if surf is None:
        raise IOError('No file matching "%s*%s" and containing a head '
                      'surface found' % (subject, this_source))
    return surf
def get_meg_helmet_surf(info, trans=None):
    """Load the MEG helmet associated with the MEG sensors

    Parameters
    ----------
    info : instance of io.meas_info.Info
        Measurement info.
    trans : dict
        The head<->MRI transformation, usually obtained using
        read_trans(). Can be None, in which case the surface will
        be in head coordinates instead of MRI coordinates.

    Returns
    -------
    surf : dict
        The MEG helmet as a surface.
    """
    system = _get_meg_system(info)
    helmet_fname = op.join(op.split(__file__)[0], 'data', 'helmets',
                           system + '.fif.gz')
    surf = read_bem_surfaces(helmet_fname, False,
                             FIFF.FIFFV_MNE_SURF_MEG_HELMET)

    # Ignore what the file says, it's in device coords and we want MRI coords
    surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
    transform_surface_to(surf, 'head', info['dev_head_t'])
    if trans is not None:
        transform_surface_to(surf, 'mri', trans)
    return surf
###############################################################################
# EFFICIENCY UTILITIES
def fast_cross_3d(x, y):
    """Compute cross product between list of 3D vectors

    Much faster than np.cross() when the number of cross products
    becomes large (>500). This is because np.cross() methods become
    less memory efficient at this stage.

    Parameters
    ----------
    x : array
        Input array 1.
    y : array
        Input array 2.

    Returns
    -------
    z : array
        Cross product of x and y.

    Notes
    -----
    x and y must both be 2D row vectors. One must have length 1, or both
    lengths must match.
    """
    assert x.ndim == 2 and y.ndim == 2
    assert x.shape[1] == 3 and y.shape[1] == 3
    assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
    if max(x.shape[0], y.shape[0]) < 500:
        return np.cross(x, y)
    # Expanded form is more memory-efficient than np.cross for many rows.
    return np.stack([x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
                     x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
                     x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]], axis=-1)
def _fast_cross_nd_sum(a, b, c):
"""Fast cross and sum"""
return ((a[..., 1] * b[..., 2] - a[..., 2] * b[..., 1]) * c[..., 0] +
(a[..., 2] * b[..., 0] - a[..., 0] * b[..., 2]) * c[..., 1] +
(a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]) * c[..., 2])
def _accumulate_normals(tris, tri_nn, npts):
"""Efficiently accumulate triangle normals"""
# this code replaces the following, but is faster (vectorized):
#
# this['nn'] = np.zeros((this['np'], 3))
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['nn'][verts, :] += this['tri_nn'][p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, weights=tri_nn[:, idx],
minlength=npts)
return nn
def _triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = [np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])]
return neighbor_tri
def _triangle_coords(r, geom, best):
"""Get coordinates of a vertex projected to a triangle"""
r1 = geom['r1'][best]
tri_nn = geom['nn'][best]
r12 = geom['r12'][best]
r13 = geom['r13'][best]
a = geom['a'][best]
b = geom['b'][best]
c = geom['c'][best]
rr = r - r1
z = np.sum(rr * tri_nn)
v1 = np.sum(rr * r12)
v2 = np.sum(rr * r13)
det = a * b - c * c
x = (b * v1 - c * v2) / det
y = (a * v2 - c * v1) / det
return x, y, z
@verbose
def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
"""Complete surface info"""
# based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
# Main triangulation [mne_add_triangle_data()]
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][ |
chjp2046/fbthrift | thrift/test/py/UTF8StringTest.py | Python | apache-2.0 | 2,919 | 0.003083 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @lint-avoid-python-3-compatibility-imports
from ThriftTest.ttypes import *
from thrift.transport import TTranspor | t
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.util import Serializer
import unittest
import sys
if sys.version_info[0] < 3:
    # On Python 2, alias chr to unichr so chr() yields unicode characters
    # (matching Python 3 semantics) for the test fixtures below.
    chr = unichr
class AbstractTest():
    """Round-trip serialization checks shared by all protocol test cases.

    Subclasses must set `protocol_factory` and also inherit
    unittest.TestCase.
    """

    def setUp(self):
        # Fixture mixing plain ASCII with non-ASCII/BMP-boundary code points.
        self.obj = ListTypeVersioningV2(
            strings=["plain thing", chr(40960) + 'fun' + chr(1972)],
            hello=u"hello\xac\u1234\u20ac\U00008000"
        )

    def _serialize(self, obj):
        return Serializer.serialize(self.protocol_factory, obj)

    def _deserialize(self, objtype, data):
        return Serializer.deserialize(self.protocol_factory, data, objtype())

    def testUnicodeString(self):
        # Serialize then deserialize; all string fields must survive intact.
        obj2 = self._deserialize(ListTypeVersioningV2,
                                 self._serialize(self.obj))
        self.assertEquals(obj2.strings[0], self.obj.strings[0])
        self.assertEquals(obj2.strings[1], self.obj.strings[1])
        self.assertEquals(obj2.hello, self.obj.hello)
# Concrete test cases: one per protocol implementation variant.
class NormalBinaryTest(AbstractTest, unittest.TestCase):
    # Pure-Python binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest, unittest.TestCase):
    # C-accelerated binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class NormalCompactTest(AbstractTest, unittest.TestCase):
    # Pure-Python compact protocol.
    protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class AcceleratedCompactTest(AbstractTest, unittest.TestCase):
    # C-accelerated compact protocol.
    protocol_factory = TCompactProtocol.TCompactProtocolAcceleratedFactory()
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(NormalCompactTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedCompactTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite",
testRunner=unittest.TextTestRunner(verbosity=2))
|
dc3-plaso/plaso | plaso/lib/timelib.py | Python | apache-2.0 | 30,598 | 0.006242 | # -*- coding: utf-8 -*-
"""Time manipulation functions and variables.
This module contain common methods that can be used to convert timestamps
from various formats into number of micro seconds since January 1, 1970,
00:00:00 UTC that is used internally to store timestamps.
It also contains various functions to represent timestamps in a more
human readable form.
"""
import calendar
import datetime
import logging
import time
import construct
import dateutil.parser
import pytz
from plaso.lib import errors
from plaso.lib import py2to3
MONTH_DICT = {
u'jan': 1,
u'feb': 2,
u'mar': 3,
u'apr': 4,
u'may': 5,
u'jun': 6,
u'jul': 7,
u'aug': 8,
u'sep': 9,
u'oct': 10,
u'nov': 11,
u'dec': 12}
class Timestamp(object):
"""Class for converting timestamps to plaso timestamps.
The Plaso timestamp is a 64-bit signed timestamp value containing:
micro seconds since 1970-01-01 00:00:00.
The timestamp is not necessarily in UTC.
"""
# The minimum timestamp in seconds
TIMESTAMP_MIN_SECONDS = -(((1 << 63) - 1) / 1000000)
# The maximum timestamp in seconds
TIMESTAMP_MAX_SECONDS = ((1 << 63) - 1) / 1000000
# The minimum timestamp in micro seconds
TIMESTAMP_MIN_MICRO_SECONDS = -((1 << 63) - 1)
# The maximum timestamp in micro seconds
TIMESTAMP_MAX_MICRO_SECONDS = (1 << 63) - 1
# Timesta | mp that represents the timestamp representing not
# a date and time value.
# TODO: replace this with a real None implementation.
NONE_TIMESTAMP = 0
# The days per month of a non leap year
DAYS_PER_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# The number of seconds in a day
SECONDS_PER_DAY = 24 * 60 * 60
# The number of micro seconds per second
MICRO_SECONDS_PER_SECOND = 1000000
# The multiplication factor to change milliseconds to micro second | s.
MILLI_SECONDS_TO_MICRO_SECONDS = 1000
# The difference between Jan 1, 1980 and Jan 1, 1970 in seconds.
FAT_DATE_TO_POSIX_BASE = 315532800
# The difference between Jan 1, 1601 and Jan 1, 1970 in micro seconds
WEBKIT_TIME_TO_POSIX_BASE = 11644473600 * 1000000
# The difference between Jan 1, 1601 and Jan 1, 1970 in 100 nanoseconds.
FILETIME_TO_POSIX_BASE = 11644473600 * 10000000
# The difference between Nov 10, 1582 and Jan 1, 1970 in 100 nanoseconds.
UUID_TIME_TO_POSIX_BASE = 12219292800 * 10000000
# The number of seconds between January 1, 1904 and Jan 1, 1970.
# Value confirmed with sleuthkit:
# http://svn.sleuthkit.org/repos/sleuthkit/trunk/tsk3/fs/tsk_hfs.h
# and linux source file linux/include/linux/hfsplus_fs.h
HFSTIME_TO_POSIX_BASE = 2082844800
# The number of seconds between January 1, 1970 and January 1, 2001.
# As specified in:
# https://developer.apple.com/library/ios/documentation/
# cocoa/Conceptual/DatesAndTimes/Articles/dtDates.html
COCOA_TIME_TO_POSIX_BASE = 978307200
# The difference between POSIX (Jan 1, 1970) and DELPHI (Dec 30, 1899).
# http://docwiki.embarcadero.com/Libraries/XE3/en/System.TDateTime
DELPHI_TIME_TO_POSIX_BASE = 25569
# The Windows SYSTEMTIME structure.
SYSTEMTIME_STRUCT = construct.Struct(
u'timestamp',
construct.ULInt16(u'year'),
construct.ULInt16(u'month'),
construct.ULInt16(u'weekday'),
construct.ULInt16(u'day'),
construct.ULInt16(u'hour'),
construct.ULInt16(u'minutes'),
construct.ULInt16(u'seconds'),
construct.ULInt16(u'milliseconds'))
@classmethod
def CopyFromString(cls, time_string):
"""Copies a timestamp from a string containing a date and time value.
Args:
time_string: A string containing a date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the
seconds fraction can be either 3 or 6 digits. The time
of day, seconds fraction and timezone offset are optional.
The default timezone is UTC.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
if not time_string:
raise ValueError(u'Invalid time string.')
time_string_length = len(time_string)
# The time string should at least contain 'YYYY-MM-DD'.
if (time_string_length < 10 or time_string[4] != u'-' or
time_string[7] != u'-'):
raise ValueError(u'Invalid time string.')
# If a time of day is specified the time string it should at least
# contain 'YYYY-MM-DD hh:mm:ss'.
if (time_string_length > 10 and (
time_string_length < 19 or time_string[10] != u' ' or
time_string[13] != u':' or time_string[16] != u':')):
raise ValueError(u'Invalid time string.')
try:
year = int(time_string[0:4], 10)
except ValueError:
raise ValueError(u'Unable to parse year.')
try:
month = int(time_string[5:7], 10)
except ValueError:
raise ValueError(u'Unable to parse month.')
if month not in range(1, 13):
raise ValueError(u'Month value out of bounds.')
try:
day_of_month = int(time_string[8:10], 10)
except ValueError:
raise ValueError(u'Unable to parse day of month.')
if day_of_month not in range(1, 32):
raise ValueError(u'Day of month value out of bounds.')
hours = 0
minutes = 0
seconds = 0
if time_string_length > 10:
try:
hours = int(time_string[11:13], 10)
except ValueError:
raise ValueError(u'Unable to parse hours.')
if hours not in range(0, 24):
raise ValueError(u'Hours value out of bounds.')
try:
minutes = int(time_string[14:16], 10)
except ValueError:
raise ValueError(u'Unable to parse minutes.')
if minutes not in range(0, 60):
raise ValueError(u'Minutes value out of bounds.')
try:
seconds = int(time_string[17:19], 10)
except ValueError:
raise ValueError(u'Unable to parse day of seconds.')
if seconds not in range(0, 60):
raise ValueError(u'Seconds value out of bounds.')
micro_seconds = 0
timezone_offset = 0
if time_string_length > 19:
if time_string[19] != u'.':
timezone_index = 19
else:
for timezone_index in range(19, time_string_length):
if time_string[timezone_index] in [u'+', u'-']:
break
# The calculation that follow rely on the timezone index to point
# beyond the string in case no timezone offset was defined.
if timezone_index == time_string_length - 1:
timezone_index += 1
if timezone_index > 19:
fraction_of_seconds_length = timezone_index - 20
if fraction_of_seconds_length not in [3, 6]:
raise ValueError(u'Invalid time string.')
try:
micro_seconds = int(time_string[20:timezone_index], 10)
except ValueError:
raise ValueError(u'Unable to parse fraction of seconds.')
if fraction_of_seconds_length == 3:
micro_seconds *= 1000
if timezone_index < time_string_length:
if (time_string_length - timezone_index != 6 or
time_string[timezone_index + 3] != u':'):
raise ValueError(u'Invalid time string.')
try:
timezone_offset = int(time_string[
timezone_index + 1:timezone_index + 3])
except ValueError:
raise ValueError(u'Unable to parse timezone hours offset.')
if timezone_offset not in range(0, 24):
raise ValueError(u'Timezone hours offset value out of bounds.')
# Note that when the sign of the timezone offset is negative
# the difference needs to be added. We do so by flipping the sign.
if time_string[timezone_index] == u'-':
timezone_offset *= 60
else:
timezone_offset *= -60
try:
timezone_offset += int(time_string[
timezone_index + 4:timezone_index + 6])
except ValueError:
raise Val |
anthill-services/anthill-common | anthill/common/social/xsolla.py | Python | mit | 4,007 | 0.000998 |
from tornado.httpclient import HTTPRequest, HTTPError
import ujson
import abc
import socket
from urllib import parse
from .. import admin as a
from .. social import SocialNetworkAPI, APIError, SocialPrivateKey
class XsollaAPI(SocialNetworkAPI, metaclass=abc.ABCMeta):
XSOLLA_API = "https://api.xsolla.com"
NAME = "xsolla"
def __init__(self, cache):
super(XsollaAPI, self).__init__(XsollaAPI.NAME, cache)
async def api_get(self, operation, merchant_id, api_key, **kwargs):
request = HTTPRequest(
XsollaAPI.XSOLLA_API + "/merchant/merchants/" +
str(merchant_id) + "/" + operation + "?" + parse.urlencode(kwargs),
method="GET",
auth_mode="basic",
auth_username=str(merchant_id),
auth_password=str(api_key),
headers={
"Content-Type": "application/json",
"Accept": "application/json"
})
result = await self.client.fetch(request)
try:
response_object = ujson.loads(result.body)
except (KeyError, ValueError):
raise APIError(500, "Corrupted xsolla response")
return response_object
async def api_post(self, operation, merchant_id, api_key, **kwargs):
request = HTTPRequest(
XsollaAPI.XSOLLA_API + "/merchant/merchants/" + str(merchant_id) + "/" + operation,
body=ujson.dumps(kwargs),
method="POST",
auth_mode="basic",
auth_username=str(merchant_id),
auth_password=str(api_key),
headers={
"Content-Type": "application/json",
"Accept": "application/json"
})
try:
result = await self.client.fetch(request)
except socket.error as e:
raise APIError(500, "Connection error: " + str(e))
except HTTPError as e:
try:
parsed = ujson.loads(e.response.body)
except (KeyError, ValueError):
raise APIError(e.code, "Internal API error")
else:
code = parsed.get("http_status_code", e.code)
message = parsed.get("message", "Internal API error")
raise APIError(code, message)
try:
response_object = | ujson.loads(result.body)
except (KeyError, ValueError):
raise APIError(500, "Corrupted xsolla response")
return response_object
def has_private_key(self):
return True
def new_private_key(self, data) | :
return XsollaPrivateKey(data)
class XsollaPrivateKey(SocialPrivateKey):
def __init__(self, key):
super(XsollaPrivateKey, self).__init__(key)
self.api_key = self.data["api_key"] if self.data else None
self.project_key = self.data["project_key"] if self.data else None
self.merchant_id = self.data["merchant_id"] if self.data else None
def get_app_id(self):
return self.merchant_id
def dump(self):
return {
"api_key": self.api_key,
"project_key": self.project_key,
"merchant_id": self.merchant_id,
}
def has_ui(self):
return True
def get(self):
return {
"api_key": self.api_key,
"project_key": self.project_key,
"merchant_id": self.merchant_id
}
def render(self):
return {
"merchant_id": a.field(
"Merchant ID", "text", "primary", "non-empty",
order=1,),
"project_key": a.field(
"Project Key", "text", "primary", "non-empty",
order=2),
"api_key": a.field(
"API Key", "text", "primary", "non-empty",
order=2)
}
def update(self, merchant_id, project_key, api_key, **ignored):
self.merchant_id = merchant_id
self.project_key = project_key
self.api_key = api_key
|
dpo/opal | opal/core/datamanager.py | Python | lgpl-3.0 | 3,673 | 0.003539 | import os
import string
import types
import time
import shutil
import log
import copy
#import logging
#import utility
from testproblem import TestProblem
from mafrw import Agent
from .. import config
class DataManager(Agent):
"""
An object of this class is responsable to collect data,
support access to data of a test corresponding to one set
of parameter
During a session working of data generator, it is activated
at first, wait for signal of a task to collect the data and
store data following its rules. Normally, the data is stored
in external memory (for example a file) but a part can be loaded
into memory (a variable of DataController object).
The other components needing data sends to this object a
request and get the data.
"""
def __init__(self,
name='data manager',
rows=None,
columns=None,
storage=None,
logHandlers=[]):
self.file_name = 'data_storage.txt'
Agent.__init__(self, name=name, logHandlers=logHandlers)
self.message_handlers['inform-measure-values'] = self.add_data
self.message_handlers['cfp-collect'] = self.collect_data
return
# Management functionality
def update(self, problem, parameterTag, data):
return
def find(self, query):
return
# Message handlers
def add_data(self, info):
if info is None:
return
paramTag = info['proposition']['parameter-tag']
prob = info['proposition']['problem']
data = info['proposition']['values']
# Update the entry
self.update(parameterTag=paramTag,
problem=prob,
data=data)
# Get all data entry corresponding to the tag
dataEntry = self.find(self, query={'tag':paramTag})
# Send a message informing availbility of data
if dataEntry.is_complete():
msg = Message(sender=sel | f.id,
performative='inform',
content={'proposition':{'what':'data-availability',
'how':'complete',
| 'data':dataEntry}
}
)
else:
msg = Message(sender=self.id,
performative='inform',
content={'proposition':{'what':'data-availability',
'how':'partial',
'data':dataEntry}
}
)
self.send_message(msg)
return
def find_data(self, info):
(paramValues, problem) = message.content['proposition']
measureValues = self.query_data(parameters=paramValues,
problem=problem)
# If getting data is successful
if measureValues is not None:
# Create a reply message whose content contains the
# measure values
msgCont = self.encrypt(measureValues)
msg = Message(sender=self.id,
content=msgCont,
performative='inform',
reference=message.id)
return
def collect_data(self, message):
(paramValues, problem, measureFile) = message.content['proposition']
f = open(measureFile)
f.close()
return
# Private methods
def query_data(self, parameters=None, problem=None):
return None
|
tigeraniya/django-allauth | test_settings.py | Python | mit | 4,239 | 0.000236 | # -*- coding: utf-8 -*-
import django
SEC | RET_KEY = 'psst'
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
| 'HOST': '',
'PORT': '',
}
}
ROOT_URLCONF = 'allauth.urls'
if django.VERSION >= (1, 8):
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
],
},
},
]
else:
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.amazon',
'allauth.socialaccount.providers.angellist',
'allauth.socialaccount.providers.baidu',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitly',
'allauth.socialaccount.providers.coinbase',
'allauth.socialaccount.providers.douban',
'allauth.socialaccount.providers.dropbox',
'allauth.socialaccount.providers.dropbox_oauth2',
'allauth.socialaccount.providers.evernote',
'allauth.socialaccount.providers.feedly',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.flickr',
'allauth.socialaccount.providers.foursquare',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.hubic',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.linkedin',
'allauth.socialaccount.providers.linkedin_oauth2',
'allauth.socialaccount.providers.mailru',
'allauth.socialaccount.providers.windowslive',
'allauth.socialaccount.providers.odnoklassniki',
'allauth.socialaccount.providers.openid',
'allauth.socialaccount.providers.orcid',
'allauth.socialaccount.providers.paypal',
'allauth.socialaccount.providers.persona',
'allauth.socialaccount.providers.soundcloud',
'allauth.socialaccount.providers.spotify',
'allauth.socialaccount.providers.stackexchange',
'allauth.socialaccount.providers.tumblr',
'allauth.socialaccount.providers.twitch',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.vimeo',
'allauth.socialaccount.providers.weibo',
'allauth.socialaccount.providers.vk',
'allauth.socialaccount.providers.xing',
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = '/tmp/' # Dummy
STATIC_URL = '/static/'
|
maparent/virtuoso-python | virtuoso/vstore.py | Python | bsd-3-clause | 29,332 | 0.002864 | """
"""
__dist__ = __import__("pkg_resources").get_distribution("rdflib")
from builtins import (next, chr, zip, range, object)
from future import standard_library
standard_library.install_aliases()
from builtins import (next, chr, zip, range, object)
import threading
from io import StringIO
import os
import sys
from struct import unpack
from itertools import islice
from rdflib.graph import Graph
from rdflib.term import URIRef, BNode, Literal, Variable
from rdflib.namespace import XSD, Namespace, NamespaceManager
from rdflib.plugins.sparql.sparql import FrozenBindings
from rdflib.query import Result, ResultRow
from rdflib.store import Store, VALID_STORE
import pyodbc
__all__ = ['Virtuoso', 'OperationalError', 'resolve', 'VirtRDF']
VirtRDF = Namespace('http://www.openlinksw.com/schemas/virtrdf#')
from virtuoso.common import READ_COMMITTED
import logging
log = logging.getLogger(__name__)
## hack to change BNode's random identifier generator to be
## compatible with Virtuoso's needs
from time import time
from random import choice, | seed
from string import ascii_letters, digits
seed(time())
__bnode_old_new__ = BNode.__new__
def __bnode_new__(cls, value=None, *av, **kw):
| if value is None:
value = choice(ascii_letters) + \
"".join(choice(ascii_letters + digits) for x in range(7))
return __bnode_old_new__(cls, value, *av, **kw)
BNode.__new__ = staticmethod(__bnode_new__)
## end hack
import re
_ws_re = r'(\s*#[^\n]*\n)*\s*'
_start_re = r'^SPARQL\s+' \
r'(DEFINE[ \t]+\S+[ \t]+("[^"]*"|<[^>]*>|[0-9]+)\s+)*' \
r'{WS}' \
r'(BASE\b{WS}<[^>]*>{WS})?' \
r'(PREFIX\b{WS}\w*:{WS}<[^>]*>{WS})*' \
.format(WS=_ws_re)
_ask_re = re.compile(_start_re + r'(ASK)\b', re.IGNORECASE + re.MULTILINE)
_construct_re = re.compile(_start_re + r'(CONSTRUCT|DESCRIBE)\b', re.IGNORECASE + re.MULTILINE)
_select_re = re.compile(_start_re + r'SELECT\b', re.IGNORECASE + re.MULTILINE)
_base_re = re.compile(r'(BASE[ \t]+<[^>]*>\s+)?', re.IGNORECASE + re.MULTILINE)
class OperationalError(Exception):
"""
Raised when transactions are mis-managed
"""
def _all_none(binding):
"""
Return True if binding contains only None values.
"""
for i in binding:
if i is not None:
return False
return True
class EagerIterator(object):
"""A wrapper for an iterator that calculates one element ahead.
Allows to start context handlers within the inner generator."""
def __init__(self, g):
self.g = g
self.done = False
try:
# Work around stupid virtuoso bug that can return
# (None, None, None) if you ask for an empty graph on a store.
while True:
self.next_val = next(g)
if not _all_none(self.next_val):
break
except StopIteration:
self.done = True
def __iter__(self):
return self
def next(self):
if self.done:
raise StopIteration()
a = self.next_val
try:
while True:
self.next_val = next(self.g)
if not _all_none(self.next_val):
break
except StopIteration:
self.done = True
finally:
return a
EagerIterator.__next__ = EagerIterator.next
class VirtuosoResultRow(ResultRow):
"""
Subclass of ResultRow which is more efficiently created
"""
@staticmethod
def prepare_var_dict(var_list):
"""
Make a dict as expected by __new__ from an iterable of Variable's.
"""
return dict((str(x[1]), x[0]) for x in enumerate(var_list))
def __new__(cls, values, var_dict):
instance = tuple.__new__(cls, values)
instance.labels = var_dict
return instance
class VirtuosoResult(Result):
"""
Subclass of Result to work better with EagerIterator.
"""
_bindings_tuples = None
_bindings_tuples_complete = False
_bindings = None
def __init__(self, inner_result):
if type(inner_result) is EagerIterator:
Result.__init__(self, "SELECT")
self._eagerIterator = inner_result
self.vars = inner_result.vars
elif type(inner_result) is bool:
Result.__init__(self, "ASK")
self.askAnswer = inner_result
elif type(inner_result) is Graph:
Result.__init__(self, "CONSTRUCT")
self.graph = inner_result
else:
raise ValueError("Unrecognized inner_result %r" % inner_result)
@property
def bindings(self):
if self.type != "SELECT":
return None
if self._bindings is None:
self_vars = self.vars
self._bindings = [ FrozenBindings(None, dict(zip(self_vars, tpl)))
for tpl in self ]
return self._bindings
def __iter__(self):
"""
Iter over all bindings as tuples of rdflib Terms.
"""
if self.type != "SELECT":
return Result.__iter__(self)
elif self._bindings_tuples is not None:
if not self._bindings_tuples_complete:
raise ValueError("Can not access bindings while iterating")
return iter(self._bindings_tuples)
else:
self._bindings_tuples = []
return self._iter_tuples()
def __len__(self):
try:
return Result.__len__(self)
except ValueError:
return None # __len__ called during __iter__
def _iter_tuples(self):
self_bindings_tuples_append = self._bindings_tuples.append
for i in self._eagerIterator:
yield i
self_bindings_tuples_append(i)
self._bindings_tuples_complete = True
class Virtuoso(Store):
"""
RDFLib Storage backed by Virtuoso
.. automethod:: virtuoso.vstore.Virtuoso.cursor
.. automethod:: virtuoso.vstore.Virtuoso.query
.. automethod:: virtuoso.vstore.Virtuoso.sparql_query
.. automethod:: virtuoso.vstore.Virtuoso.transaction
.. automethod:: virtuoso.vstore.Virtuoso.commit
.. automethod:: virtuoso.vstore.Virtuoso.rollback
"""
context_aware = True
transaction_aware = True
formula_aware = True # Not sure whether this is true; needed to read N3.
def __init__(self, *av, **kw):
self.long_iri = kw.pop('long_iri', False)
self.inference = kw.pop('inference', None)
self.quad_storage = kw.pop('quad_storage', None)
self.signal_void = kw.pop('signal_void', None)
connection = kw.pop('connection', None)
if connection is not None:
if not isinstance(connection, pyodbc.Connection):
from sqlalchemy.engine.base import Connection
if isinstance(connection, Connection):
# extract the pyodbc connection
connection = connection._Connection__connection.connection
assert isinstance(connection, pyodbc.Connection)
self._connection = connection
self.initialize_connection()
super(Virtuoso, self).__init__(*av, **kw)
self._transaction = None
def initialize_connection(self):
connection = self._connection
if sys.version_info[0] < 3:
connection.setdecoding(pyodbc.SQL_CHAR, 'utf-8', pyodbc.SQL_CHAR)
connection.setdecoding(pyodbc.SQL_WCHAR, 'utf-32LE', pyodbc.SQL_WCHAR, unicode)
connection.setdecoding(pyodbc.SQL_WMETADATA, 'utf-32LE', pyodbc.SQL_WCHAR, unicode)
connection.setencoding(unicode, 'utf-32LE', pyodbc.SQL_WCHAR)
connection.setencoding(str, 'utf-8', pyodbc.SQL_CHAR)
else:
connection.setdecoding(pyodbc.SQL_CHAR, 'utf-8', pyodbc.SQL_CHAR)
connection.setdecoding(pyodbc.SQL_WCHAR, 'utf-32LE', pyodbc.SQL_WCHAR)
connection.setdecoding(pyodbc.SQL_WMETADATA, 'utf-32LE', pyodbc.SQL_WCHAR)
connection.setencoding('utf-32LE', pyodbc.SQL_WCHAR)
connection.setencoding('utf-8', pyodbc.SQL_CHAR)
self.__init_ns_decls__()
de |
youtube/cobalt | third_party/blink/Tools/Scripts/webkitpy/style/filter.py | Python | bsd-3-clause | 11,780 | 0.00034 | # Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains filter-related code."""
def validate_filter_rules(filter_rules, all_categories):
"""Validate the given filter rules, and raise a ValueError if not valid.
Args:
filter_rules: A list of boolean filter rules, for example--
["-whitespace", "+whitespace/braces"]
all_categories: A list of all available category names, for example--
["whitespace/tabs", "whitespace/braces"]
Raises:
ValueError: An error occurs if a filter rule does not begin
with "+" or "-" or if a filter rule does not match
the beginning of some category name in the list
of all available categories.
"""
for rule in filter_rules:
if not (rule.startswith('+') or rule.startswi | th('-')):
raise ValueError('Invalid filter rule "%s": every rule '
'must start with + or -.' % rule)
for category in all_categories:
if category.startswith(rule[1:]):
break
else:
raise ValueError('Suspected incorrect filter rule "%s": '
'the rule does not match the begin | ning '
'of any category name.' % rule)
class _CategoryFilter(object):
"""Filters whether to check style categories."""
def __init__(self, filter_rules=None):
"""Create a category filter.
Args:
filter_rules: A list of strings that are filter rules, which
are strings beginning with the plus or minus
symbol (+/-). The list should include any
default filter rules at the beginning.
Defaults to the empty list.
Raises:
ValueError: Invalid filter rule if a rule does not start with
plus ("+") or minus ("-").
"""
if filter_rules is None:
filter_rules = []
self._filter_rules = filter_rules
self._should_check_category = {} # Cached dictionary of category to True/False
def __str__(self):
return ','.join(self._filter_rules)
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CategoryFilter instance is equal to another."""
return self._filter_rules == other._filter_rules
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce from __eq__().
return not (self == other)
def should_check(self, category):
"""Return whether the category should be checked.
The rules for determining whether a category should be checked
are as follows. By default all categories should be checked.
Then apply the filter rules in order from first to last, with
later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
"""
if category in self._should_check_category:
return self._should_check_category[category]
should_check = True # All categories checked by default.
for rule in self._filter_rules:
if not category.startswith(rule[1:]):
continue
should_check = rule.startswith('+')
self._should_check_category[category] = should_check # Update cache.
return should_check
class FilterConfiguration(object):
"""Supports filtering with path-specific and user-specified rules."""
def __init__(self, base_rules=None, path_specific=None, user_rules=None):
"""Create a FilterConfiguration instance.
Args:
base_rules: The starting list of filter rules to use for
processing. The default is the empty list, which
by itself would mean that all categories should be
checked.
path_specific: A list of (sub_paths, path_rules) pairs
that stores the path-specific filter rules for
appending to the base rules.
The "sub_paths" value is a list of path
substrings. If a file path contains one of the
substrings, then the corresponding path rules
are appended. The first substring match takes
precedence, i.e. only the first match triggers
an append.
The "path_rules" value is a list of filter
rules that can be appended to the base rules.
user_rules: A list of filter rules that is always appended
to the base rules and any path rules. In other
words, the user rules take precedence over the
everything. In practice, the user rules are
provided by the user from the command line.
"""
if base_rules is None:
base_rules = []
if path_specific is None:
path_specific = []
if user_rules is None:
user_rules = []
self._base_rules = base_rules
self._path_specific = path_specific
self._path_specific_lower = None
"""The backing store for self._get_path_specific_lower()."""
self._user_rules = user_rules
self._path_rules_to_filter = {}
"""Cached dictionary of path rules to CategoryFilter instance."""
# The same CategoryFilter instance can be shared across
# multiple keys in this dictionary. This allows us to take
# greater advantage of the caching done by
# CategoryFilter.should_check().
self._path_to_filter = {}
"""Cached dictionary of file path to CategoryFilter instance."""
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this FilterConfiguration is equal to another."""
if self._base_rules != other._base_rules:
return False
if self._path_specific != other._path_specific:
return False
if self._user_rules != other._user_rules:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce this from __eq__().
return not self.__eq__(other)
# We use the prefix "_get" since the name "_path_specific_lower"
# is already taken up by the data attribute backing store.
def _get_path_specific_lower(self):
"""Return a copy of self._path_specific with the |
esikachev/scenario | sahara/openstack/common/periodic_task.py | Python | apache-2.0 | 8,319 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import random
import time
from oslo.config import cfg
import six
from sahara.openstack.common._i18n import _, _LE, _LI
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
def list_opts():
"""Entry point for oslo.config-generator."""
return [(None, copy.deepcopy(periodic_opts))]
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

        1. Without arguments '@periodic_task', this will be run on the
           default interval of 60 seconds.

        2. With arguments:
           @periodic_task(spacing=N [, run_immediately=[True|False]]
           [, name=[None|"string"])
           this will be run on approximately every N seconds. If this number
           is negative the periodic task will be disabled. If the
           run_immediately argument is provided and has a value of 'True',
           the first run of the task will be shortly after task scheduler
           starts. If run_immediately is omitted or set to 'False', the
           first time the task runs will be approximately N seconds after
           the task scheduler starts. If name is not provided, __name__ of
           function is used.
    """
    def decorator(func):
        # The pre-Grizzly 'ticks_between_runs' calling style is rejected.
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
        # Mark the callable so _PeriodicTasksMeta can discover it.
        func._periodic_task = True
        func._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if func._periodic_external_ok and not CONF.run_external_periodic_tasks:
            # Task may run in a separate process and we were told not to
            # run such tasks in this one.
            func._periodic_enabled = False
        else:
            func._periodic_enabled = kwargs.pop('enabled', True)
        func._periodic_name = kwargs.pop('name', func.__name__)
        # Control frequency: 0 means "use the default interval" (resolved
        # later by the metaclass).
        func._periodic_spacing = kwargs.pop('spacing', 0)
        func._periodic_immediate = kwargs.pop('run_immediately', False)
        # A last-run timestamp of None makes the scheduler fire right away.
        func._periodic_last_run = None if func._periodic_immediate else time.time()
        return func

    # Bare usage (@periodic_task) passes the function as the only positional
    # argument and expects the decorated function back; parametrized usage
    # (@periodic_task(...)) passes kwargs and expects the decorator back.
    return decorator if kwargs else decorator(args[0])
class _PeriodicTasksMeta(type):
    """Metaclass that collects @periodic_task-decorated methods per class."""
    def _add_periodic_task(cls, task):
        """Add a periodic task to the list of periodic tasks.

        The task should already be decorated by @periodic_task.

        :return: whether task was actually enabled
        """
        name = task._periodic_name
        # Negative spacing is the documented way to disable a task.
        if task._periodic_spacing < 0:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'its interval is negative'),
                     {'task': name})
            return False
        if not task._periodic_enabled:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'it is disabled'),
                     {'task': name})
            return False
        # A periodic spacing of zero indicates that this task should
        # be run on the default interval to avoid running too
        # frequently.
        if task._periodic_spacing == 0:
            task._periodic_spacing = DEFAULT_INTERVAL
        cls._periodic_tasks.append((name, task))
        cls._periodic_spacing[name] = task._periodic_spacing
        return True
    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []
        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}
        # Register every decorated method defined directly on this class
        # (inherited tasks were already copied above).
        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                cls._add_periodic_task(value)
def _nearest_boundary(last_run, spacing):
    """Return the most recent boundary at or before now.

    Boundaries are multiples of *spacing* offset from *last_run* (e.g. a
    last run of 10 with spacing 7 yields boundaries 17, 24, 31, 38, ...).
    Up to 5% of the spacing, truncated to whole seconds, is added as jitter
    so tasks do not synchronize; spacings below 20 seconds therefore get no
    jitter.  If there was no previous run, "now" itself is returned.
    """
    now = time.time()
    if last_run is None:
        return now
    since_last = now - last_run
    # Add up to 5% jitter, rounded down to the nearest second.
    jitter = int(spacing * (random.random() / 20))
    return now - (since_last % spacing) + jitter
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
def __init__(self):
super(PeriodicTasks, self).__init__()
self._periodic_last_run = {}
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
def add_periodic_task(self, task):
"""Add a periodic task to the list of periodic tasks.
The task should already be decorated by @periodic_task.
"""
if self.__class__._add_periodic_task(task):
self._periodic_last_run[task._periodic_name] = (
task._periodic_last_run)
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
spacing = self._periodic_spacing[task_name]
last_run = self._pe | riodic_last_run[task_name]
# Check if due, if not skip
idle_for = min(idle_for, spacing)
if last_run is not None:
delta = last_run + spacing - time.time()
if delta > 0:
idle_for = min(idle_for, delta)
continue
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[t | ask_name] = _nearest_boundary(
last_run, spacing)
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_nam |
asottile/setuptools-golang | testing/internal_imports/setup.py | Python | mit | 478 | 0 | from __future__ import annotations
fr | om setuptools import Extension
from setuptools import setup
setup(
name='internal_imports',
ext_modules=[Extension('hello_lib', ['hello_lib/main.go'])],
build_golang={'root': 'github.com/asottile/fake'},
# Would do this, but we're testing *our* implementation and this would
# install from pypi. We can rely on setuptools-golang being already
# installed under test.
# setup_r | equires=['setuptools-golang'],
)
|
lukas-hetzenecker/home-assistant | tests/components/kraken/test_sensor.py | Python | apache-2.0 | 9,736 | 0.000308 | """Tests for the kraken sensor platform."""
from datetime import timedelta
from unittest.mock import patch
from pykrakenapi.pykrakenapi import KrakenAPIError
from homeassistant.components.kraken.const import (
CONF_TRACKED_ASSET_PAIRS,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TRACKED_ASSET_PAIR,
DOMAIN,
)
from homeassistant.const import CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_START
import homeassistant.util.dt as dt_util
from .const import (
    MISSING_PAIR_TICKER_INFORMATION_RESPONSE,
MISSING_PAIR_TRADEABLE_ASSET_PAIR_RESPONSE,
TICKER_INFORMATION_RESPONSE,
TRADEABLE_ASSET_PAIR_RESPONSE,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_sensor(hass):
    """Test that every Kraken sensor reports the mocked ticker values."""
    # object_id -> expected state for the disabled-by-default XBT/USD detail
    # sensors; the values come from TICKER_INFORMATION_RESPONSE.
    detail_sensors = {
        "xbt_usd_ask_volume": "15949",
        "xbt_usd_last_trade_closed": "0.0003478",
        "xbt_usd_bid_volume": "20792",
        "xbt_usd_volume_today": "146300.24906838",
        "xbt_usd_volume_last_24h": "253478.04715403",
        "xbt_usd_volume_weighted_average_today": "0.000348573",
        "xbt_usd_volume_weighted_average_last_24h": "0.000344881",
        "xbt_usd_number_of_trades_today": "82",
        "xbt_usd_number_of_trades_last_24h": "128",
        "xbt_usd_low_last_24h": "0.0003446",
        "xbt_usd_high_last_24h": "0.0003521",
        "xbt_usd_opening_price_today": "0.0003513",
    }
    utcnow = dt_util.utcnow()
    # Patching 'utcnow' to gain more control over the timed update.
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
        "pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
        return_value=TRADEABLE_ASSET_PAIR_RESPONSE,
    ), patch(
        "pykrakenapi.KrakenAPI.get_ticker_information",
        return_value=TICKER_INFORMATION_RESPONSE,
    ):
        entry = MockConfigEntry(
            domain=DOMAIN,
            unique_id="0123456789",
            options={
                CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
                CONF_TRACKED_ASSET_PAIRS: [
                    "ADA/XBT",
                    "ADA/ETH",
                    "XBT/EUR",
                    "XBT/GBP",
                    "XBT/USD",
                    "XBT/JPY",
                ],
            },
        )
        entry.add_to_hass(hass)

        registry = await hass.helpers.entity_registry.async_get_registry()

        # Pre-create registry entries for disabled by default sensors.
        for object_id in detail_sensors:
            registry.async_get_or_create(
                "sensor",
                DOMAIN,
                object_id,
                suggested_object_id=object_id,
                disabled_by=None,
            )

        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

        hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
        await hass.async_block_till_done()

        # Every tracked pair exposes an "ask" sensor with the mocked price
        # and a currency-appropriate icon.
        for entity_id, icon in (
            ("sensor.xbt_usd_ask", "mdi:currency-usd"),
            ("sensor.xbt_eur_ask", "mdi:currency-eur"),
            ("sensor.ada_xbt_ask", "mdi:currency-btc"),
            ("sensor.xbt_jpy_ask", "mdi:currency-jpy"),
            ("sensor.xbt_gbp_ask", "mdi:currency-gbp"),
            ("sensor.ada_eth_ask", "mdi:cash"),
        ):
            ask_sensor = hass.states.get(entity_id)
            assert ask_sensor.state == "0.0003494"
            assert ask_sensor.attributes["icon"] == icon

        # Detail sensors for XBT/USD reflect the mocked ticker response.
        for object_id, expected_state in detail_sensors.items():
            assert hass.states.get("sensor." + object_id).state == expected_state
async def test_missing_pair_marks_sensor_unavailable(hass):
"""Test that a missing tradable asset pair marks the sensor unavailable."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
return_value=TRADEABLE_ASSET_PAIR_ |
namccart/pybombs | pybombs/package_manager.py | Python | gpl-3.0 | 12,528 | 0.002395 | #
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Package Manager: Manages packages (no shit)
"""
from pybombs import pb_logging
from pybombs.pb_exception import PBException
from pybombs.config_manager import config_manager
from pybombs import recipe
from pybombs import packagers
from pybombs.utils import utils
INSTALL_TYPES = ("any", "source", "binary")
class PackageManagerCache(object):
    " Remember what's installed and installable "
    def __init__(self):
        # Dict: key == package name, value == boolean install status
        # If key doesn't exist, we don't know the install/installable status
        self.known_installable = {}
        # Dict install_type -> dict: package name -> install status
        self.known_installed = {k: {} for k in INSTALL_TYPES}
# Module-level singleton shared by every PackageManager instance so repeated
# status queries within one process are answered from cache.
PACKAGE_MANAGER_CACHE = PackageManagerCache()
def _get_valid_install_type(install_type):
    """Return a valid install type; None is normalized to "any"."""
    normalized = "any" if install_type is None else install_type
    assert normalized in INSTALL_TYPES
    return normalized
class PackageManager(object):
"""
Meta-package manager. This will determine, according to our system
and the configuration, who takes care of managing packages and
then dispatches specific package managers. For example, this might
dispatch an apt backend on Ubuntu and Debian systems, or a
yum/dnf backend on Fedora systems.
"""
    def __init__(self,):
        """Set up logging, configuration, the status cache and packagers."""
        # Set up logger:
        self.log = pb_logging.logger.getChild("PackageManager")
        self.cfg = config_manager
        self.pmc = PACKAGE_MANAGER_CACHE
        # Source builds need an active prefix to install into.
        self.prefix_available = self.cfg.get_active_prefix().prefix_dir is not None
        # Create a source package manager
        if self.prefix_available:
            self.src = packagers.Source()
            self.prefix = self.cfg.get_active_prefix()
        else:
            self.log.debug("No prefix specified. Skipping source package manager.")
            self.src = None
        # Create sorted list of binary package managers
        self.binary_pkgrs = packagers.filter_available_packagers(
            self.cfg.get('packagers'),
            packagers.__dict__.values(),
            self.log
        )
        # Now we can use self.binary_pkgrs, in order, for our commands.
    def check_package_flag(self, pkgname, flag):
        """
        Return True if package 'pkgname' has 'flag' set to a truthy value,
        False otherwise.  Note the result is always coerced with bool(), so
        an absent flag yields False (never None).
        """
        return bool(
            self.cfg.get_package_flags(
                pkgname,
                recipe.get_recipe(pkgname).category
            ).get(flag)
        )
def get_packagers(self, pkgname, install_type=None, ignore_pkg_flag=False):
"""
Return a valid list of packagers for a given package.
This will take care of cases where e.g. a source packager is
required (and then only return that).
"""
install_type = _get_valid_install_type(install_type)
force_build = not ignore_pk | g_flag and self.check_package_flag(pkgname, 'forcebuild')
if force_build:
self.log.debug("Package {pkg} is requesting a source build.".format(pkg=pkgname))
if install_type == "source" or (install_type == "any" and force_build):
return [self.src,]
if install_type == "binary" or self.src is None:
if force_build:
self.log.debug(
"Returning no packagers -- package is requesting source build, but binary b | uild is requested."
)
return []
return self.binary_pkgrs
# if install_type == "any":
return [self.src,] + self.binary_pkgrs
def exists(self, name, return_pkgr_name=False):
"""
Check to see if this package is available on this platform.
Returns True or a version string if yes, False if not.
If return_pkgr_name is True, it'll return a list of packagers that
can install this package.
"""
if not return_pkgr_name and name in self.pmc.known_installable:
self.log.obnoxious("{0} has cached installable-status: {1}".format(
name, self.pmc.known_installable.get(name)
))
return True
self.log.debug("Checking if package {0} is installable...".format(name))
if self.check_package_flag(name, 'forceinstalled'):
self.log.debug("Package {0} is forced to state 'installed'.".format(name))
return ['force-installed'] if return_pkgr_name else True
r = recipe.get_recipe(name)
pkgrs = []
for pkgr in self.get_packagers(name):
pkg_version = pkgr.exists(r)
if pkg_version is None or not pkg_version:
continue
else:
self.pmc.known_installable[name] = True
if return_pkgr_name:
pkgrs.append(pkgr.name)
else:
return pkg_version
if return_pkgr_name and len(pkgrs):
self.pmc.known_installable[name] = True
return pkgrs
self.log.debug("Package {0} is not installable.".format(name))
self.pmc.known_installable[name] = False
return False
    def installed(self, name, return_pkgr_name=False, install_type=None, ignore_pkg_flag=False):
        """
        Check to see if this recipe is installed (identified by its name).
        If not, return False. If yes, return value depends on return_pkgr_name
        and is either a list of packager name that installed it, or a version
        string (if the version string can't be determined, returns True instead).
        ignore_pkg_flag is passed to get_packagers().
        """
        install_type = _get_valid_install_type(install_type)
        # The cache is only consulted for the boolean question; packager-name
        # queries are always recomputed.
        if not return_pkgr_name and name in self.pmc.known_installed.get(install_type, {}):
            self.log.obnoxious("{0} has cached installed-status: {1}".format(
                name, self.pmc.known_installed.get(install_type, {}).get(name)
            ))
            return self.pmc.known_installed.get(install_type, {}).get(name)
        self.log.debug("Checking if package {0} is installed...".format(name))
        if self.check_package_flag(name, 'forceinstalled'):
            self.log.debug("Package {0} is forced to state 'installed'.".format(name))
            # TODO maybe we can figure out a version string
            return ['force-installed'] if return_pkgr_name else True
        r = recipe.get_recipe(name)
        pkgrs = []
        for pkgr in self.get_packagers(name, install_type, ignore_pkg_flag):
            pkg_version = pkgr.installed(r)
            if pkg_version is None or not pkg_version:
                continue
            else:
                # First positive answer: cache it, then either collect the
                # packager's name or return the version string right away.
                self.pmc.known_installed[install_type][name] = True
                if return_pkgr_name:
                    pkgrs.append(pkgr.name)
                else:
                    return pkg_version
        if return_pkgr_name and len(pkgrs):
            return pkgrs
        self.pmc.known_installed[install_type][name] = False
        self.log.debug("Package {0} is not installed.".format(name))
        return False
def install(
self,
name,
install_type=None,
static=False,
verify=False,
fail_silently=False
):
"""
Install the given package. Returns |
vex1023/vxData | vxData/exception.py | Python | mit | 585 | 0.003527 | # endcoding = utf-8
'''
author : vex1023
email : vex1023@qq.com
'''
# JSON-style message template shared by every APIError subclass.
ERROR_TEMPLATE = '''{"error_code": "%s", "error_msg": "%s", "reason": "%s"}'''
class APIError(Exception):
    """Base trading error.

    Subclasses override ERROR_CODE / ERROR_MSG; the exception message is
    rendered from ERROR_TEMPLATE with the caller-supplied reason.
    """
    ERROR_CODE = '0'
    ERROR_MSG = 'Success'
    def __init__(self, reason):
        message = ERROR_TEMPLATE % (self.ERROR_CODE, self.ERROR_MSG, reason)
        super(APIError, self).__init__(message)
class InternalError(APIError):
    """Error 999: unexpected internal failure."""
    ERROR_CODE = '999'
    ERROR_MSG = 'Internal Error'
class NotSupportError(APIError):
    """Error -1: the requested API is not supported."""
    ERROR_CODE = '-1'
    ERROR_MSG = 'Not Support APIs'
|
ollie314/kafka | tests/kafkatest/sanity_checks/test_console_consumer.py | Python | apache-2.0 | 4,449 | 0.003372 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ducktape.mark import matrix
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.utils.remote_account import line_count, file_exists
from kafkatest.version import LATEST_0_8_2
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic)
    def setUp(self):
        self.zk.start()
    @cluster(num_nodes=3)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""
        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()
        self.consumer.security_protocol = security_protocol
        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds " % str(time.time() - t0))
        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")
        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0
        self.consumer.stop_node(node)
    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()
        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()
        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.new_consumer = False
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()
        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
|
openqt/algorithms | leetcode/python/lc993-cousins-in-binary-tree.py | Python | gpl-3.0 | 1,692 | 0.007092 | # coding=utf-8
import unittest
"""993. Cousins in Binary Tree
https://leetcode.com/problems/cousins-in-binary-tree/description/
In a binary tree, the root node is at depth `0`, and children of each depth
`k` node are at depth `k+1`.
Two nodes of a binary tree are _cousins_ if they have the same depth, but have
**different parents**.
We are given the `root` of a binary tree with unique values, and the values
`x` and `y` of two different nodes in the tree.
Return `true` if and only if the nodes corresponding to the values `x` and `y`
are cousins.
**Example 1:
**
**Input:** root = [1,2,3,4], x = 4, y = 3
**Output:** false
**Example 2:
**
    **Input:** root = [1,2,3,null,4,null,5], x = 5, y = 4
**Output:** true
**Example 3:**
****
**Input:** root = [1,2,3,null,4], x = 2, y = 3
| **Output:** false
**Note:**
1. The number of nodes in the tree will be between `2` and `100`.
2. Each node has a unique integer value from `1` to `100`.
Similar Questions:
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isCousins(self, root, x, y):
        """Return True iff the nodes holding x and y are cousins.

        Cousins are nodes at the same depth with different parents.
        Node values are assumed unique (per the problem constraints).

        :type root: TreeNode
        :type x: int
        :type y: int
        :rtype: bool
        """
        # Breadth-first search one level at a time, remembering each node's
        # parent so same-parent siblings can be rejected.
        level = [(root, None)]
        while level:
            parent_of = {}
            next_level = []
            for node, parent in level:
                if node.val == x or node.val == y:
                    parent_of[node.val] = parent
                for child in (node.left, node.right):
                    if child is not None:
                        next_level.append((child, node))
            if parent_of:
                # At the first depth where either value appears, both must be
                # present and hang off different parents.
                return len(parent_of) == 2 and parent_of[x] is not parent_of[y]
            level = next_level
        return False
    def test(self):
        pass
if __name__ == "__main__":
unittest.main()
|
collective/cyn.in | src/ubify.viewlets/ubify/viewlets/browser/spaceicon.py | Python | gpl-3.0 | 3,414 | 0.009666 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://ww | w.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dh | eeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
class SpaceIconViewlet(ViewletBase):
    """Show the icon of the nearest enclosing Space for the current context.

    Walks up the acquisition chain looking for a Space object; if found, the
    viewlet renders space_icon.pt with that Space's icon and URL, otherwise
    it falls back to rendering the site logo via site_logo.pt.
    """
    render = ViewPageTemplateFile('space_icon.pt')
    def update(self):
        portal_state = getMultiAdapter((self.context, self.request),
            name=u'plone_portal_state')
        cportal_url = portal_state.portal_url()
        current_object = self.context.aq_inner
        self.has_space_icon = False
        self.space_icon = ""
        self.space_url = ""
        # aq_chain lists the object itself followed by its acquisition
        # parents, so the first Space found is the nearest one.
        parentslist = current_object.aq_chain
        new_object = None
        found = 0
        try:
            for type in parentslist:
                if type.portal_type == 'Space' and type.meta_type == 'Space':
                    new_object = type
                    found = 1
                if found == 1:
                    break
        except AttributeError:
            # Some chain members (e.g. the application root) have no
            # portal_type; stop searching when that happens.
            a = self.space_icon
        if new_object <> None:
            #implement code here for binding space icon
            if new_object.space_icon <> "":
                self.space_icon = cportal_url + "/" + new_object.space_icon
            else:
                # NOTE(review): default_space_icon is neither defined nor
                # imported in this module, so this branch raises NameError --
                # confirm where the default icon constant should come from.
                self.space_icon = default_space_icon
            self.space_url = new_object.absolute_url()
            self.has_space_icon = True
        else:
            # No Space ancestor: switch to the site-logo template instead.
            self.site_icon = portal_state.portal_url() + "/logo.jpg"
            self.site_url = portal_state.portal_url()
            self.render = ViewPageTemplateFile('site_logo.pt')
|
lehinevych/cfme_tests | utils/tests/test_simple_locators.py | Python | gpl-2.0 | 1,500 | 0.001333 | import pytest
@pytest.fixture(scope='module')
def test_page(browser, datafile):
    # Load the static elements.html fixture and hand it to the browser as a
    # base64 data: URI so no web server is needed.
    # NOTE(review): str.encode('base64') is Python 2 only -- on Python 3 this
    # would need base64.b64encode; confirm the project's target version.
    test_page_html = datafile('elements.html').read()
    pytest.sel.get('data:text/html;base64,{}'.format(test_page_html.encode('base64')))
pytestmark = pytest.mark.usefixtures('test_page')
def assert_len(locator, required_len):
    """Assert that *locator* matches exactly *required_len* elements."""
    found = pytest.sel.elements(locator)
    assert len(found) == required_len
def test_by_id():
    """#id selectors match existing ids and nothing else."""
    for locator, expected in (('#id1', 1), ('#id2', 1), ('#id3', 0)):
        assert_len(locator, expected)
def test_by_class():
    """.class selectors match all elements carrying the class."""
    for locator, expected in (('.class1', 2), ('.class2', 1), ('.class3', 0)):
        assert_len(locator, expected)
def test_by_element_with_ | id():
# should exist
assert_len('h1#id1', 1)
assert_len('h2#id2', 1)
# shouldn't exist
assert_len('h1#id2', 0)
assert_len('h2#id1', 0)
def test_by_element_with_class():
    """tag.class selectors require both the tag name and the class to match."""
    for locator, expected in (
        ('h1.class1', 1),
        ('h2.class1', 1),
        ('h2.class2', 1),
        ('h1.class3', 0),
    ):
        assert_len(locator, expected)
def test_by_element_with_id_and_class():
    """tag#id.class selectors require tag, id and class to all match."""
    # The duplicate 'h2#id2.class2' entry mirrors the original assertions.
    for locator, expected in (
        ('h1#id1.class1', 1),
        ('h2#id2.class2', 1),
        ('h2#id2.class2', 1),
        ('h1#id1.class2', 0),
        ('h3#h2.class1', 0),
        ('h1#h2.class3', 0),
    ):
        assert_len(locator, expected)
def test_by_locator_list():
    """A list of locators matches the union of the individual matches."""
    for locator, expected in ((['#id1', '.class2'], 2), (['#id3', '.class3'], 0)):
        assert_len(locator, expected)
|
naphthalene/hubcave | hubcave/core/mixins/tables.py | Python | mit | 4,047 | 0.002224 | from __future__ import absolute_import, unicode_literals
from django.core.paginator import Paginator
from django.core import urlresolvers
from django.utils.html import mark_safe, escape
import django_tables2 as tables
from django_tables2.tables import Table
from django_tables2.utils import Accessor as A, AttributeDict
class ActionsColumn(tables.Column):
    """
    This column allows you to pass in a list of links that will form an
    Action Column.

    Each link is a dict with keys 'title' and 'url', and optionally 'attrs'
    (HTML attributes for the <a> tag) and 'args' (URL arguments; Accessor
    instances are resolved against the row's record).
    """
    empty_values = ()
    links = None
    delimiter = None
    def __init__(self, links=None, delimiter=' | ', **kwargs):
        super(ActionsColumn, self).__init__(**kwargs)
        # Action columns carry no data, so sorting makes no sense.
        self.orderable = False
        self.delimiter = delimiter
        if links is not None:
            self.links = links
    def render(self, value, record, bound_column):
        """Render one <a> tag per configured link, joined by the delimiter."""
        if not self.links:
            raise NotImplementedError('Links not assigned.')
        if not isinstance(self.links, (list, tuple,dict)):
            raise NotImplementedError('Links must be an iterable.')
        links = []
        for link in self.links:
            title = link['title']
            url = link['url']
            attrs = link['attrs'] if 'attrs' in link else None
            # Resolve Accessor args against the current record so per-row
            # URLs (e.g. detail pages) can be built.
            if 'args' in link:
                args = [a.resolve(record) if isinstance(a, A) else a for a in link['args']]
            else:
                args = None
            # Per-link attrs win over the column-wide attrs for <a> tags.
            attrs = AttributeDict(attrs if attrs is not None else self.attrs.get('a', {}))
            # Treat 'url' as a URL pattern name first; if reversing fails,
            # fall back to using it as a literal href.
            try:
                attrs['href'] = urlresolvers.reverse(url, args=args)
            except urlresolvers.NoReverseMatch:
                attrs['href'] = url
            links.append('<a {attrs}>{text}</a>'.format(
                attrs=attrs.as_html(),
                text=mark_safe(title)
            ))
        return mark_safe(self.delimiter.join(links))
class PaginateTable(Table):
    """Generic table class that makes use of Django's built in paginate functionality"""
    def __init__(self, *args, **kwargs):
        super(PaginateTable, self).__init__(*args, **kwargs)
        # Allow callers to override the template; default to the bundled
        # fancy paged-tables template.
        self.template = kwargs.get('template', 'fancy_paged_tables/table.html')
    def paginate(self, klass=Paginator, per_page=None, page=1, *args, **kwargs):
        """
        Paginates the table using a paginator and creates a ``page`` property
        containing information for the current page.

        :type klass: Paginator class
        :param klass: a paginator class to paginate the results
        :type per_page: `int`
        :param per_page: how many records are displayed on each page
        :type page: `int`
        :param page: which page should be displayed.

        Extra arguments are passed to the paginator.

        Pagination exceptions (`~django.core.paginator.EmptyPage` and
        `~django.core.paginator.PageNotAnInteger`) may be raised from this
        method and should be handled by the caller.
        """
        self.per_page_options = [20, 50, 100, 200] # This should probably be a passed in option
        self.per_page = per_page = per_page or self._meta.per_page
        self.paginator = klass(self.rows, per_page, *args, **kwargs)
        self.page = self.paginator.page(page)
        # Calc variables for use in displaying first, adjacent, and last page links
        adjacent_pages = 1 # This should probably be a passed in option
        # Starting page (first page between the ellipsis)
        start_page = max(self.page.number - adjacent_pages, 1)
        # If the window starts close to page 1, extend it to include page 1
        # so no ellipsis is shown for a gap of one or two pages.
        if start_page <= 3:
            start_page = 1
        # Ending page (last page between the ellipsis)
        end_page = self.page.number + adjacent_pages + 1
        # Likewise, extend the window to the last page when it ends nearby.
        if end_page >= self.paginator.num_pages - 1:
            end_page = self.paginator.num_pages + 1
        # Paging vars used in template
        self.page_numbers = [n for n in range(start_page, end_page) if 0 < n <= self.paginator.num_pages]
        self.show_first = 1 not in self.page_numbers
        self.show_last = self.paginator.num_pages not in self.page_numbers
|
jodal/pykka | tests/test_future.py | Python | apache-2.0 | 6,910 | 0.000289 | import asyncio
import sys
import traceback
import pytest
from pykka import Future, Timeout, get_all
def run_async(coroutine):
loop = asyncio.get_event_loop()
f = asyncio.ensure_future(coroutine, loop=loop)
return loop.run_until_complete(f)
def test_base_future_get_is_not_implemented():
future = Future()
with pytest.raises(NotImplementedError):
future.get()
def test_base_future_set_is_not_implemented():
future = Future()
with pytest.raises(NotImplementedError):
future.set(None)
def test_base_future_set_exception_is_not_implemen | ted():
future = Future()
with pytest.raises(NotImplementedError):
future.set_exception(None)
def test_set_multiple_times_fails(future):
future.set(0)
with pytest.raises(Exception):
future.set(0)
def test_get_all_blocks_until_ | all_futures_are_available(futures):
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
result = get_all(futures)
assert result == [0, 1, 2]
def test_get_all_raises_timeout_if_not_all_futures_are_available(futures):
futures[0].set(0)
futures[1].set(1)
# futures[2] is unset
with pytest.raises(Timeout):
get_all(futures, timeout=0)
def test_get_all_can_be_called_multiple_times(futures):
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
result1 = get_all(futures)
result2 = get_all(futures)
assert result1 == result2
def test_future_in_future_works(runtime):
inner_future = runtime.future_class()
inner_future.set("foo")
outer_future = runtime.future_class()
outer_future.set(inner_future)
assert outer_future.get().get() == "foo"
def test_get_raises_exception_with_full_traceback(runtime):
exc_class_get = None
exc_class_set = None
exc_instance_get = None
exc_instance_set = None
exc_traceback_get = None
exc_traceback_set = None
future = runtime.future_class()
try:
raise NameError("foo")
except NameError:
exc_class_set, exc_instance_set, exc_traceback_set = sys.exc_info()
future.set_exception()
# We could move to another thread at this point
try:
future.get()
except NameError:
exc_class_get, exc_instance_get, exc_traceback_get = sys.exc_info()
assert exc_class_set == exc_class_get
assert exc_instance_set == exc_instance_get
exc_traceback_list_set = list(reversed(traceback.extract_tb(exc_traceback_set)))
exc_traceback_list_get = list(reversed(traceback.extract_tb(exc_traceback_get)))
# All frames from the first traceback should be included in the
# traceback from the future.get() reraise
assert len(exc_traceback_list_set) < len(exc_traceback_list_get)
for i, frame in enumerate(exc_traceback_list_set):
assert frame == exc_traceback_list_get[i]
def test_future_supports_await_syntax(future):
async def get_value():
return await future
future.set(1)
assert run_async(get_value()) == 1
def test_future_supports_yield_from_syntax(future):
def get_value():
val = yield from future
return val
future.set(1)
assert run_async(get_value()) == 1
def test_filter_excludes_items_not_matching_predicate(future):
filtered = future.filter(lambda x: x > 10)
future.set([1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
assert filtered.get(timeout=0) == [11, 13, 15, 17, 19]
def test_filter_on_noniterable(future):
filtered = future.filter(lambda x: x > 10)
future.set(1)
with pytest.raises(TypeError):
filtered.get(timeout=0)
def test_filter_preserves_the_timeout_kwarg(future):
filtered = future.filter(lambda x: x > 10)
with pytest.raises(Timeout):
filtered.get(timeout=0)
def test_filter_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[False, True, Exception])
filtered = future.filter(raise_on_reuse_func)
future.set([1, 2])
assert filtered.get(timeout=0) == [2]
assert filtered.get(timeout=0) == [2] # First result is reused
assert filtered.get(timeout=0) == [2] # First result is reused
def test_join_combines_multiple_futures_into_one(futures):
joined = futures[0].join(futures[1], futures[2])
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
assert joined.get(timeout=0) == [0, 1, 2]
def test_join_preserves_timeout_kwarg(futures):
joined = futures[0].join(futures[1], futures[2])
futures[0].set(0)
futures[1].set(1)
# futures[2] is unset
with pytest.raises(Timeout):
joined.get(timeout=0)
def test_map_returns_future_which_passes_result_through_func(future):
mapped = future.map(lambda x: x + 10)
future.set(30)
assert mapped.get(timeout=0) == 40
def test_map_works_on_dict(future):
# Regression test for issue #64
mapped = future.map(lambda x: x["foo"])
future.set({"foo": "bar"})
assert mapped.get(timeout=0) == "bar"
def test_map_does_not_map_each_value_in_futures_iterable_result(future):
# Behavior changed in Pykka 2.0:
# This used to map each value in the future's result through the func,
# yielding [20, 30, 40].
mapped = future.map(lambda x: x + 10)
future.set([10, 20, 30])
with pytest.raises(TypeError):
mapped.get(timeout=0)
def test_map_preserves_timeout_kwarg(future):
mapped = future.map(lambda x: x + 10)
with pytest.raises(Timeout):
mapped.get(timeout=0)
def test_map_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[10, Exception])
mapped = future.map(raise_on_reuse_func)
future.set(30)
assert mapped.get(timeout=0) == 10
assert mapped.get(timeout=0) == 10 # First result is reused
def test_reduce_applies_function_cumulatively_from_the_left(future):
reduced = future.reduce(lambda x, y: x + y)
future.set([1, 2, 3, 4])
assert reduced.get(timeout=0) == 10
def test_reduce_accepts_an_initial_value(future):
reduced = future.reduce(lambda x, y: x + y, 5)
future.set([1, 2, 3, 4])
assert reduced.get(timeout=0) == 15
def test_reduce_on_noniterable(future):
reduced = future.reduce(lambda x, y: x + y)
future.set(1)
with pytest.raises(TypeError):
reduced.get(timeout=0)
def test_reduce_preserves_the_timeout_kwarg(future):
reduced = future.reduce(lambda x, y: x + y)
with pytest.raises(Timeout):
reduced.get(timeout=0)
def test_reduce_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[3, 6, Exception])
reduced = future.reduce(raise_on_reuse_func)
future.set([1, 2, 3])
assert reduced.get(timeout=0) == 6
assert reduced.get(timeout=0) == 6 # First result is reused
assert reduced.get(timeout=0) == 6 # First result is reused
|
sassoftware/catalog-service | catalogService_test/mockedModules/mint/db/schema.py | Python | apache-2.0 | 8,225 | 0.001945 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
rBuilder database schema
This includes rules to create from scratch all tables and indices used
by rBuilder. For migration from previous versions, see the
L{migrate<mint.migrate>} module.
'''
def _addTableRows(db, table, uniqueKey, rows):
"""
Adds rows to the table, if they do not exist already
The rows argument is a list of dictionaries
"""
if not rows:
return
cu = db.cursor()
inserts = []
sql = "SELECT 1 FROM %s WHERE %s = ?" % (table, uniqueKey)
tableCols = rows[0].keys()
for row in rows:
cu.execute(sql, row[unique | Key])
if cu.fetchall():
continue
inserts.append(tuple(row[c] for c in tableCols))
if not inserts:
| return False
sql = "INSERT INTO %s (%s) VALUES (%s)" % (table,
','.join(tableCols), ','.join('?' for c in tableCols))
cu.executemany(sql, inserts)
return True
def _createInventorySchema(db):
cu = db.cursor()
changed = False
if 'inventory_managed_system' not in db.tables:
cu.execute("""
CREATE TABLE "inventory_managed_system" (
"id" %(PRIMARYKEY)s,
"registration_date" timestamp with time zone NOT NULL,
"generated_uuid" varchar(64),
"local_uuid" varchar(64),
"ssl_client_certificate" varchar(8092),
"ssl_client_key" varchar(8092),
"ssl_server_certificate" varchar(8092)
) %(TABLEOPTS)s""" % db.keywords)
db.tables['inventory_managed_system'] = []
changed = True
if 'inventory_system_target' not in db.tables:
cu.execute("""
CREATE TABLE "inventory_system_target" (
"id" %(PRIMARYKEY)s,
"managed_system_id" integer
REFERENCES "inventory_managed_system" ("id")
DEFERRABLE INITIALLY DEFERRED,
"target_id" integer NOT NULL
REFERENCES "targets" ("targetid")
DEFERRABLE INITIALLY DEFERRED,
"target_system_id" varchar(256)
) %(TABLEOPTS)s""" % db.keywords)
db.tables['inventory_system_target'] = []
changed = True
return changed
if 'inventory_system' not in db.tables:
cu.execute("""
CREATE TABLE "inventory_system" (
"system_id" %(PRIMARYKEY)s,
) %(TABLEOPTS)s""" % db.keywords)
db.tables['inventory_system'] = []
changed = True
return changed
def _createJobsSchema(db):
cu = db.cursor()
changed = False
if 'job_types' not in db.tables:
cu.execute("""
CREATE TABLE job_types
(
job_type_id %(PRIMARYKEY)s,
name VARCHAR NOT NULL UNIQUE,
description VARCHAR NOT NULL
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_types'] = []
changed = True
changed |= _addTableRows(db, 'job_types', 'name',
[ dict(name="instance-launch", description='Instance Launch'),
dict(name="instance-update", description='Instance Update'),
dict(name="image-deployment", description='Image Upload'),
dict(name="platform-load", description='Platform Load'),
dict(name="software-version-refresh", description='Software Version Refresh'), ])
if 'job_states' not in db.tables:
cu.execute("""
CREATE TABLE job_states
(
job_state_id %(PRIMARYKEY)s,
name VARCHAR NOT NULL UNIQUE
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_states'] = []
changed = True
changed |= _addTableRows(db, 'job_states', 'name', [ dict(name='Queued'),
dict(name='Running'), dict(name='Completed'), dict(name='Failed') ])
if 'rest_methods' not in db.tables:
cu.execute("""
CREATE TABLE rest_methods
(
rest_method_id %(PRIMARYKEY)s,
name VARCHAR NOT NULL UNIQUE
) %(TABLEOPTS)s""" % db.keywords)
db.tables['rest_methods'] = []
changed = True
changed |= _addTableRows(db, 'rest_methods', 'name', [ dict(name='POST'),
dict(name='PUT'), dict(name='DELETE') ])
if 'jobs' not in db.tables:
cu.execute("""
CREATE TABLE jobs
(
job_id %(PRIMARYKEY)s,
job_type_id INTEGER NOT NULL
REFERENCES job_types ON DELETE CASCADE,
job_state_id INTEGER NOT NULL
REFERENCES job_states ON DELETE CASCADE,
job_uuid VARCHAR(64) NOT NULL UNIQUE,
created_by INTEGER NOT NULL
REFERENCES Users ON DELETE CASCADE,
created NUMERIC(14,4) NOT NULL,
modified NUMERIC(14,4) NOT NULL,
expiration NUMERIC(14,4),
ttl INTEGER,
pid INTEGER,
message VARCHAR,
error_response VARCHAR,
rest_uri VARCHAR,
rest_method_id INTEGER
REFERENCES rest_methods ON DELETE CASCADE,
rest_args VARCHAR
) %(TABLEOPTS)s""" % db.keywords)
db.tables['jobs'] = []
changed = True
if 'job_history' not in db.tables:
cu.execute("""
CREATE TABLE job_history
(
job_history_id %(PRIMARYKEY)s,
-- job_history_type needed
job_id INTEGER NOT NULL
REFERENCES jobs ON DELETE CASCADE,
timestamp NUMERIC(14,3) NOT NULL,
content VARCHAR NOT NULL
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_history'] = []
changed = True
if 'job_results' not in db.tables:
cu.execute("""
CREATE TABLE job_results
(
job_result_id %(PRIMARYKEY)s,
job_id INTEGER NOT NULL
REFERENCES jobs ON DELETE CASCADE,
data VARCHAR NOT NULL
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_results'] = []
changed = True
if 'job_target' not in db.tables:
cu.execute("""
CREATE TABLE job_target
(
job_id INTEGER NOT NULL
REFERENCES jobs ON DELETE CASCADE,
targetId INTEGER NOT NULL
REFERENCES Targets ON DELETE CASCADE
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_target'] = []
changed = True
if 'job_system' not in db.tables:
cu.execute("""
CREATE TABLE job_system
(
job_id INTEGER NOT NULL
REFERENCES jobs ON DELETE CASCADE,
system_id INTEGER NOT NULL
REFERENCES inventory_system ON DELETE CASCADE
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_system'] = []
changed = True
if 'job_managed_system' not in db.tables:
cu.execute("""
CREATE TABLE job_managed_system
(
job_id INTEGER NOT NULL
REFERENCES jobs ON DELETE CASCADE,
managed_system_id INTEGER NOT NULL
REFERENCES inventory_managed_systems ON DELETE CASCADE
) %(TABLEOPTS)s""" % db.keywords)
db.tables['job_managed_system'] = [ |
zenshade/vim_config | bundle/UltiSnips-2.1/plugin/UltiSnips/text_objects/_shell_code.py | Python | mit | 1,117 | 0.001791 | #!/usr/bin/env python
# encoding: utf-8
import os
import subprocess
import stat
import tempfile
from UltiSnips.compatibility import as_unicode
from UltiSnips.text_objects._base import NoneditableTextObject
class ShellCode(NoneditableTextObject):
def __init__(self, parent, token):
NoneditableTextObject.__init__(self, parent, token)
self._code = token.code.replace("\\`", "`")
def _update(self, done, not_done):
# Write the code to a temporary file
handle, path = tempfile.mkstemp | (text=True)
os.write(handle, self._code.encode("utf-8"))
os.close(handle)
os.chmod(path, stat.S_IRWXU)
# Execute the file and read stdout
proc = subprocess.Popen(path, shell=True, stdout=subprocess.PIPE)
proc.wait()
output = as_unicode(proc.stdout.read())
if len(out | put) and output[-1] == '\n':
output = output[:-1]
if len(output) and output[-1] == '\r':
output = output[:-1]
os.unlink(path)
self.overwrite(output)
self._parent._del_child(self)
return True
|
sidv1993/Graph-Traversal-and-Game-Playing | Graph-Traversal/GraphTraversal.py | Python | apache-2.0 | 13,929 | 0.031948 |
# Defining the goal state
goal_items = [1, 2, 3, 8, 0, 4, 7, 6, 5]
visited_vertices = []
# Defining imput states with increasing levels of difficulty
easy_initial_state = [1, 3, 4, 8, 6, 2, 7, 0, 5]
medium_initial_state = [2, 8, 1, 0, 4, 3, 7, 6, 5]
hard_initial_state = [5, 6, 7, 4, 0, 8, 3, 2, 1]
max = 0
import sys
import time
'''Defining a class vertex with the following attributes:
vertex.items is the current state of the elements of 8-puzzle in the vertex
vertex.ancestor is the ancestor of the vertex
vertex.operator is the decision to move the null "0" element in the 8-puzzle
vertex.depth is the depth level of the vertex'''
class vertex:
def __init__( self, items, ancestor, operator, depth, cost ):
# Contains the items of the vertex
self.items = items
# Contains the ancestor vertex that generated this vertex
self.ancestor = ancestor
# Contains the operation that generated this vertex from the ancestor
self.operator = operator
# Contains the depth of this vertex (ancestor.depth +1)
self.depth = depth
# Contains the path cost of this vertex from depth 0. Not used for depth/breadth first.
self.cost = cost
# Main method
def main():
print("------- SEARCH RUN MENU ------")
print("1. BFS - Easy")
print("2. BFS - Medium")
print("3. BFS - Hard")
print("4. DFS - Easy")
print("5. DFS - Medium")
print("6. DFS - Hard")
print("7. IDS - Easy")
print("8. IDS - Medium")
print("9. IDS - Hard")
print("10. A* - Easy")
print("11. A* - Medium")
print("12. A* - Hard")
print("13. Greedy BFS - Easy")
print("14. Greedy BFS - Medium")
print("15. Greedy BFS - Hard")
print("16. IDA* - Easy")
print("17. IDA* - Medium")
print("18. IDA* - Hard")
n = raw_input("Enter what would you like to run: ")
n = int(n)
start = time.clock() #Starting time.clock() to count time taken for the function to execute
if(n == 1):
print "Initial Puzzle State:"
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = bfs( easy_initial_state, goal_items )
elif(n == 2):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = bfs( medium_initial_state, goal_items )
elif(n == 3):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = bfs( hard_initial_state, goal_items )
elif(n == 4):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = dfs( easy_initial_state, goal_items )
elif(n == 5):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = dfs( medium_initial_state, goal_items )
elif(n == 6):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = dfs( hard_initial_state, goal_items )
elif(n == 7):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = ids( easy_initial_state, goal_items )
elif(n == 8):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = ids( medium_initial_state, goal_items )
elif(n == 9):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = ids( hard_initial_state, goal_items )
elif(n == 10):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = astar( easy_initial_state, goal_items )
elif(n == 11):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = astar( medium_initial_state, goal_items )
elif(n == 12):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = astar( hard_initial_state, goal_items )
elif(n == 13):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( easy_initial_state, goal_items )
elif(n == 14):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( medium_initial_state, goal_items )
elif(n == 15):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( hard_initial_state, goal_items )
elif(n == 16):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: | "
output = idastar( easy_initial_state, goal_items )
elif(n == 17):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = idastar( medium_initial_state, goal_items )
elif(n == 18):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
| output = idastar( hard_initial_state, goal_items )
else:
print "Wrong Input!"
print "Direction of Moves:"
print output
print "Moves taken: ", len(output)
print "Nodes visited: ", len(visited_vertices) - 1
print "Max. length of Node List: ", max
print "Time taken: ", time.clock() - start
def print_vertices( vertex_items ):
print "| %i %i %i |" % (vertex_items[0], vertex_items[1], vertex_items[2])
print "| %i %i %i |" % (vertex_items[3], vertex_items[4], vertex_items[5])
print "| %i %i %i |" % (vertex_items[6], vertex_items[7], vertex_items[8])
def traverse_left( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [0, 3, 6]:
temp = new_state[index - 1] #Exchanging null element with positioned element
new_state[index - 1] = new_state[index]
new_state[index] = temp
return new_state
else: #
return None
#Function defined for moving the "null" element one place right in the 8-puzzle
def traverse_right( items ):
# Perform object copy
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [2, 5, 8]:
# Swap the values.
temp = new_state[index + 1]
new_state[index + 1] = new_state[index]
new_state[index] = temp
return new_state
else:
#Return non if no moves possible
return None
#Function defined for moving the "null" element one place up in the 8-puzzle
def traverse_up( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [0, 1, 2]:
# Swap the values.
temp = new_state[index - 3]
new_state[index - 3] = new_state[index]
new_state[index] = temp
return new_state
else:
# Can't move it, return None
return None
#Function defined for moving the "null" element one place up in the 8-puzzle
def traverse_down( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [6, 7, 8]:
# Swap the values.
temp = new_state[index + 3]
new_state[index + 3] = new_state[index]
new_state[index] = temp
return new_state
else:
# Can't move, return None
return None
#Defining a function for initializing a node
def vertex_init(items, ancestor, operator, depth, cost ):
return vertex(items, ancestor, operator, depth, cost )
def expand_vertex( vertex, vertices, visited_vertices):
#Returns a list of expanded child vertices
child_vertices = []
if vertex.items not in visited_vertices:
visited_vertices.extend([vertex.items])
child_vertices.append( vertex_init( traverse_up( vertex.items ), vertex, 'UP ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_down( vertex.items ), vertex, 'DOWN ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_left( vertex.items ), vertex, 'LEFT ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_right( vertex.items), vertex, 'RIGHT ->', vertex.depth + 1, 0 ) )
child_vertices = [vertex for vertex in child_vertices if vertex.items != None]
return child_vertices
#Defining a breadth first search function
def bfs( start, goal ):
global max
vertices = []
# Create the queue with the root vertex in it.
vertices.append( vertex_init( start, None, None, 0, 0 ) )
while True:
# If no states exist
if len( vertices ) == 0:
return None
vertex = vertices.pop(0)
#returning list of directions/moves taken to get to goal state
if vertex.items == goal:
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert(0, temp.operator)
if temp.depth == 1: break
|
open-craft/xblock-poll | tests/integration/test_submit_button.py | Python | agpl-3.0 | 2,450 | 0.001224 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 McKinsey Academy
#
# Authors:
# Jonathan Piacenti <jonathan@opencraft.com>
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from tests.integration.base_test import PollBaseTest
from unittest import skip
class TestSubmitButton(PollBaseTest):
@skip("Flaky test")
def test_submit_button(self):
"""
Goal: We have to make sure that submit button gets disabled right
after it is clicked. We cannot test with 100% assurance by adding a
method in other tests such as test_functions.py because in that case
submit button is anyway disabled after the ajax request.
We can utilize infinite submission feature and check if | the submit
button was disabled (because of js) and then re-enabled (because of
ajax request).
"""
self.go_to_page('Poll Submit Button')
# Find all the radio choices
answer_elements = self.browser.find_elements_by_css_selector('label.poll-answer-text')
# Select the | first choice
answer_elements[1].click()
# When an answer is selected, make sure submit is enabled.
self.wait_until_exists('input[name=poll-submit]:enabled')
submit_button = self.get_submit()
submit_button.click()
# Make sure that submit button is disabled right away
self.assertFalse(submit_button.is_enabled())
self.wait_until_clickable(self.browser.find_element_by_css_selector('.poll-voting-thanks'))
# Wait until the ajax request is finished and submit button is enabled
self.assertTrue(self.get_submit().is_enabled())
|
botswana-harvard/edc-map | edc_map/admin.py | Python | gpl-2.0 | 1,678 | 0 | from django.contrib import admin
from django_revision.modeladmin_mixin import ModelAdminRevisionMixin
from edc_base.modeladmin_mixins import (
ModelAdminNextUrlRedirectMixin, ModelAdminFormInstructionsMixin,
ModelAdminFormAutoNumberMixin,
ModelAdminReadOnlyMixin, ModelAdminAuditFieldsMixin)
from .admin_site import edc_map_admin
from .forms import ContainerForm, InnerContainerForm
from .models import Container, InnerContainer
class ModelAdminMixin(ModelAdminFormInstructionsMixin,
ModelAdminNextUrlRedirectMixin,
ModelAdminFormAutoNumberMixin,
ModelAdminRevisionMixin,
ModelAdminAuditFieldsMixin,
ModelAdminReadOnlyMixin,
admin.ModelAdmin):
list_per_page = 10
date_hierarchy = 'modified'
empty_value_display = '-'
@admin.register(Container, site=edc_map_admin)
class ContainerAdmin(ModelAdminMixin):
form = ContainerForm
list_per_page = 10
list_display = ('name', 'map_area', 'created', 'modified')
list_filter = (
'created | ',
'modified',
'map_area',
'map_area',
'hostname_modified')
search_fields = ('map_area', 'id')
@admin.register(InnerContainer, site=edc_map_admin)
class InnerContainerAdmin(ModelAdminMixin):
form = InnerContainerForm
list_per_page = 10
list_display = ('map_area', 'de | vice_id', 'name', 'created', 'modified')
list_filter = (
'created',
'modified',
'map_area',
'container__name',
'name',
'hostname_modified')
search_fields = ('device_id', 'name', 'id')
|
wendlers/usherpa-pysherpa | test-src/packet.py | Python | lgpl-2.1 | 2,349 | 0.022563 | ##
# This file is part of the uSherpa Python Library project
#
# Copyright (C) 2012 Stefan Wendler <sw@kaltpost.de>
#
# The uSherpa Python Library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# uSherpa Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the JSherpa firmware; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
##
'''
This file is part of the uSherpa Python Library project
'''
from array import array
from usherpa.comm import Packet, PacketException
print "Packet Test"
# incomplete packet
p1 = Packet()
print "p1: " + p1.__str__()
# fill from bytes
pNull = array('B', [0x24, 0x04, 0x00, 0x28])
try:
p1.fromByteArray(pNull)
print "p1: " + p1.__str__()
except PacketException as e:
print e
# from single parameters, including CRC
try:
p2 = Packet()
p2.fromFields(0x24, 0x04, 0x02, None, 0x2a)
print "p2: " + p2.__str__()
except PacketException as e:
print e
# from single parameters, excluding CRC
try:
p3 | = Packet()
p3.fromFields(0x24, 0x04, 0x02, None)
print "p3: " + p3.__str__()
except PacketException as e:
print e
# again from byte array
try:
p4 = Packet()
p4.fromByteArray(array('B', [0x24, 0x07, 0x06, 0x22, 0x20, 0x4e, 0xc1]))
print "p4: " + p4.__str__()
except PacketException as e:
print e
# consume from byte array with leading garbage
try:
data = array('B', [ 0x18, 0x01, 0x24, 0x07, 0x06, 0x22, 0x20, 0x4e, | 0xc1, 0xFF, 0x24, 0x06, 0x05, 0x13, 0x03, 0x45, ])
p5 = Packet()
for b in data:
p5.addByte(b)
if p5.isComplete() :
print "Found packet in stream: " + p5.__str__()
p5.clear()
except PacketException as e:
print e
# from single parameters, including WRONG CRC
try:
p6 = Packet()
p6.fromFields(0x24, 0x04, 0x02, None, 0x20)
print "p6: " + p6.__str__()
except PacketException as e:
print e
|
ownport/ansiblite | src/ansiblite/parsing/splitter.py | Python | gpl-3.0 | 10,657 | 0.001971 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansiblite.errors import AnsibleParserError
from ansiblite.utils._text import to_text
from ansiblite.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
args = to_text(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleParserError("error parsing argument string, try quoting the entire line.")
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
q | uote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this fun | ction counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx,item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx,token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside |
ericholscher/django | django/contrib/contenttypes/views.py | Python | bsd-3-clause | 3,355 | 0.001192 | from __future__ import unicode_literals
from django import http
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404 | (_("Content type %(ct_id)s object has no associated model") %
{'ct_id': content_type_id})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDo | esNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
{'ct_name': content_type.name})
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if Site._meta.installed:
opts = obj._meta
# First, look for an many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = get_current_site(request).domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.scheme
return http.HttpResponseRedirect('%s://%s%s'
% (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
|
didrocks/quickly | data/templates/ubuntu-application/project_root/tests/test_lint.py | Python | gpl-3.0 | 1,023 | 0.013685 | #!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-ta | bs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
import unittest
import subprocess
class TestPylint(unittest.TestCase):
def test_project_errors_only(self):
'''run pylint in error only mode
your code may well work even with pylint errors
but have some unusual code'''
return_code = subproces | s.call(["pylint", '-E', 'python_name'])
# not needed because nosetests displays pylint console output
#self.assertEqual(return_code, 0)
# un-comment the following for loads of diagnostics
#~ def test_project_full_report(self):
#~ '''Only for the brave
#~
#~ you will have to make judgement calls about your code standards
#~ that differ from the norm'''
#~ return_code = subprocess.call(["pylint", 'python_name'])
if __name__ == '__main__':
'you will get better results with nosetests'
unittest.main()
|
BD2KGenomics/slugflow | src/toil/test/src/workerTest.py | Python | apache-2.0 | 4,647 | 0.002152 | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from toil.common import Config
from toil.job import CheckpointJobDescription, JobDescription
from toil.jobStores.fileJobStore import FileJobStore
from toil.test import ToilTest, travis_test
from toil.worker import nextChainable
class WorkerTests(ToilTest):
"""Test miscellaneous units of the worker."""
def setUp(self):
super(WorkerTests, self).setUp()
path = self._getTestJobStorePath()
self.jobStore = FileJobStore(path)
self.config = Config()
self.config.jobStore = 'file:%s' % path
self.jobStore.initialize(sel | f.config)
self.jobNumber = 0
@travis_test
def testNextChainable(self):
"""Make sure chainable/non-chainable jobs are identified correctly."" | "
def createTestJobDesc(memory, cores, disk, preemptable, checkpoint):
"""
Create a JobDescription with no command (representing a Job that
has already run) and return the JobDescription.
"""
name = 'job%d' % self.jobNumber
self.jobNumber += 1
descClass = CheckpointJobDescription if checkpoint else JobDescription
jobDesc = descClass(requirements={'memory': memory, 'cores': cores, 'disk': disk, 'preemptable': preemptable}, jobName=name)
# Assign an ID
self.jobStore.assignID(jobDesc)
# Save and return the JobDescription
return self.jobStore.create(jobDesc)
for successorType in ['addChild', 'addFollowOn']:
# Try with the branch point at both child and follow-on stages
# Identical non-checkpoint jobs should be chainable.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, False)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
chainable = nextChainable(jobDesc1, self.jobStore, self.config)
self.assertNotEqual(chainable, None)
self.assertEqual(jobDesc2.jobStoreID, chainable.jobStoreID)
# Identical checkpoint jobs should not be chainable.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, True)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there is no child we should get nothing to chain.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there are 2 or more children we should get nothing to chain.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, True, False)
jobDesc3 = createTestJobDesc(1, 2, 3, True, False)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
getattr(jobDesc1, successorType)(jobDesc3.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# If there is an increase in resource requirements we should get nothing to chain.
reqs = {'memory': 1, 'cores': 2, 'disk': 3, 'preemptable': True, 'checkpoint': False}
for increased_attribute in ('memory', 'cores', 'disk'):
jobDesc1 = createTestJobDesc(**reqs)
reqs[increased_attribute] += 1
jobDesc2 = createTestJobDesc(**reqs)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
# A change in preemptability from True to False should be disallowed.
jobDesc1 = createTestJobDesc(1, 2, 3, True, False)
jobDesc2 = createTestJobDesc(1, 2, 3, False, True)
getattr(jobDesc1, successorType)(jobDesc2.jobStoreID)
self.assertEqual(None, nextChainable(jobDesc1, self.jobStore, self.config))
|
kn45/RNNRegressor | rnn_regressor/mlfutil.py | Python | mit | 3,971 | 0.000252 | # -*- coding=utf-8 -*-
import cPickle
import numpy as np
import os
import sys
"""Common tools for this project.
Utils are defined in this module for sharing.
"""
PROJ_DIR = os.path.split(os.path.realpath(__file__))[0]
def draw_progress(iteration, total, pref='Progr | ess:', suff='',
decimals=1, barlen=50):
"""Call in a loop to create terminal progress bar
"""
formatStr = "{0:." + str(decimals) + "f}"
pcts = formatStr.format(100 * (iteration / float(total)))
filledlen = int(round(barlen * iteration / float(total)))
bar = '█' * filledlen + '-' * (barlen - filledlen)
out_str = '\r%s |%s| %s%s %s' % (pref, bar, pcts, '%', suff)
out_str = '\x1b[0;34;40m' + out_str + '\x1b[0m'
| sys.stdout.write(out_str),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
class CatEncoder(object):
"""Transform category to global uniq index
"""
def __init__(self):
self.cats = {}
def build_dict(self, ifnames, columns):
"""need override
ifnames are ',' separated
fields are ',' separated, from 0. means from ... to
"""
self.cats = {}
cat_idx = 0
ifnames = ifnames.split(',')
cols = columns.split(',')
col_st = int(cols[0])
col_ed = int(cols[1]) if len(cols) > 1 else -1
for ifname in ifnames:
with open(ifname) as f:
data = map(lambda l: l.strip('\n').split('\t'), f.readlines())
for fields in data:
for idx in xrange(col_st, len(fields) if col_ed < 0 else
col_ed+1):
if fields[idx] not in self.cats and fields[idx] != '':
self.cats[fields[idx]] = cat_idx
cat_idx += 1
def save_dict(self, ofname):
with open(ofname, 'w') as fo:
for key in self.cats:
print >> fo, \
'\t'.join([key.encode('utf8'), str(self.cats[key])])
def load_dict(self, dfname):
self.cats = {}
with open(dfname) as f:
data = [l.strip('\n').decode('utf8').split('\t')
for l in f.readlines()]
for fields in data:
self.cats[fields[0]] = int(fields[1])
def n_cat(self):
return len(self.cats)
def cat2idx(self, cat):
if cat in self.cats:
return self.cats[cat]
else:
return -1
def cat2onehot(self, cat, missing=False):
idx = self.cat2idx(cat)
if missing:
res = [0] * (self.n_cat() + 1)
idx = idx if idx >= 0 else (len(res) - 1)
res[idx] = 1
return res
else:
res = [0] * self.n_cat
if idx > 0:
res[idx] = 1
return res
class PortEncoder(CatEncoder):
def init(self, ifnames='data_all/data_all.tsv', cols='11,11'):
self.build_dict(ifnames, cols)
def encode(self, port):
return self.cat2onehot(port, missing=True)
class CharEncoder(CatEncoder):
def build_dict(self, ifname):
"""PAD: 0
UNK: -1
"""
self.cats = {} # clean inner dict
cat_idx = 1
with open(ifname) as f:
data = [x.strip('\n').split('\t')[1] for x in f.readlines()]
for sent in data:
for char in sent.decode('utf8'):
if char not in self.cats:
self.cats[char] = cat_idx
cat_idx += 1
self.cats['UNK'] = cat_idx
def fill_missing_value(rec_fields):
for idx, col in enumerate(rec_fields):
if col == '':
rec_fields[idx] = '-999.0'
return rec_fields
if __name__ == '__main__':
print "PROJ_DIR:\t" + PROJ_DIR
from time import sleep
for i in range(50):
sleep(0.05)
draw_progress(i, 49, pref='Progress:')
|
edx/edx-platform | common/djangoapps/student/tests/test_userstanding.py | Python | agpl-3.0 | 4,098 | 0.002928 | """
These are tests for disabling and enabling student accounts, and for making sure
that students with disabled accounts are unable to access the courseware.
"""
import unittest
from django.conf import settings
from django.test import Client, TestCase
from django.urls import reverse
from common.djangoapps.student.models import UserStanding
from common.djangoapps.student.tests.factories import UserFactory, UserStandingFactory
class UserStandingTest(TestCase):
"""test suite for user standing view for enabling and disabling accounts"""
def setUp(self):
super().setUp()
# create users
self.bad_user = UserFactory.create(
username='bad_user',
)
self.good_user = UserFactory.create(
username='good_user',
)
self.non_staff = UserFactory.create(
username='non_staff',
)
self.admin = UserFactory.create(
username='admin',
is_staff=True,
)
# create clients
self.bad_user_client = Client()
self.good_user_client = Client()
self.non_staff_client = Client()
self.admin_client = Client()
for user, client in [
(self.bad_user, self.bad_user_client),
(self.good_user, self.good_user_client),
(self.non_staff, self.non_staff_client),
(self.admin, self.admin_client),
]:
client.login(username=user.username, password='test')
UserStandingFactory.create(
user=self.bad_user,
account_status=UserStanding.ACCOUNT_DISABLED,
changed_by=self.admin
)
# set stock url to test disabled accounts' access to site
self.some_url = '/'
# since it's only possible to disable accounts from lms, we're going
# to skip tests for cms
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_can_access_manage_account_page(self):
response = self.admin_client.get(reverse('manage_user_standing'), {
'user': self.admin,
})
assert response.status_code == 200
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_disable_account(self):
assert UserStanding.objects.filter(user=self.good_user).count() == 0
response = self.admin_client.post(reverse('disable_account_ajax'), { # lint-amnesty, pylint: disable=unused-variable
'username': self.good_user.username,
'account_action': 'disable',
})
assert UserStanding.objects.get(user=self.good_user).account_status == UserStanding.ACCOUNT_DISABLED
def test_disabled_account_403s(self):
response = self.bad_user_client.get(self.some_url)
assert response.status_code == 403
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_reenable_account(self):
response = self.admin_client.post(reverse('disable_account_ajax'), { # lint-amnesty, pylint: disable=unused-variable
'username': self.bad_user.username,
'account_action': 'reenable'
})
assert UserStanding.objects.get(user=self.bad_user).account_status == UserStanding.ACCOUNT_ENABLED
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_non_staff_cant_access_disable_view(self):
response = self.non_staff_client.get(reverse('manage_user_standing'), {
'user': self.non_staff,
} | )
assert response.status_code == 404
@unittest.sk | ipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_non_staff_cant_disable_account(self):
response = self.non_staff_client.post(reverse('disable_account_ajax'), {
'username': self.good_user.username,
'user': self.non_staff,
'account_action': 'disable'
})
assert response.status_code == 404
assert UserStanding.objects.filter(user=self.good_user).count() == 0
|
srikrishna3118/Lora_GW | PythonFiles/Config.py | Python | gpl-3.0 | 632 | 0.015823 | _author_ = 'Srikrishna'
import ConfigParser
import sys
import logging
logger = logging.getLogger(__name__)
sections = 'Connection','Configuration','Testing'
class MyConfiguration(object):
def __init__(self, *file_names):
parser = ConfigParser.ConfigParser()
parser.optionxform = str # make option names case sensitive
| found = parser.read(file_names)
if not found:
raise ValueError('No config file found!')
for names in section | s:
self.__dict__.update(parser.items(names))
#config = MyConfiguration('lora.conf') # define the configurations in this file
|
hankcs/HanLP | plugins/hanlp_restful/hanlp_restful/__init__.py | Python | apache-2.0 | 14,166 | 0.004078 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-11-29 17:48
import json
from typing import Union, List, Optional, Dict, Any, Tuple
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from hanlp_common.document import Document
try:
# noinspection PyUnresolvedReferences
import requests
def _post(url, form: Dict[str, Any], headers: Dict[str, Any], timeout=5) -> str:
response = requests.post(url, json=form, headers=headers, timeout=timeout)
if response.status_code != 200:
raise HTTPError(url, response.status_code, response.text, response.headers, None)
return response.text
except ImportError:
def _post(url, form: Dict[str, Any], headers: Dict[str, Any], timeout=5) -> str:
request = Request(url, json.dumps(form).encode())
for k, v in headers.items():
request.add_header(k, v)
return urlopen(request, timeout=timeout).read().decode()
class HanLPClient(object):
def __init__(self, url: str, auth: str = None, language=None, timeout=5) -> None:
"""
Args:
url (str): An API endpoint to a service provider.
auth (str): An auth key licenced from a service provider.
language (str): The default language for each :func:`~hanlp_restful.HanLPClient.parse` call.
Contact the service provider for the list of languages supported.
Conventionally, ``zh`` is used for Chinese and ``mul`` for multilingual.
Leave ``None`` to use the default language on server.
timeout (int): Maximum waiting time in seconds for a request.
"""
super().__init__()
self._language = language
self._timeout = timeout
self._url = url
if auth is None:
import os
auth = os.getenv('HANLP_AUTH', None)
self._auth = auth
def parse(self,
text: Union[str, List[str]] = None,
tokens: List[List[str]] = None,
tasks: Optional[Union[str, List[str]]] = None,
skip_tasks: Optional[Union[str, List[str]]] = None,
language: str = None,
) -> Document:
"""
Parse a piece of text.
Args:
text: A document (str), or a list of sentences (List[str]).
tokens: A list of sentences where each sentence is a list of tokens.
tasks: The tasks to predict.
skip_tasks: The tasks to skip.
language: The language of input text or tokens. ``None`` to use the default language on server.
Returns:
A :class:`~hanlp_common.document.Document`.
Raises:
HTTPError: Any errors happening on the Internet side or the server side. Refer to the | ``code`` and ``msg``
of the exception for more details. A list of common errors :
- ``400 Bad Request`` indicates that the server cannot process the request due to a client
fault (e.g., text too long, language unsupported).
- ``401 Unauthorized`` indicates that the request lacks **valid** ``auth`` credentials for the API.
- ``422 Unprocessable Entity`` i | ndicates that the content type of the request entity is not in
proper json format.
- ``429 Too Many Requests`` indicates the user has sent too many requests in a given
amount of time ("rate limiting").
"""
assert text or tokens, 'At least one of text or tokens has to be specified.'
response = self._send_post_json(self._url + '/parse', {
'text': text,
'tokens': tokens,
'tasks': tasks,
'skip_tasks': skip_tasks,
'language': language or self._language
})
return Document(response)
def __call__(self,
text: Union[str, List[str]] = None,
tokens: List[List[str]] = None,
tasks: Optional[Union[str, List[str]]] = None,
skip_tasks: Optional[Union[str, List[str]]] = None,
language: str = None,
) -> Document:
"""
A shortcut of :meth:`~hanlp_restful.HanLPClient.parse`.
"""
return self.parse(text, tokens, tasks, skip_tasks)
def about(self) -> Dict[str, Any]:
"""Get the information about server and your client.
Returns:
A dict containing your rate limit and server version etc.
"""
info = self._send_get_json(self._url + '/about', {})
return Document(info)
def _send_post(self, url, form: Dict[str, Any]):
request = Request(url, json.dumps(form).encode())
self._add_headers(request)
return self._fire_request(request)
def _fire_request(self, request):
return urlopen(request, timeout=self._timeout).read().decode()
def _send_post_json(self, url, form: Dict[str, Any]):
headers = dict()
if self._auth:
headers['Authorization'] = f'Basic {self._auth}'
return json.loads(_post(url, form, headers, self._timeout))
def _send_get(self, url, form: Dict[str, Any]):
request = Request(url + '?' + urlencode(form))
self._add_headers(request)
return self._fire_request(request)
def _add_headers(self, request):
if self._auth:
request.add_header('Authorization', f'Basic {self._auth}')
def _send_get_json(self, url, form: Dict[str, Any]):
return json.loads(self._send_get(url, form))
def text_style_transfer(self, text: Union[str, List[str]], target_style: str, language: str = None) \
-> Union[str, List[str]]:
""" Text style transfer aims to change the style of the input text to the target style while preserving its
content.
Args:
text: Source text.
target_style: Target style.
language: The language of input text. ``None`` to use the default language.
Examples::
HanLP.text_style_transfer(['国家对中石油抱有很大的期望.', '要用创新去推动高质量的发展。'],
target_style='gov_doc')
# Output:
[
'国家对中石油寄予厚望。',
'要以创新驱动高质量发展。'
]
HanLP.text_style_transfer('我看到了窗户外面有白色的云和绿色的森林', target_style='modern_poetry')
# Output:
'我看见窗外的白云绿林'
Returns:
Text or a list of text of the target style.
"""
response = self._send_post_json(self._url + '/text_style_transfer',
{'text': text, 'target_style': target_style,
'language': language or self._language})
return response
def semantic_textual_similarity(self, text: Union[Tuple[str, str], List[Tuple[str, str]]], language: str = None) \
-> Union[float, List[float]]:
""" Semantic textual similarity deals with determining how similar two pieces of texts are.
Args:
text: A pair or pairs of text.
language: The language of input text. ``None`` to use the default language.
Examples::
HanLP.semantic_textual_similarity([
('看图猜一电影名', '看图猜电影'),
('无线路由器怎么无线上网', '无线上网卡和无线路由器怎么用'),
('北京到上海的动车票', '上海到北京的动车票'),
])
# Output:
[
0.9764469861984253, # Similarity of ('看图猜一电影名', '看图猜电影')
0.0, # Similarity of ('无线路由器怎么无线上网', '无线上网卡和无线路由器怎么用')
0.003458738327026367 # Similarity of ('北京到上海的动车票', '上海到北京的动车票')
]
Returns:
Similarities.
"""
response = self._send_post_json(self._url + '/semantic_textual_similarity',
{'text': text, 'language': language or self._language})
return response
def coreference_resolution(self, text: Optional[str] = None, tokens: Optional[List[List[str]]] = None,
speakers: Optional[List[str]] = None, language: Op |
vecnet/om | website/apps/ts_om/tests/utils/test_scenario_name_with_next_number.py | Python | mpl-2.0 | 1,719 | 0.004654 | # -*- coding: utf-8 -*-
#
# This file is part of the VecNet Open | Malaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at ht | tps://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.test.testcases import TestCase
from website.apps.ts_om.utils import scenario_name_with_next_number
class GetNumberAtTheEndOfStringTest(TestCase):
def test_1(self):
self.assertEqual(scenario_name_with_next_number(""), " - 2")
def test_2(self):
self.assertEqual(scenario_name_with_next_number(" - 2"), "- 3")
def test_3(self):
self.assertEqual(scenario_name_with_next_number("Scenario"), "Scenario - 2")
def test_4(self):
self.assertEqual(scenario_name_with_next_number("Scenario"), "Scenario - 2")
def test_5(self):
self.assertEqual(scenario_name_with_next_number("Scenario 1"), "Scenario 2")
def test_6(self):
self.assertEqual(scenario_name_with_next_number("Scenario - 2"), "Scenario - 3")
def test_7(self):
self.assertEqual(scenario_name_with_next_number("Scenario - 9"), "Scenario - 10")
def test_8(self):
self.assertEqual(scenario_name_with_next_number("Scenario - 999"), "Scenario - 1000")
def test_9(self):
self.assertEqual(scenario_name_with_next_number("Scenario999"), "Scenario1000")
def test_10(self):
self.assertEqual(scenario_name_with_next_number("Scenario #123"), "Scenario #124")
|
kilikkuo/py_simple_host_target | simple_host_target/simple_host_target/server.py | Python | mit | 6,080 | 0.006743 | import os
import sys
import time
import select
import socket
import traceback
import threading
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from simple_host_target.definition import OP_HT_DATA_BEGIN, OP_HT_DATA_END, OP_HT_DATA_MID
def msg_c(a, msg):
print("[%s] "%(str(a)) + msg)
class Server(object):
def __init__(self, ip = "", port = 5000, max_client = 1):
assert (ip != "")
self.socket = socket.socket()
self.socket.bind((ip, port))
self.socket.listen(max_client)
self.clients = {}
self.thread = threading.Thread(target=self.__loop_for_connections)
self.thread.daemon = True
self.evt_break = threading.Event()
self.evt_break.clear()
self.clients_temp_data = {}
self.callbacks_info = {}
def __close_connections(self):
try:
while len(self.clients) > 0:
c, a = self.clients.popitem()
print("Closing connection [%s] ..."%(str(a)))
c.close()
if self.socket:
self.socket.close()
except:
traceback.print_exc()
def shutdown(self):
print("[Server] Shutting down ...")
self.__close_connections()
if self.thread:
self.evt_break.set()
self.thread.join()
self.thread = None
print("[Server] Shutting down ... end")
def __loop_for_connections(self):
read_list = [self.socket]
try:
while 1:
if self.evt_break.is_set():
break
readable, writable, errored = select.select(read_list, [], [], 0)
# Data arrived.
for s in readable:
if s is self.socket:
# Accept connections from client's request.
client, addr = self.socket.accept()
self.clients[client] = addr
read_list.append(client)
print("[%s] Connected !"%(str(addr)))
else:
client = None
for c, a in list(self.clients.items()):
if c is s:
# Collect & append data.
self.__check_for_recv(c, a)
# Analyze if data is | received completely
if self.__extract_specific_task(c, a):
self.clients.pop(c)
| read_list.remove(c)
c.close()
print(" REMOVED & CLOSE a socket client !!!!!!! ")
time.sleep(0.01)
except:
traceback.print_exc()
print("[Exception] during server's loop for connections.")
finally:
self.__close_connections()
def run_server(self, callbacks_info = {}):
# Register the callback function when specific message is received.
# e.g.
# { 0 : { "pre" : OP_HT_DATA_BEGIN,
# "post": OP_HT_DATA_END,
# "mid" : OP_HT_DATA_MID,
# "callback" : callbak }}
#
# Data in between "pre" & "mid" is a repr form of ip-ports information dictionary.
# e.g. ip_port_pairs = { "host_ip" : string of IP,
# "host_port" : int of PORT,
# "sender_ip" : string of IP,
# "sender_port" : int of PORT}
#
# Data in between "mid" & "post" is a pickled bitstream.
#
# "callback" is going to be invoked when a *complete* message is received.
# *complete* - enclosed by "pre" & "post"
assert (self.thread != None)
for v in callbacks_info.values():
assert "pre" in v and "post" in v and "callback" in v and callable(v["callback"])
print("Start the server ...")
self.callbacks_info = callbacks_info
if self.thread and not self.thread.is_alive():
self.thread.start()
def __extract_specific_task(self, c, a):
# Check the completeness of received data, and callback if it's finished.
data = self.clients_temp_data.get((c, a), b"")
for info in self.callbacks_info.values():
pre_idx = data.find(info["pre"])
post_idx = data.find(info["post"])
if pre_idx >= 0 and post_idx >= 0:
if info.get("mid", "") and data.find(info["mid"]) >= 0:
mid_idx = data.find(info["mid"])
ipport = data[pre_idx+len(info["pre"]):mid_idx]
task = data[mid_idx+len(info["mid"]):post_idx]
info["callback"](ipport, task)
self.clients_temp_data.pop((c, a))
return True
else:
task = data[pre_idx+len(info["pre"]):post_idx]
info["callback"](task)
self.clients_temp_data.pop((c, a))
return True
return False
def __check_for_recv(self, c, a):
data = c.recv(2048)
if data and len(data):
self.clients_temp_data[(c,a)] = self.clients_temp_data.get((c,a), b"") + data
if __name__ == "__main__":
def callbak(msg):
print(msg)
srv = Server(ip = "127.0.0.1")
srv.run_server(callbacks_info = { 0 : { "pre" : OP_HT_DATA_BEGIN,
"post": OP_HT_DATA_END,
"mid" : OP_HT_DATA_MID,
"callback" : callbak }})
try:
for line in sys.stdin:
print(line)
except:
traceback.print_exc()
print("[Exception] while lining in ")
srv.shutdown()
|
JoshStegmaier/django-nimbus | nimbus/filehandling/fileupload.py | Python | mit | 295 | 0.00678 | import os.path
def handle_uploaded_file(f, path, new_name=Non | e):
if new_name:
file_name = new_name
else:
file_name = f.name
destination = open(os.path.join(path, file_name), 'wb+')
for chunk in f.chunks():
d | estination.write(chunk)
destination.close() |
mozilla/kitsune | kitsune/users/backends.py | Python | bsd-3-clause | 294 | 0 | from django.contrib.auth.backends import ModelBackend
# | Live sessions will still be using | this backend for a while.
# TODO: Remove after there are no more sessions using this in prod.
class Sha256Backend(ModelBackend):
"""Overriding the Django model backend without changes."""
pass
|
Aarononomous/SwitchCSSColorModel | switch_css_color_model.py | Python | mit | 11,674 | 0.00257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sublime
import sublime_plugin
import re
SETTINGS_FILE = 'SwitchCSSColorModel.sublime-settings'
class F:
"""Formats rgba values into output string"""
# default for hex values is lowercase --
# change in SwitchColorModel.sublime-settings
lower = False
# helper methods
def to_h(i):
# one-digit hex number
return list('0123456789abcdef')[i]
def to_h_2(i):
# two-digit hex number with padding
return '{0:0{1}x}'.format(i, 2)
def to_pct(f):
# Max four digits after decimal point, but as few as possible
return '{:.4f}'.format(f).rstrip('0').rstrip('.')
def hsl_to_rgb(h, s, l):
# normalize hsl values
H, S, L = h/360, s/100, l/100
# create some temp variables
if L < 0.5:
temp1 = L*(1 + S)
else:
temp1 = L + S - L*S
temp2 = 2*L - temp1
# create temp rgb values
tempR = H + (1/3)
tempG = H
tempB = H - (1/3)
tempR, tempG, tempB = (tempR + 1) % 1, (tempG + 1) % 1, (tempB + 1) % 1
# tests for rgb vals
if tempR < (1/6):
R = temp2 + (temp1 - temp2)*6*tempR
elif tempR < (1/2):
R = temp1
elif tempR < (2/3):
R = temp2 + (temp1 - temp2)*6*((2/3) - tempR)
else:
R = temp2
if tempG < (1/6):
G = temp2 + (temp1 - temp2)*6*tempG
elif tempG < (1/2):
G = temp1
elif tempG < (2/3):
G = temp2 + (temp1 - temp2)*6*((2/3) - tempG)
else:
G = temp2
if tempB < (1/6):
B = temp2 + (temp1 - temp2)*6*tempB
elif tempB < (1/2):
B = temp1
elif tempB < (2/3):
B = temp2 + (temp1 - temp2)*6*((2/3) - tempB)
else:
B = temp2
R, G, B = round(255*R), round(255*G), round(255*B)
return (R, G, B)
def rgb_to_hsl(r, g, b):
# normalize rgb values
R, G, B = r/255, g/255, b/255
# luminiance
mini = min(R, G, B)
maxi = max(R, G, B)
L = (maxi + mini) / 2
# check for gray -- avoid division by zero
if (mini == maxi):
return (0, 0, L*100)
# saturation
if L < 0.5:
S = (maxi - mini) / (maxi + mini)
else:
S = (maxi - mini) / (2.0 - maxi - mini)
# hue
if R == maxi:
H = (G - B) / (maxi - mini)
elif G == maxi:
H = Hue = 2.0 + (B - R) / (maxi - mini)
else: # B == maxi
H = 4.0 + (R - G) / (maxi - mini)
H = H * 60
H = (H + 360) % 360
S, L = S*100, L*100
return (H, S, L)
def hex_3(r, g, b, a):
if r % 17 == 0 and g % 17 == 0 and b % 17 == 0:
s = '#' + F.to_h(r//17) + F.to_h(g//17) + F.to_h(b//17)
return s if F.lower else s.upper()
else:
return F.hex_6(r, g, b, 1.0)
def hex_6(r, g, b, a):
s = '#' + F.to_h_2(r) + F.to_h_2(g) + F.to_h_2(b)
return s if F.lower else s.upper()
def rgb(r, g, b, a):
return 'rgb(' + str(r) + ',' + str(g) + ',' + str(b) + ')'
def rgba(r, g, b, a):
return ('rgba(' + str(r) + ',' + str(g) + ',' + str(b) + ',' +
F.to_pct(a) + ')')
def rgb_pct(r, g, b, a):
return ('rgb(' + F.to_pct(r/255*100) + '%,' + F.to_pct(g/255*100) +
'%,' + F.to_pct(b/255*100) + '%)')
def rgba_pct(r, g, b | , a):
return ('rgba(' + F.to_pct(r/255*100) + '%,' +
F.to_pct(g/255*100) + '%,' + F.to_pct(b/2 | 55*100) + '%,' +
F.to_pct(a) + ')')
def hsl(r, g, b, a):
h, s, l = F.rgb_to_hsl(r, g, b)
return ('hsl(' + F.to_pct(h) + ',' + F.to_pct(s) + '%,' +
F.to_pct(l) + '%)')
def hsla(r, g, b, a):
h, s, l = F.rgb_to_hsl(r, g, b)
return ('hsla(' + F.to_pct(h) + ',' + F.to_pct(s) + '%,' +
F.to_pct(l) + '%,' + F.to_pct(a) + ')')
class GetRGBA:
"""Extracts RGBA values from a string."""
def hex_3(s):
colors = hex_3['re'].findall(s)[0]
r = int(colors[0], 16) * 17
g = int(colors[1], 16) * 17
b = int(colors[2], 16) * 17
return (r, g, b, 1.0)
def hex_6(s):
colors = hex_6['re'].findall(s)[0]
r = int(colors[0] + colors[1], 16)
g = int(colors[2] + colors[3], 16)
b = int(colors[4] + colors[5], 16)
return (r, g, b, 1.0)
def rgb(s):
r, g, b = rgb['re'].findall(s)[0]
return (int(r), int(g), int(b), 1.0)
def rgba(s):
r, g, b, a = rgba['re'].findall(s)[0]
return (int(r), int(g), int(b), float(a))
def rgb_pct(s):
colors = rgb_pct['re'].findall(s)[0]
r = float(colors[0]) * 255 / 100
g = float(colors[1]) * 255 / 100
b = float(colors[2]) * 255 / 100
r, g, b = round(r), round(g), round(b)
return (r, g, b, 1.0)
def rgba_pct(s):
colors = rgba_pct['re'].findall(s)[0]
r = float(colors[0]) * 255 / 100
g = float(colors[1]) * 255 / 100
b = float(colors[2]) * 255 / 100
a = float(colors[3])
r, g, b = round(r), round(g), round(b)
return (r, g, b, a)
def hsl(s):
colors = hsl['re'].findall(s)[0]
h = float(colors[0])
s = float(colors[1])
l = float(colors[2])
r, g, b = F.hsl_to_rgb(h, s, l)
r, g, b = round(r), round(g), round(b)
return (r, g, b, 1.0)
def hsla(s):
colors = hsla['re'].findall(s)[0]
h = float(colors[0])
s = float(colors[1])
l = float(colors[2])
a = float(colors[3])
r, g, b = F.hsl_to_rgb(h, s, l)
r, g, b = round(r), round(g), round(b)
return (r, g, b, a)
# regexes for color models
regexes = {
'hex_3': r'\B#([a-fA-F\d])([a-fA-F\d])([a-fA-F\d])\b',
'hex_6': r'\B#([a-fA-F\d])([a-fA-F\d])([a-fA-F\d])([a-fA-F\d])([a-fA-F\d])([a-fA-F\d])',
'rgb': r'\brgb\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})\)',
'rgba': r'\brgba\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3}),\s*([\d.]+)\)',
'rgb_pct': r'\brgb\(([\d.]+)%,\s*([\d.]+)%,\s*([\d.]+)%\)',
'rgba_pct': r'\brgba\(([\d.]+)%,\s*([\d.]+)%,\s*([\d.]+)%,\s*([\d.]+)\)',
'hsl': r'\bhsl\(([\d.]+),\s*([\d.]+)%,\s*([\d.]+)%\)',
'hsla': r'\bhsla\(([\d.]+),\s*([\d.]+)%,\s*([\d.]+)%,\s*([\d.]+)\)'
}
hex_3 = {'re': re.compile(regexes['hex_3']), 'from': 'hex_3', 'to': 'hex_6'}
hex_6 = {'re': re.compile(regexes['hex_6']), 'from': 'hex_6', 'to': 'rgb'}
rgb = {'re': re.compile(regexes['rgb']), 'from': 'rgb', 'to': 'rgba'}
rgba = {'re': re.compile(regexes['rgba']), 'from': 'rgba', 'to': 'rgb_pct'}
rgb_pct = {
're': re.compile(regexes['rgb_pct']), 'from': 'rgb_pct', 'to': 'rgba_pct'}
rgba_pct = {
're': re.compile(regexes['rgba_pct']), 'from': 'rgba_pct', 'to': 'hsl'}
hsl = {'re': re.compile(regexes['hsl']), 'from': 'hsl', 'to': 'hsla'}
hsla = {'re': re.compile(regexes['hsla']), 'from': 'hsla', 'to': 'hex_3'}
# concatenate regexes to match multiple occurrences per line
color_models_re = ''
for regex in regexes.values():
color_models_re += regex + '|'
color_models_re = color_models_re[:-1] # remove final '|'
color_models = re.compile(color_models_re)
class SwitchCssColorModelCommand(sublime_plugin.TextCommand):
def run(self, edit):
# settings
settings = self.view.settings().get('SwitchCSSColorModel')
if settings is None:
settings = sublime.load_settings(SETTINGS_FILE)
# set lower/uppercase output for hex values
F.lower = settings.get('lowercase_hex')
# set next models as per other settings (alpha, hsl, and rgb%)
# using this handy chart - upper = true, lower = false, * = self
# ahp ahP aHp aHP Ahp AhP AHp AHP
#
# 3H 6H 6H 6H 6H 6H 6H 6H 6H
# 6H RGB RGB RGB RGB RGB RGB RGB RGB
# RGB 3H RGB% HSL RGB% RGBA RGBA RG |
zerobuzz/js.socialshareprivacy | js/socialshareprivacy/__init__.py | Python | bsd-3-clause | 398 | 0 | from fanstatic import Library, Resource
import js.jquery
library = Library('jquery.socialshareprivacy', 'resourc | es')
css = Resource(library, 'socialshareprivacy/socialshareprivacy.css')
socialshareprivacy = Resource(library, 'jquery.socialshareprivacy.js',
| minified='jquery.socialshareprivacy.min.js',
depends=[js.jquery.jquery, css])
|
rwl/PyCIM | CIM15/IEC61970/Wires/Fuse.py | Python | mit | 1,856 | 0.001616 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRA | CT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CON | NECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Wires.Switch import Switch
class Fuse(Switch):
"""An overcurrent protective device with a circuit opening fusible part that is heated and severed by the passage of overcurrent through it. A fuse is considered a switching device because it breaks current.An overcurrent protective device with a circuit opening fusible part that is heated and severed by the passage of overcurrent through it. A fuse is considered a switching device because it breaks current.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'Fuse' instance.
"""
super(Fuse, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
|
adel-qod/zipper | zipper.py | Python | bsd-2-clause | 6,014 | 0.012305 | #!/usr/bin/python2.7 -tt
"""
Copyright (c) 2013, Adel Qodmani
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tarfile # For the | compression
import os # For everything related to path
import logging
import sys # For the argv an | d exit
import datetime
def main():
""" zipper source-dir-full-path dest-dir-full-path
Tars and zips the source-dir and put it in the dest-dir with the name:
source-dir-name_date_time.tar.gz
"""
check_args()
source_path = sys.argv[1]
source_path = source_path.rstrip('/')
logging.debug("source_path: %s" % source_path)
dest_path = sys.argv[2]
dest_path = dest_path.rstrip('/')
logging.debug("dest_path: %s" % dest_path)
# source name is the name of the dir to be archived
source_name = source_path.split("/")[-1]
logging.debug("source_name: %s" % source_name)
# tar_path
tar_path = create_tar_path(source_name, dest_path)
logging.debug("tar_path: %s" % tar_path)
create_tar_file(tar_path, source_path)
def check_args():
""" Checks if the args supplied to the script are what it expects """
if len(sys.argv) > 1 and sys.argv[1] == "--help":
help_text = ("zipper creates a zipped tar-ball of the <source> directory"
+ "and puts it in \nthe <destination> directory ")
usage = "e.g: zipper /tmp/ /home/sally/Desktop/"
result = "will create a file called tmp_date_time.tar.gz in "
result += "/home/sally/Desktop/ which has all the contents of /tmp/"
print(help_text)
print(usage)
print(result)
sys.exit(0)
elif len(sys.argv) < 3:
print("Missing arguments!")
print("Usage:")
print("\tzipper source destination")
print("You can get the help by: zipper --help")
logging.error("Missing arguments!")
logging.error("Shutting down!")
sys.exit(1)
elif not os.path.isabs(sys.argv[1]):
print("Source directory is not an absolute path")
print("You can get the help by: zipper --help")
logging.error("Source is not absolute")
logging.error("Shutting down")
sys.exit(2)
elif not os.path.isabs(sys.argv[2]):
print("Destination directory is not an absolute path")
print("You can get the help by: zipper --help")
logging.error("Destination is not absolute")
logging.error("Shutting down")
sys.exit(3)
elif not os.path.isdir(sys.argv[1]):
print("Path given as a source directory is not a directory")
print("You can get the help by: zipper --help")
logging.error("Source is not a directory")
logging.error("Shutting down")
sys.exit(4)
elif not os.path.isdir(sys.argv[2]):
print("Path given as destination directory is not a directory")
print("You can get the help by: zipper --help")
logging.error("Destination is not a directory")
logging.error("Shutting down")
sys.exit(5)
def create_tar_path(source_name, dest_path):
""" Creates a path for a backup that will be in the desktop of the user
and the file name will be the /path/to/desktktop/source_name_date.tar.gz
"""
# Get the path to the desktop ready
path = os.path.expanduser('~') # changes ~ to home dir path
logging.debug(path)
path = os.path.join(path, dest_path+"/")
logging.debug(path)
# string from time(strftime): %Year %month %day %Hour %Minute $Second
now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
logging.debug(now)
# The dest path is the path + source_name + date + extension
path = os.path.join(path, source_name)
logging.debug(path)
path += '_' + now + ".tar.gz"
logging.debug(path)
return path
def create_tar_file(tar_path, source_path):
# "w:gz" is open for writing a gz tarball
try:
tar = tarfile.open(tar_path, "w:gz")
tar.add(source_path)
tar.close()
logging.debug("Tar ball [%s] created for directory [%s]" % (tar_path,
source_path))
except IOError:
logging.critical("IOError exception! Aborting ..")
sys.exit(6)
except TarError:
logging.critical("TarError exception! Aborting ...")
sys.exit(7)
if __name__ == "__main__":
# Set up the logging env
# Format: (asctime) (filename) (funcname) (linenumber) (level) (msg)
# The time can be formated with the datefmt parameter
FORMAT = "%(asctime)s %(filename)s::%(funcName)s::%(lineno)d"
FORMAT += " [%(levelname)s]: %(msg)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
try:
STREAM = open("/home/aral/learn/zipper/log", "a+")
except IOError:
print("Can't create a log file in [%s]" % STREAM)
sys.abort()
# Setting the log stream to go to stderr and print all log info from debug
# and higher levels (debug, info, warning, error, critical)
logging.basicConfig(stream=STREAM, level=logging.DEBUG, format=FORMAT,
datefmt=DATE_FORMAT)
main()
|
rspavel/spack | var/spack/repos/builtin/packages/argp-standalone/package.py | Python | lgpl-2.1 | 1,235 | 0.002429 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class ArgpStandalone(AutotoolsPackage):
"""Standalone version of the argp interface from glibc for parsing
unix-style arguments. """
| homepage = "https://www.lysator.liu.se/~nisse/misc"
url = "https://www.lysator.liu.se/~nisse/misc/argp-standalone-1.3.tar.gz"
version('1.3', sha256='dec79694da1319acd2238ce95df57f3680fea2482096e483323fddf3d818d8be')
# Homebrew (https://github.com/Homebrew/homebrew-core) patches
# argp-standalone to work on Darwin; the patchfile below was taken
# from
# https://raw.githubusercontent.com/Homebrew/formula-patches/b5f0ad3/argp-standalone/patch-argp- | fmtstream.h
patch('argp-fmtstream.h.patch', 0, 'platform=darwin', '.')
def install(self, spec, prefix):
make('install')
make('check')
mkdirp(self.spec.prefix.lib)
install('libargp.a', join_path(self.spec.prefix.lib, 'libargp.a'))
mkdirp(self.spec.prefix.include)
install('argp.h', join_path(self.spec.prefix.include, 'argp.h'))
|
111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ncs5500_coherent_portmode_oper.py | Python | apache-2.0 | 6,344 | 0.022541 | """ Cisco_IOS_XR_ncs5500_coherent_portmode_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs5500\-coherent\-portmode package operational data.
This module contains definitions
for the following management objects\:
controller\-port\-mode\: Coherent PortMode operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ControllerPortMode(object):
"""
Coherent PortMode operational data
.. attribute:: optics_name
Name of optics controller
**type**\: list of :py:class:`OpticsName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_portmode_oper.ControllerPortMode.OpticsName>`
"""
_prefix = 'ncs5500-coherent-portmode-oper'
_revision = '2015-11-09'
def __init__(self):
self.optics_name = YList()
self.optics_name.parent = self
self.optics_name.name = 'optics_name'
class OpticsName(object):
"""
Name of optics controller
.. attribute:: interface_name <key>
Interface Name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: port_mode_info
PortMode operational data
**type**\: :py:class:`PortModeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs5500_coherent_portmode_oper.ControllerPortMode.OpticsName.PortModeInfo>`
"""
_prefix = 'ncs5500-coherent-portmode-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.port_mode_info = ControllerPortMode.OpticsName.PortModeInfo()
self.port_mode_info.parent = self
class PortModeInfo(object):
"""
PortMode operational data
.. attribute:: diff
Optics diff
**type**\: str
.. attribute:: fec
Optics fec
**type**\: str
.. attribute:: intf_name
Interface Name
**type**\: str
.. attribute:: modulation
Optics modulation
**type**\: str
.. attribute:: speed
Optics speed
**type**\: str
"""
_prefix = 'ncs5500-coherent-portmode-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.diff = None
self.fec = None
self.intf_name = None
self.modulation = None
self.speed = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ncs5500-coherent-portmode-oper:port-mode-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.diff is not None:
return True
if self.fec is not None:
return True
if self.intf_name is not None:
return True
if self.modulation is not None:
return True
if self.speed is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_portmode_oper as meta
return meta._meta_table['ControllerPortMode.OpticsName.PortModeInfo']['meta_info']
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-ncs5500-coherent-portmode-oper:controller-port-mode/Cisco-IOS-XR-ncs5500-coherent-portmode-oper:optics-name[Cisco-IOS-XR-ncs5500-coherent-portmode-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.port_mode_info is not None and self.port_mode_info._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_portmode_oper as meta
return meta._meta_table['ControllerPortMode.OpticsName']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ncs5500-coherent-portmode-oper:controller-port-mode'
def is_config | (self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.optics_name is not None:
for child_ref in self.optics_name:
if child_ref._has_data():
| return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs5500_coherent_portmode_oper as meta
return meta._meta_table['ControllerPortMode']['meta_info']
|
selfbus/software-arm-incubation | sensors/misc/raincenter-bim112/Phyton Raincenter Tests/test.py | Python | gpl-3.0 | 309 | 0.029126 | import sched, time
s = sched.scheduler(t | ime.time, time.sleep)
def pr | int_time():
print ("From print_time", time.time())
def print_some_times():
print (time.time())
s.enter(5, 1, print_time, ())
s.enter(10, 1, print_time, ())
s.run()
print (time.time())
print_some_times() |
netheosgithub/pcs_api | python/pcs_api/cexceptions.py | Python | apache-2.0 | 3,661 | 0.002185 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals, print_function
class CStorageError(Exception):
"""Base class for all cloud storage errors.
Such exceptions have an optional 'message' and 'cause' attribute."""
def __init__(self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause
def __str__(self):
ret = "%s(%s)" % (self.__class__, self.message)
if self.cause:
ret += " (caused by %r)" % (self.cause,)
return ret
class CInvalidFileTypeError(CStorageError):
"""Raised when performing an operation on a folder when a blob is expected,
or when operating on a blob and a folder is expected.
Also raised when downloading provider special files (eg google drive native docs)."""
def __init__(self, c_path, ex | pected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
| (c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob
class CRetriableError(CStorageError):
"""Raised by RequestInvoker validation method, when request
has failed but should be retried.
This class is only a marker ; the underlying root exception
is given by the 'cause' attribute.
The optional 'delay' specifies how much one should wait before retrying"""
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay
def get_delay(self):
return self.delay
#def __str__(self):
# return "%s(%s)" % (self.__class__, self.cause)
class CFileNotFoundError(CStorageError):
"""File has not been found (sometimes consecutive to http 404 error)"""
def __init__(self, message, c_path):
super(CFileNotFoundError, self).__init__(message)
self.path = c_path
class CHttpError(CStorageError):
"""Raised when providers server answers non OK answers"""
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason
def __str__(self):
ret = "%s(%d %s) %s %s" % (
self.__class__.__name__, self.status_code, self.reason, self.request_method, self.request_path )
if self.message:
ret += ' msg=%s' % self.message
return ret
class CAuthenticationError(CHttpError):
"""http 401 error"""
pass
|
stb-tester/stb-tester | stbt_run.py | Python | lgpl-2.1 | 2,110 | 0 | #!/usr/bin/python3
"""
Copyright 2012-2013 YouView TV Ltd.
2014-2017 stb-tester.com Ltd.
License: LGPL v2.1 or (at your option) any later version (see
https://github.com/stb-tester/stb-tester/blob/master/LICENSE for details).
"""
import argparse
import sys
import _stbt.core
from _stbt import imgproc_cache
from _stbt.logging import debug
from _stbt.stbt_run import (load_test_function,
sane_unicode_and_exception_handling, video)
def main(argv):
parser = _stbt.core.argparser()
parser.prog = 'stbt run'
parser.description = 'Run an stb-tester test script'
parser.add_argument(
'--cache', default=imgproc_cache.default_filename,
help="Path for image-processing cache (default: %(default)s")
parser.add_argument(
'--save-screenshot', default='on-failure',
choices=['always', 'on-failure', 'never'],
help="Save a screenshot at the end of the test to screenshot.png")
parser.add_argument(
'--save-thumbnail', default='never',
choices=['always', 'on-failure', 'never'],
help="Save a thumbnail at the end of the test to thumbnail.jpg")
parser.add_argument(
'script', metavar='FILE[::TESTCASE]', help=(
"The python test script to run. Optionally specify a python "
"function name to run that function; otherwise only the script's "
"top-level will be executed."))
parser.add_argument(
'args', nargs=argparse.REMAINDER, metavar='ARG',
help='Additional arguments passed on to the test script (in sy | s.argv)')
args = parser.parse_args(argv[1:])
deb | ug("Arguments:\n" + "\n".join([
"%s: %s" % (k, v) for k, v in args.__dict__.items()]))
dut = _stbt.core.new_device_under_test_from_config(args)
with sane_unicode_and_exception_handling(args.script), \
video(args, dut), \
imgproc_cache.setup_cache(filename=args.cache):
test_function = load_test_function(args.script, args.args)
test_function.call()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
dasseclab/dasseclab | clones/routersploit/tests/creds/cameras/acti/test_telnet_default_creds.py | Python | gpl-2.0 | 672 | 0.002976 | from routersploit.modules.creds.cameras.acti.telnet_default_creds import Exploit
def test_check_success(generic_target):
""" Test scena | rio - testing against Telnet server """
exploit = Exploit()
assert exploit.target == ""
assert exploit.port == 23
assert exploit.threads == 1
assert exploit.defaults == ["admin:12345", "admin:123456", "Admin:12345", "Admin:123456"]
assert exploit.stop_on_success | is True
assert exploit.verbosity is True
exploit.target = generic_target.host
exploit.port = generic_target.port
assert exploit.check() is True
assert exploit.check_default() is not None
assert exploit.run() is None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.