| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
sileht/deb-openstack-nova
|
nova/tests/test_virt_drivers.py
|
Python
|
apache-2.0
| 19,029
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import netaddr
import sys
import traceback
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import test
from nova.tests import utils as test_utils
libvirt = None
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
def catch_notimplementederror(f):
"""Decorator to simplify catching drivers raising NotImplementedError
If a particular call makes a driver raise NotImplementedError, we
log it so that we can extract this information afterwards to
automatically generate a hypervisor/feature support matrix."""
def wrapped_func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except NotImplementedError:
frame = traceback.extract_tb(sys.exc_info()[2])[-1]
LOG.error('%(driver)s does not implement %(method)s' % {
'driver': type(self.connection),
'method': frame[2]})
wrapped_func.__name__ = f.__name__
wrapped_func.__doc__ = f.__doc__
return wrapped_func
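# Editor's note: copying __name__ and __doc__ by hand, as above, is what
# functools.wraps(f) does (plus a little more); decorating wrapped_func with
# @functools.wraps(f) would be the idiomatic equivalent.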
class _VirtDriverTestCase(test.TestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
self.connection = self.driver_module.get_connection('')
self.ctxt = test_utils.get_test_admin_context()
self.image_service = image.get_default_image_service()
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
image_info = test_utils.get_test_image_info(None, instance_ref)
self.connection.spawn(self.ctxt, instance=instance_ref,
image_meta=image_info,
network_info=network_info)
return instance_ref, network_info
@catch_notimplementederror
def test_init_host(self):
self.connection.init_host('myhostname')
@catch_notimplementederror
def test_list_instances(self):
self.connection.list_instances()
@catch_notimplementederror
def test_list_instances_detail(self):
self.connection.list_instances_detail()
@catch_notimplementederror
def test_spawn(self):
instance_ref, network_info = self._get_running_instance()
domains = self.connection.list_instances()
self.assertIn(instance_ref['name'], domains)
domains_details = self.connection.list_instances_detail()
self.assertIn(instance_ref['name'], [i.name for i in domains_details])
@catch_notimplementederror
def test_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
self.ctxt, instance_ref, img_ref['id'])
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
self.connection.reboot(instance_ref, network_info, reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
host_ip = self.connection.get_host_ip_addr()
# Will raise an exception if it's not a valid IP at all
ip = netaddr.IPAddress(host_ip)
# For now, assume IPv4.
self.assertEquals(ip.version, 4)
@catch_notimplementederror
def test_resize_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resize(instance_ref, 7)
@catch_notimplementederror
def test_set_admin_password(self):
instance_ref, network_info = self._get_running_instance()
self.connection.set_admin_password(instance_ref, 'p4ssw0rd')
@catch_notimplementederror
def test_inject_file(self):
instance_ref, network_info = self._get_running_instance()
self.connection.inject_file(instance_ref,
base64.b64encode('/testfile'),
base64.b64encode('testcontents'))
@catch_notimplementederror
def test_agent_update(self):
instance_ref, network_info = self._get_running_instance()
self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
'd41d8cd98f00b204e9800998ecf8427e')
@catch_notimplementederror
def test_rescue(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None)
@catch_notimplementederror
def test_unrescue_unrescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None)
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_poll_rebooting_instances(self):
self.connection.poll_rebooting_instances(10)
@catch_notimplementederror
def test_poll_rescued_instances(self):
self.connection.poll_rescued_instances(10)
@catch_notimplementederror
def test_poll_unconfirmed_resizes(self):
self.connection.poll_unconfirmed_resizes(10)
@catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
instance_type_ref = test_utils.get_test_instance_type()
self.connection.migrate_disk_and_power_off(
self.ctxt, instance_ref, 'dest_host', instance_type_ref,
network_info)
@catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@catch_notimplementederror
def test_unpause_unpaused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_unpause_paused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_suspend(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(instance_ref)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
self.connection.resume(instance_ref)
@catch_notimplementederror
def test_destroy_instance_nonexistant(self):
fake_instance = {'id': 42, 'name': 'I just made this up!',
|
smouzakitis/molly
|
molly/views.py
|
Python
|
apache-2.0
| 711
| 0.014104
|
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import simplejson as json
import ner
def index(request):
params = {'current': 'home'}
return render(request, 'index.html', params)
def name_entity_recognition(request):
if request.method == 'GET':
#Get the array that contains the list of texts to recognize
input_text_array = request.GET.getlist('text[]')
data = {}
i=0
for text in input_text_array:
#Recognize all strings / texts contained in the array
data[i] = ner.recognize(text.strip())
i+=1
return HttpResponse(json.dumps(data), content_type = "application/json")
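# Editor's usage sketch (hypothetical URL routing, not part of the original
# file): a request such as
#   GET /ner/?text[]=First%20sentence&text[]=Second%20sentence
# would return JSON of the form {"0": <ner.recognize(...)>, "1": ...},
# one entry per input string.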
|
bnoi/scikit-tracker
|
sktracker/trajectories/__init__.py
|
Python
|
bsd-3-clause
| 533
| 0.001876
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try: # pragma: no cover
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError: # pragma: no cover
log.warning('''Matplotlib can't be imported,'''
'''drawing module won't be available ''')
__all__ = ['Trajectories']
|
cclljj/AnySense_7688
|
lib/gas_co2_s8.py
|
Python
|
gpl-3.0
| 1,189
| 0.044575
|
import mraa
import time
from multiprocessing import Queue,Process
import move_avge
CO2_BYTE = 9
NUM_INCOME_BYTE = 13
S8_message = b"\xFE\x04\x00\x00\x00\x04\xE5\xC6"
class sensor(Process):
def __init__(self, q):
Process.__init__(self)
self.q = q
self.u=mraa.Uart(1)
self.u.setBaudRate(9600)
self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
self.u.setFlowcontrol(False, False)
self.co2_avg = move_avge.move_avg(1)
def data_log(self, dstr):
bytedata = bytearray(dstr)
if self.checksum(dstr) is True:
CO2 = bytedata[CO2_BYTE]*256 + bytedata[CO2_BYTE+1]
self.co2_avg.add(CO2)
else:
return
def checksum(self, dstr):
return True
def get_data(self):
CO2 = self.co2_avg.get()
ret = { 'CO2': CO2
}
return ret
def run(self):
while True:
self.u.writeStr(S8_message)
self.u.flush()
if self.u.dataAvailable():
time.sleep(0.05)
getstr = self.u.readStr(NUM_INCOME_BYTE)
if len(getstr) == NUM_INCOME_BYTE:
self.data_log(getstr)
g = self.get_data()
self.q.put(g)
time.sleep(5)
if __name__ == '__main__':
q = Queue(maxsize=5)
p = sensor(q)
p.start()
while True:
print('co2: '+ str(q.get()))
|
rubendibattista/python-ransac-library
|
pyransac/features.py
|
Python
|
bsd-3-clause
| 6,919
| 0.021246
|
from __future__ import division
import abc
import numpy as n
import scipy.linalg as linalg
import scipy.optimize as opt
import scipy.spatial.distance as dist
class Feature(object):
'''
Abstract class that represents a feature to be used
with :py:class:`pyransac.ransac.RansacFeature`
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractproperty
def min_points(self):
'''int: Minimum number of points needed to define the feature.'''
pass
@abc.abstractmethod
def points_distance(self,points):
'''
This function implements a method to compute the distance
of points from the feature.
Args:
points (numpy.ndarray): a numpy array of points the distance must be
computed of.
Returns:
distances (numpy.ndarray): the computed distances of the points from the feature.
'''
pass
@abc.abstractmethod
def print_feature(self,num_points):
'''
This method returns an array of x,y coordinates for
points that are in the feature.
Args:
num_points (numpy.ndarray): the number of points to be returned
Returns:
coords (numpy.ndarray): a num_points x 2 numpy array that contains
the points coordinates
'''
class Circle(Feature):
'''
Feature class for a Circle :math:`(x-x_c)^2 + (y-y_c)^2 - r = 0`
'''
min_points = 3
'''int: Minimum number of points needed to define the circle (3).'''
def __init__(self,points):
self.radius,self.xc,self.yc = self.__gen(points)
def __gen(self,points):
'''
Compute the radius and the center coordinates of a
circumference given three points
Args:
points (numpy.ndarray): a (3,2) numpy array, each row is a 2D Point.
Returns:
(tuple): A 3 elements tuple that contains the circumference radius
and center coordinates [radius,xc,yc]
Raises:
RuntimeError: If the circle computation does not succeed
a RuntimeError is raised.
'''
# Linear system for (D,E,F) in circle
# equations: D*xi + E*yi + F = -(xi**2 + yi**2)
# where xi, yi are the coordinate of the i-th point.
# Generating A matrix
A = n.array([(x,y,1) for x,y in points])
# Generating rhs
rhs = n.array([-(x**2+y**2) for x,y in points])
try:
#Solving linear system
D,E,F = linalg.lstsq(A,rhs)[0]
except linalg.LinAlgError:
raise RuntimeError('Circle calculation not successful. Please\
check the input data, probable collinear points')
xc = -D/2
yc = -E/2
r = n.sqrt(xc**2+yc**2-F)
return (r,xc,yc)
def points_distance(self,points):
r'''
Compute the distance of the points from the feature
:math:`d = \left| \sqrt{(x_i - x_c)^2 + (y_i-y_c)^2} - r \right|`
Args:
points (numpy.ndarray): a (3,2) numpy array, each row is a 2D Point.
Returns:
d (numpy.ndarray): the computed distances of the points from the feature.
'''
xa = n.array([self.xc,self.yc]).reshape((1,2))
d = n.abs(dist.cdist(points,xa) - self.radius)
return d
def print_feature(self, num_points):
'''
This method returns an array of x,y coordinates for
points that are in the feature.
Args:
num_points (numpy.ndarray): the number of points to be returned
Returns:
coords (numpy.ndarray): a num_points x 2 numpy array that contains
the points coordinates
'''
theta = n.linspace(0,2*n.pi,num_points)
x = self.xc + self.radius*n.cos(theta)
y = self.yc + self.radius*n.sin(theta)
return n.vstack((x,y))
class Exponential (Feature):
'''
Feature Class for an exponential curve :math:`y=ax^{k} + b`
'''
min_points = 3
def __init__(self,points):
self.a,self.k,self.b = self.__gen(points)
def __gen(self,points):
'''
Compute the three parameters that univocally determine the
exponential curve
Args:
points(numpy.ndarray): a (3,2) numpy array, each row is a 2D Point.
Returns:
exp(numpy.ndarray): A (3,) numpy array that contains the a,n,b parameters
[a,k,b]
Raises:
RuntimeError: If the circle computation does not succeed
a RuntimeError is raised.
'''
def exponential(x,points):
''' Non linear system function to use
with :py:func:`scypy.optimize.root`
'''
aa = x[0]
nn = x[1]
bb = x[2]
f = n.zeros((3,))
f[0] = n.abs(aa)*n.power(points[0,0],nn)+bb - points[0,1]
f[1] = n.abs(aa)*n.power(points[1,0],nn)+bb - points[1,1]
f[2] = n.abs(aa)*n.power(points[2,0],nn)+bb - points[2,1]
return f
exp = opt.root(exponential,[1,1,1],points,method='lm')['x']
return exp
def points_distance(self,points):
r'''
Compute the distance of the points from the feature
:math:`d = \sqrt{(x_i - x_c)^2 + (y_i-y_c)^2}`
Args:
points (numpy.ndarray): a (3,2) numpy array, each row is a 2D Point.
Returns:
d (numpy.ndarray): the computed distances of the points from the feature.
'''
x = points[:,0]
xa = n.array([x,self.a*n.power(x,self.k)+self.b])
xa = xa.T
d = dist.cdist(points,xa)
return n.diag(d)
def print_feature(self, num_points, a,b):
'''
This method returns an array of x,y coordinates for
points that are in the feature in the interval [a,b].
Args:
num_points (numpy.ndarray): the number of points to be returned
a (float): left end of the interval
b (float): right end of the interval
Returns:
coords (numpy.ndarray): a num_points x 2 numpy array that contains
the points coordinates
'''
x = n.linspace(a,b,num_points)
y = self.a*x**self.k + self.b
return n.vstack((x,y))
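# --- Editor's usage sketch (not part of the original module) ---
# A minimal check of the Circle fit: three points on the circle of radius 5
# centred at the origin should recover r ~ 5 and centre ~ (0, 0).
if __name__ == '__main__':
    pts = n.array([[5.0, 0.0], [0.0, 5.0], [-5.0, 0.0]])
    c = Circle(pts)
    print('r=%.3f xc=%.3f yc=%.3f' % (c.radius, c.xc, c.yc))
    # Points on the circle are at (near-)zero distance from the feature.
    print(c.points_distance(pts))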
|
chrislit/abydos
|
abydos/distance/_upholt.py
|
Python
|
gpl-3.0
| 4,479
| 0
|
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._upholt.
Upholt similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Upholt']
class Upholt(_TokenDistance):
r"""Upholt similarity.
For two sets X and Y and a population N, Upholt similarity, Upholt's S,
:cite:`Upholt:1977` is
.. math::
sim_{Upholt}(X, Y) =
\frac{1}{2}\Bigg(-\frac{2 \cdot |X \cap Y|}{|X| + |Y|} +
\sqrt{\Big(\frac{2 \cdot |X \cap Y|}{|X| + |Y|}\Big)^2 +
8\frac{2 \cdot |X \cap Y|}{|X| + |Y|}}\Bigg)
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{Upholt}(X, Y) =
\frac{1}{2}\Bigg(-\frac{2a}{2a+b+c} +
\sqrt{\Big(\frac{2a}{2a+b+c}\Big)^2 +
8\frac{2a}{2a+b+c}}\Bigg)
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize Upholt instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(Upholt, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def sim(self, src: str, tar: str) -> float:
"""Return the Upholt similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Upholt similarity
Examples
--------
>>> cmp = Upholt()
>>> cmp.sim('cat', 'hat')
0.7807764064044151
>>> cmp.sim('Niall', 'Neil')
0.6901511860568581
>>> cmp.sim('aluminum', 'Catalan')
0.42980140370106323
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
f = 2 * a / (2 * a + b + c)
return (-f + ((8 + f) * f) ** 0.5) / 2
if __name__ == '__main__':
import doctest
doctest.testmod()
|
porolakka/motioneye-jp
|
src/update.py
|
Python
|
gpl-3.0
| 1,486
| 0.007402
|
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
# versions
def get_version():
import motioneye
return motioneye.VERSION
def get_all_versions():
return []
def compare_versions(version1, version2):
version1 = [int(n) for n in version1.split('.')]
version2 = [int(n) for n in version2.split('.')]
len1 = len(version1)
len2 = len(version2)
length = min(len1, len2)
for i in xrange(length):
p1 = version1[i]
p2 = version2[i]
if p1 < p2:
return -1
elif p1 > p2:
return 1
if len1 < len2:
return -1
elif len1 > len2:
return 1
else:
return 0
def perform_update(version):
logging.error('updating is not implemented')
return False
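# Editor's sanity check of the numeric (not lexicographic) comparison above,
# assuming Python 2 since xrange() is used:
#   compare_versions('1.2', '1.10') == -1   # 2 < 10 numerically
#   compare_versions('1.0', '1.0.1') == -1  # equal prefix, shorter loses
#   compare_versions('2.0', '2.0')   ==  0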
|
fstagni/DIRAC
|
WorkloadManagementSystem/Client/Matcher.py
|
Python
|
gpl-3.0
| 14,961
| 0.008689
|
""" Encapsulate here the logic for matching jobs
Utilities and classes here are used by MatcherHandler
"""
__RCSID__ = "$Id"
import time
from DIRAC import gLogger
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities.PrettyPrint import printDict
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.Limiter import Limiter
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB, singleValueDefFields, multiValueMatchFields
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
class Matcher(object):
""" Logic for matching
"""
def __init__(self, pilotAgentsDB=None, jobDB=None, tqDB=None, jlDB=None, opsHelper=None):
""" c'tor
"""
if pilotAgentsDB:
self.pilotAgentsDB = pilotAgentsDB
else:
self.pilotAgentsDB = PilotAgentsDB()
if jobDB:
self.jobDB = jobDB
else:
self.jobDB = JobDB()
if tqDB:
self.tqDB = tqDB
else:
self.tqDB = TaskQueueDB()
if jlDB:
self.jlDB = jlDB
else:
self.jlDB = JobLoggingDB()
if opsHelper:
self.opsHelper = opsHelper
else:
self.opsHelper = Operations()
self.log = gLogger.getSubLogger("Matcher")
self.limiter = Limiter(jobDB=self.jobDB, opsHelper=self.opsHelper)
self.siteClient = SiteStatus()
def selectJob(self, resourceDescription, credDict):
""" Main job selection function to find the highest priority job matching the resource capacity
"""
startTime = time.time()
resourceDict = self._getResourceDict(resourceDescription, credDict)
# Make a nice print of the resource matching parameters
toPrintDict = dict(resourceDict)
if "MaxRAM" in resourceDescription:
toPrintDict['MaxRAM'] = resourceDescription['MaxRAM']
if "NumberOfProcessors" in resourceDescription:
toPrintDict['NumberOfProcessors'] = resourceDescription['NumberOfProcessors']
toPrintDict['Tag'] = []
if "Tag" in resourceDict:
for tag in resourceDict['Tag']:
if not tag.endswith('GB') and not tag.endswith('Processors'):
toPrintDict['Tag'].append(tag)
if not toPrintDict['Tag']:
toPrintDict.pop('Tag')
self.log.info('Resource description for matching', printDict(toPrintDict))
negativeCond = self.limiter.getNegativeCondForSite(resourceDict['Site'])
result = self.tqDB.matchAndGetJob(resourceDict, negativeCond=negativeCond)
if not result['OK']:
raise RuntimeError(result['Message'])
result = result['Value']
if not result['matchFound']:
self.log.info("No match found")
return {}
jobID = result['jobId']
resAtt = self.jobDB.getJobAttributes(jobID, ['OwnerDN', 'OwnerGroup', 'Status'])
if not resAtt['OK']:
raise RuntimeError('Could not retrieve job attributes')
if not resAtt['Value']:
raise RuntimeError("No attributes returned for job")
if not resAtt['Value']['Status'] == 'Waiting':
self.log.error('Job matched by the TQ is not in Waiting state', str(jobID))
result = self.tqDB.deleteJob(jobID)
if not result['OK']:
raise RuntimeError(result['Message'])
raise RuntimeError("Job %s is not in Waiting state" % str(jobID))
self._reportStatus(resourceDict, jobID)
result = self.jobDB.getJobJDL(jobID)
if not result['OK']:
raise RuntimeError("Failed to get the job JDL")
resultDict = {}
resultDict['JDL'] = result['Value']
resultDict['JobID'] = jobID
matchTime = time.time() - startTime
self.log.info("Match time", "[%s]" % str(matchTime))
gMonitor.addMark("matchTime", matchTime)
# Get some extra stuff into the response returned
resOpt = self.jobDB.getJobOptParameters(jobID)
if resOpt['OK']:
for key, value in resOpt['Value'].items():
resultDict[key] = value
resAtt = self.jobDB.getJobAttributes(jobID, ['OwnerDN', 'OwnerGroup'])
if not resAtt['OK']:
raise RuntimeError('Could not retrieve job attributes')
if not resAtt['Value']:
raise RuntimeError('No attributes returned for job')
if self.opsHelper.getValue("JobScheduling/CheckMatchingDelay", True):
self.limiter.updateDelayCounters(resourceDict['Site'], jobID)
pilotInfoReportedFlag = resourceDict.get('PilotInfoReportedFlag', False)
if not pilotInfoReportedFlag:
self._updatePilotInfo(resourceDict)
self._updatePilotJobMapping(resourceDict, jobID)
resultDict['DN'] = resAtt['Value']['OwnerDN']
resultDict['Group'] = resAtt['Value']['OwnerGroup']
resultDict['PilotInfoReportedFlag'] = True
return resultDict
def _getResourceDict(self, resourceDescription, credDict):
""" from resourceDescription to resourceDict (just various mods)
"""
resourceDict = self._processResourceDescription(resourceDescription)
resourceDict = self._checkCredentials(resourceDict, credDict)
self._checkPilotVersion(resourceDict)
if not self._checkMask(resourceDict):
# Banned destinations can only take Test jobs
resourceDict['JobType'] = 'Test'
self.log.verbose("Resource description")
for key in resourceDict:
self.log.debug("%s : %s" % (key.rjust(20), resourceDict[key]))
return resourceDict
def _processResourceDescription(self, resourceDescription):
""" Check and form the resource description dictionary
:param resourceDescription: a ceDict coming from a JobAgent,
for example.
:return: updated dictionary of resource description parameters
"""
resourceDict = {}
for name in singleValueDefFields:
if name in resourceDescription:
resourceDict[name] = resourceDescription[name]
for name in multiValueMatchFields:
if name in resourceDescription:
resourceDict[name] = resourceDescription[name]
if resourceDescription.get('Tag'):
resourceDict['Tag'] = resourceDescription['Tag']
if 'RequiredTag' in resourceDescription:
resourceDict['RequiredTag'] = resourceDescription['RequiredTag']
if 'JobID' in resourceDescription:
resourceDict['JobID'] = resourceDescription['JobID']
# Convert MaxRAM and NumberOfProcessors parameters into a list of tags
maxRAM = resourceDescription.get('MaxRAM')
if maxRAM:
try:
maxRAM = int(maxRAM) / 1000
except ValueError:
maxRAM = None
nProcessors = resourceDescription.get('NumberOfProcessors')
if nProcessors:
try:
nProcessors = int(nProcessors)
except ValueError:
nProcessors = None
for param, key in [(maxRAM, 'GB'), (nProcessors, 'Processors')]:
if param and param <= 1024:
paramList = range(2, param + 1)
paramTags = ['%d%s' % (par, key) for par in paramList]
if paramTags:
resourceDict.setdefault("Tag", []).extend(paramTags)
# Add 'MultiProcessor' to the list of tags
if nProcessors > 1:
resourceDict.setdefault("Tag", []).append("MultiProcessor")
# Add 'WholeNode' to the list of tags
if "WholeNode" in resourceDescription:
resourceDict.setdefault("Tag", []).append("WholeNode")
if 'Tag' in resourceDict:
resourceDict['Tag'] = list(set(resourceDict['Tag']))
for k in ('DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization',
'PilotReference', 'PilotBenchmark', 'PilotInfoReportedFlag'):
if k in resourceDescription:
resourceDict[k] = resourceDescription[k]
return resourceDict
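# Editor's worked example (hypothetical input, to illustrate the tag logic
# above): resourceDescription = {'MaxRAM': 4000, 'NumberOfProcessors': 2}
# gives maxRAM = 4 and nProcessors = 2, so resourceDict['Tag'] gains
# ['2GB', '3GB', '4GB'] and ['2Processors'], plus 'MultiProcessor' because
# nProcessors > 1.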
def _reportStatus(self, resourceDict, jobID):
""" Reports the status of the matched job in jobDB and jobLoggingDB
Do not fail if errors happen here
"""
attNames = ['Status', 'MinorStatus', 'ApplicationStatus', 'Site']
attValues = ['Matched', 'Assigned', 'Unknown', r
|
callen/Alky-Reborn
|
Convertor/setup.py
|
Python
|
lgpl-3.0
| 303
| 0.033003
|
#!/usr/bin/env python
# encoding: utf-8
"""
setup.py
Created by Cody Brocious on 2006-12-21.
Copyright (c) 2006 Falling Leaf Systems. All rights reserved.
"""
from distutils.core import setup
import py2app
setup(
app = ['Convert.py'],
options = dict(
py2app=dict(
argv_emulation=True
)
)
)
|
jfzhang95/lightML
|
SupervisedLearning/Neural Layers/methods.py
|
Python
|
mit
| 3,801
| 0.006969
|
#!usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: James Zhang
@date:
"""
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
import copy
import sys
sys.setrecursionlimit(1000000)
def handle_binary_vector(given_list, k):
# handle_binary_vector[0] returns the binarized (0/1) list
# handle_binary_vector[1] returns the original list
tmp_list = copy.deepcopy(given_list)
given_list.sort(reverse=True)
new_sort_array = given_list[0:k]
index_list = []
for each_num in new_sort_array:
index_list.append(tmp_list.index(each_num))
new_vector_list=np.zeros(len(given_list),dtype='int64')
for each_position in index_list:
new_vector_list[each_position]=1
return (new_vector_list,tmp_list)
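# Editor's worked example: handle_binary_vector([3, 1, 2], 2) keeps the two
# largest entries, returning (array([1, 0, 1]), [3, 1, 2]).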
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def random_weights(shape, name=None):
# return theano.shared(floatX(np.random.randn(*shape) * 0.01), name=name)
return theano.shared(floatX(np.random.uniform(size=shape, low=-0.5, high=0.5)), name=name)
def zeros(shape, name=""):
return theano.shared(floatX(np.zeros(shape)), name=name)
def softmax(X, temperature=1.0):
e_x = T.exp((X - X.max(axis=1).dimshuffle(0, 'x')) / temperature) # dimshuffle(0, 'x') output 2 dim array
# return prob of each label. prob1+...+probn = 1
return e_x / e_x.sum(axis=1).dimshuffle(0, 'x') # dimshuffle(0, 'x') output 2 dim array
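# Editor's note: subtracting the row-wise max before exp() above is the
# standard numerical-stability trick; it leaves the output unchanged because
# softmax(x) == softmax(x - c) for any constant c.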
def sigmoid(X):
return 1 / (1 + T.exp(-X))
def dropout(X, dropout_prob=0.0):
retain_prob = 1 - dropout_prob
srng = RandomStreams(seed=1234)
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
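# Editor's note: this is "inverted" dropout; dividing the surviving units by
# retain_prob at train time keeps E[X] unchanged, so no rescaling is needed
# at inference time.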
# def dropout(x, dropout_prob):
# if dropout_prob < 0. or dropout_prob > 1.:
# raise Exception('Dropout level must be in interval [0, 1]')
# retain_prob = 1. - dropout_prob
# sample=np.random.binomial(n=1, p=retain_prob, size=x.shape)
# x *= sample
# x /= retain_prob
# return x
def rectify(X):
return T.maximum(X, 0.)
def clip(X, epsilon):
return T.maximum(T.minimum(X, epsilon), -1*epsilon)
def scale(X, max_norm):
curr_norm = T.sum(T.abs_(X))
return ifelse(T.lt(curr_norm, max_norm), X, max_norm * (X / curr_norm))
def SGD(loss, params, learning_rate, lambda2=0.05):
updates = OrderedDict()
grads = T.grad(cost=loss, wrt=params)
for p, g in zip(params, grads):
# updates.append([p, p-learning_rate*(g+lambda2*p)]) # lambda*p regulzation
updates[p] = p - learning_rate * (g + lambda2 * p)
return updates, grads
def momentum(loss, params, caches, learning_rate=0.1, rho=0.1, clip_at=0.0, scale_norm=0.0, lambda2=0.0):
updates = OrderedDict()
grads = T.grad(cost=loss, wrt=params)
for p, c, g in zip(params, caches, grads):
if clip_at > 0.0:
grad = clip(g, clip_at)
else:
grad = g
if scale_norm > 0.0:
grad = scale(grad, scale_norm)
delta = rho * grad + (1-rho) * c
updates[p] = p - learning_rate * (delta + lambda2 * p)
return updates, grads
def get_params(layers):
params = []
for layer in layers:
for param in layer.get_params():
params.append(param)
return params
def make_caches(params):
caches = []
for p in params:
caches.append(theano.shared(floatX(np.zeros(p.get_value().shape))))
return caches
"""
make_caches的功能:
提供和p(参数)同shape的全0矩阵
用与梯度下降方法
"""
def one_step_updates(layers):
updates = []
for layer in layers:
updates += layer.updates()
return updates
|
ojii/readthedocs.org
|
readthedocs/rtd_tests/tests/test_redirects.py
|
Python
|
mit
| 3,374
| 0.005039
|
from django.test import TestCase
from builds.models import Version
from projects.models import Project
class RedirectTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
r = self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': 'latest',
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
pip = Project.objects.get(slug='pip')
pip_latest = Version.objects.create(project=pip, identifier='latest', verbose_name='latest', slug='latest', active=True)
def test_proper_url_no_slash(self):
r = self.client.get('/docs/pip')
# This is triggered by Django, so it's a 301; basically just APPEND_SLASH
self.assertEqual(r.status_code, 301)
self.assertEqual(r._headers['location'], ('Location', 'http://testserver/docs/pip/'))
r = self.client.get(r._headers['location'][1])
self.assertEqual(r.status_code, 302)
r = self.client.get(r._headers['location'][1])
self.assertEqual(r.status_code, 200)
def test_proper_url(self):
r = self.client.get('/docs/pip/')
self.assertEqual(r.status_code, 302)
self.assertEqual(r._headers['location'], ('Location', 'http://testserver/docs/pip/en/latest/'))
r = self.client.get(r._headers['location'][1])
self.assertEqual(r.status_code, 200)
def test_inproper_url(self):
r = self.client.get('/docs/pip/en/')
self.assertEqual(r.status_code, 404)
def test_proper_url_full(self):
r = self.client.get('/docs/pip/en/latest/')
self.assertEqual(r.status_code, 200)
# Subdomains
def test_proper_subdomain(self):
r = self.client.get('/', HTTP_HOST = 'pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r._headers['location'], ('Location', 'http://pip.readthedocs.org/en/latest/'))
# Keep this around for now, until we come up with a nicer interface
"""
def test_inproper_subdomain(self):
r = self.client.get('/en/', HTTP_HOST = 'pip.readthedocs.org')
self.assertEqual(r.status_code, 404)
"""
def test_proper_subdomain_and_url(self):
r = self.client.get('/en/latest/', HTTP_HOST = 'pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_subdomain(self):
r = self.client.get('/page/test.html', HTTP_HOST = 'pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r._headers['location'], ('Location', 'http://pip.readthedocs.org/en/latest/test.html'))
# Specific Page Redirects
def test_proper_page_on_main_site(self):
r = self.client.get('/docs/pip/page/test.html')
self.assertEqual(r.status_code, 302)
self.assertEqual(r._headers['location'], ('Location', 'http://testserver/docs/pip/en/latest/test.html'))
|
alexAubin/hovercraft
|
hovercraft/tests/test_data/__init__.py
|
Python
|
mit
| 17,298
| 0
|
HTML_OUTPUTS = {
'simple': (
b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
b'<div id="impress"><div class="step step-level-1" step="0" '
b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-x="0" data-y="0" data-z="0"><h1 '
b'id="simple-presentation">Simple Presentation</h1><p>This '
b'presentation has two slides, each with a '
b'header and some text.</p></div><div class="step step-level-1" '
b'step="1" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-x="1600" data-y="0" data-z="0"><h1 '
b'id="second-slide">Second slide</h1><p>There is no positioning or '
b'anything fancy.</p></div></div><script type="text/javascript" '
b'src="js/impress.js"></script><script type="text/javascript" '
b'src="js/hovercraft-minimal.js"></script></body></html>'
),
'extra_css': (
b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
b'xmlns="http://www.w3.org/1999/xhtml"><head><title></title><link '
b'rel="stylesh
|
eet" href="css/style.css" media="all"></link><link '
b'rel="stylesheet" href="css/print.css" media="print"></link><link '
b'rel="stylesheet" href="css/impressConsole.css" '
b'media="screen,projection"></link><link rel="stylesheet" '
b'href="extra.css" media="all"></link><script type="text/javascript" '
b'src="js/dummy.js"></script></head><body '
b'class="impress-not-supported"><div id="impress"><div class="step '
b'step-level-1" step="0" data-rota
|
te-x="0" data-rotate-y="0" '
b'data-rotate-z="0" data-scale="1" data-x="0" data-y="0" data-z="0">'
b'<h1 id="simple-presentation">Simple Presentation</h1><p>This '
b'presentation has two slides, each with a header and some text.</p>'
b'</div><div class="step step-level-1" step="1" data-rotate-x="0" '
b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="1600" '
b'data-y="0" data-z="0"><h1 id="second-slide">Second '
b'slide</h1><p>There is no positioning or anything '
b'fancy.</p></div></div><div id="hovercraft-help" '
b'class="show"><table><tr><th>Left, Down, Page Down, Space</th><td>'
b'Next slide</td></tr><tr><th>Right, Up, Page Up</th><td>Previous '
b'slide</td></tr><tr><th>H</th><td>Toggle this help</td>'
b'</tr></table></div><script type="text/javascript" '
b'src="js/impress.js"></script><script type="text/javascript" '
b'src="js/impressConsole.js"></script><script type="text/javascript" '
b'src="js/hovercraft.js"></script></body></html>'
),
'advanced': (
b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
b'xmlns="http://www.w3.org/1999/xhtml"><head><title>Presentation '
b'title</title><link rel="stylesheet" href="css/style.css" '
b'media="all"></link><link rel="stylesheet" href="css/print.css" '
b'media="print"></link><link rel="stylesheet" '
b'href="css/impressConsole.css" media="screen,projection"></link>'
b'<link rel="stylesheet" href="extra.css" media="screen"></link>'
b'<script type="text/javascript" src="js/dummy.js"></script></head>'
b'<body class="impress-not-supported"><div id="impress" '
b'data-transition-duration="2000" auto-console="True"><div '
b'class="step step-level-1" step="0" data-x="1000" data-y="1600" '
b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-z="0"><h1 id="advanced-presentation">Advanced '
b'Presentation</h1><p>Here we show the positioning feature, where we '
b'can explicitly set a position\non one of the steps.</p></div><div '
b'class="step step-level-1" step="1" id="name-this-step" '
b'data-x="2600" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-y="1600" data-z="0"><h1 id="formatting">'
b'Formatting</h1><p>Let us also try some basic formatting, like <em>'
b'italic</em>, and <strong>bold</strong>.</p><ul><li>We can also</li>'
b'<li>have a list</li><li>of things.</li></ul></div><div class="step '
b'step-level-1" step="2" data-rotate-x="0" data-rotate-y="0" '
b'data-rotate-z="0" data-scale="1" data-x="4200" data-y="1600" '
b'data-z="0"><p>There should also be possible to have\npreformatted '
b'text for code.</p><pre class="highlight code python"><span '
b'class="k">def</span> <span class="nf">foo</span><span class="p">'
b'(</span><span class="n">bar</span><span class="p">):</span>\n '
b'<span class="c"># Comment</span>\n <span class="n">a</span> '
b'<span class="o">=</span> <span class="mi">1</span> <span class="o">'
b'+</span> <span class="s">"hubbub"</span>\n <span class="k">'
b'return</span> <span class="bp">None</span></pre></div><div '
b'class="step step-level-1" step="3" data-rotate-x="0" '
b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="5800" '
b'data-y="1600" data-z="0"><p>An image, with attributes:</p><img '
b'src="images/python-logo-master-v3-TM.png" width="50%" '
b'class="imageclass"></img></div><div class="step step-level-1" '
b'step="4" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-x="7400" data-y="1600" data-z="0"><h1 '
b'id="character-sets">Character sets</h1><p>The character set is '
b'UTF-8 as of now. Like this: åäö.</p></div></div>'
b'<div id="hovercraft-help" class="show"><table><tr><th>Left, Down, '
b'Page Down, Space</th><td>Next slide</td></tr><tr><th>Right, Up, '
b'Page Up</th><td>Previous slide</td></tr><tr><th>H</th><td>Toggle '
b'this help</td></tr></table></div><script type="text/javascript" '
b'src="js/impress.js"></script><script type="text/javascript" '
b'src="js/impressConsole.js"></script><script type="text/javascript" '
b'src="js/hovercraft.js"></script></body></html>'
),
'default-template': (
b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><head>'
b'<title>Presentation title</title><meta name="generator" '
b'content="Hovercraft! 1.0 http://regebro.github.com/hovercraft">'
b'</meta><link rel="stylesheet" href="css/hovercraft.css" '
b'media="all"></link><link rel="stylesheet" '
b'href="css/impressConsole.css" media="all"></link>'
b'<link rel="stylesheet" href="css/highlight.css" media="all"></link>'
b'<link rel="stylesheet" href="extra.css" media="screen"></link>'
b'</head><body class="impress-not-supported"><div id="impress" '
b'data-transition-duration="2000" auto-console="True"><div '
b'class="step step-level-1" step="0" data-x="1000" data-y="1600" '
b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-z="0"><h1 id="advanced-presentation">Advanced '
b'Presentation</h1><p>Here we show the positioning feature, where we '
b'can explicitly set a position\non one of the steps.</p></div><div '
b'class="step step-level-1" step="1" id="name-this-step" '
b'data-x="2600" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
b'data-scale="1" data-y="1600" data-z="0"><h1 id="formatting">'
b'Formatting</h1><p>Let us also try some basic formatting, like '
b'<em>italic</em>, and <strong>bold</strong>.</p><ul><li>'
b'We can also</li><li>have a list</li><li>of things.</li></ul></div>'
b'<div class="step step-level-1" step="2" data-rotate-x="0" '
b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="4200" '
b'data-y="1600" data-z="0"><p>There should also be possible to '
b'have\npreformatted text for code.</p><pre class="highlight code'
b' python"><span class="k">def</span> <span class="nf">foo</span>'
b'<span class="p">(</span><span class="n">bar</span>'
b'<span cla
|
castlecms/castle.cms
|
castle/cms/search.py
|
Python
|
gpl-2.0
| 5,355
| 0.000747
|
from castle.cms.behaviors.search import ISearch
from castle.cms.social import COUNT_ANNOTATION_KEY
from collective.elasticsearch import mapping
from collective.elasticsearch import query
from collective.elasticsearch.interfaces import IAdditionalIndexDataProvider
from plone import api
from zope.annotation.interfaces import IAnnotations
from zope.interface import implements
class MappingAdapter(mapping.MappingAdapter):
_default_mapping = mapping.MappingAdapter._default_mapping.copy()
_default_mapping.update({
'page_views': {'store': True, 'type': 'integer', 'null_value': 0},
'facebook_shares': {'store': True, 'type': 'integer', 'null_value': 0},
'twitter_shares': {'store': True, 'type': 'integer', 'null_value': 0},
'linkedin_shares': {'store': True, 'type': 'integer', 'null_value': 0},
'pinterest_shares': {'store': True, 'type': 'integer',
'null_value': 0},
'searchterm_pins': {'store': True, 'type': 'text',
'index': False},
'contributors': {'store': False, 'type': 'text',
'index': True},
'immediate_folder': {'store': True, 'type': 'text',
'index': False},
'parent_folder': {'store': True, 'type': 'keyword',
'index': False}
})
class AdditionalIndexDataProvider(object):
implements(IAdditionalIndexDataProvider)
def __init__(self, obj):
self.obj = obj
def __call__(self, es, existing_data):
annotations = IAnnotations(self.obj)
data = {}
counts = annotations.get(COUNT_ANNOTATION_KEY, {})
for key, value in counts.items():
key = key.replace('_matomo', '')
if isinstance(value, dict):
value = value.get('total') or 0
if key in ('page_views',):
data[key] = value
else:
data[key + '_shares'] = value
sdata = ISearch(self.obj, None)
if sdata:
data['searchterm_pins'] = [
t.lower() for t in sdata.searchterm_pins or []
if t]
else:
data['searchterm_pins'] = []
try:
data['SearchableText'] = u'%s %s' % (
existing_data.get('SearchableText', ''),
u' '.join(data['searchterm_pins']))
except UnicodeError:
pass
try:
data['contributors'] = list(
self.obj.creators + self.obj.contributors)
except Exception:
pass
path = self.obj.getPhysicalPath()
data['parent_folder'] = '/'.join(path[:-1])
site_path = api.portal.get().getPhysicalPath()
if len(path) > (len(site_path) + 1):
data['immediate_folder'] = path[len(site_path):][0]
else:
data['immediate_folder'] = '/'
return data
class QueryAssembler(query.QueryAssembler):
def __call__(self, dquery):
dquery['trashed'] = False
query = super(QueryAssembler, self).__call__(dquery)
# take into account views, likes and custom weighting
try:
searchq = dquery.get('SearchableText', '')
if isinstance(searchq, dict):
searchq = searchq.get('query', '')
searchq = searchq.lower().strip('*')
query = {
'script_score': {
'query': query,
# "boost_mode": "sum", # add score and modified score,
'script': {
'lang': 'painless',
'source': '''int max_shares = 5000;
int max_popularity = 200000;
String[] socialFields = new String[4];
socialFields[0] = 'twitter';
socialFields[1] = 'facebook';
socialFields[2] = 'pinterest';
socialFields[3] = 'linkedin';
float boost = 1.0f;
float max_boost = 2.5f;
long shareCount = 0;
for (int i=0; i<socialFields.length; i++) {
String key = socialFields[i] + '_shares';
if(doc[key].size() != 0){
long docValue = doc[key].value;
shareCount += docValue;
}
}
boost += (shareCount / max_shares);
if (doc['page_views'].size() != 0) {
long docValue = doc['page_views'].value;
boost += (docValue / max_popularity);
}
boost = (float)Math.min(boost, max_boost);
return boost;'''
}
}
}
except KeyError:
pass
return query
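# Editor's note on the Painless script above: it computes
# boost = 1 + shareCount/max_shares + page_views/max_popularity, capped at
# max_boost = 2.5. Because shareCount and both divisors are integers, the
# divisions truncate (Java semantics), so the share term only contributes
# once shares reach 5000; that is the behaviour of the script as written.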
|
Agent007/deepchem
|
examples/low_data/tox_graph_conv_one_fold.py
|
Python
|
mit
| 2,851
| 0.009821
|
"""
Train low-data Tox21 models with graph-convolution. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from datasets import load_tox21_convmol
# 4-fold splits
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
n_trials = 20
tox21_tasks, dataset, transformers = load_tox21_convmol()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Get supports on test-set
support_generator = dc.data.SupportGenerator(test_dataset, n_pos, n_neg,
n_trials)
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for trial_num, (task, support) in enumerate(support_generator):
print("Starting trial %d" % trial_num)
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 50
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(128, 64, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(64, 128, activation='relu'))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.Dense(128, 64, activation='tanh'))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphClassifier(
graph_model,
1,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(support, nb_epoch=10)
# Test model
task_dataset = dc.data.get_task_dataset_minus_support(test_dataset, support,
task)
y_pred = model.predict_proba(task_dataset)
score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
print("Median of Mean Scores")
print(np.median(np.array(mean_task_scores.values())))
|
sje397/p2pool
|
p2pool/skiplists.py
|
Python
|
gpl-3.0
| 2,559
| 0.007034
|
from p2pool.util import forest, math
class WeightsSkipList(forest.TrackerSkipList):
# share_count, weights, total_weight
def get_delta(self, element):
from p2pool.bitcoin import data as bitcoin_data
share = self.tracker.shares[element]
att = bitcoin_data.target_to_average_attempts(share.target)
return 1, {share.share_data['new_script']: att*(65535-share.share_data['donation'])}, att*65535, att*share.share_data['donation']
def combine_deltas(self, (share_count1, weights1, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2)):
return share_count1 + share_count2, math.add_dicts(weights1, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def initial_solution(self, start, (max_shares, desired_weight)):
assert desired_weight % 65535 == 0, divmod(desired_weight, 65535)
return 0, None, 0, 0
def apply_delta(self, (share_count1, weights_list, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2), (max_shares, desired_weight)):
if total_weight1 + total_weight2 > desired_weight and share_count2 == 1:
assert (desired_weight - total_weight1) % 65535 == 0
script, = weights2.iterkeys()
new_weights = dict(script=(desired_weight - total_weight1)//65535*weights2[script]//(total_weight2//65535))
return share_count1 + share_count2, (weights_list, new_weights), desired_weight, total_donation_weight1 + (desired_weight - total_weight1)//65535*total_donation_weight2//(total_weight2//65535)
return share_count1 + share_count2, (weights_list, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def judge(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
if share_count > max_shares or total_weight > desired_weight:
return 1
elif share_count == max_shares or total_weight == desired_weight:
return 0
else:
return -1
def finalize(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
assert share_count <= max_shares and total_weight <= desired_weight
assert share_count == max_shares or total_weight == desired_weight
return math.add_dicts(*math.flatten_linked_list(weights_list)), total_weight, total_donation_weight
|
Answeror/torabot
|
torabot/mods/bilibili/spy/bilibili/spiders/__init__.py
|
Python
|
mit
| 6,671
| 0.0006
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import json
from urllib import urlencode
from scrapy import log
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Identity, TakeFirst
from torabot.spy.spiders.redis import RedisSpider
from torabot.spy.items import Result
from ..items import (
Bangumi,
User,
Post,
SearchResult,
SearchResultPost,
Recommendation,
QueryResult,
)
class Bilibili(RedisSpider):
name = 'bilibili'
def __init__(self, life=60, *args, **kargs):
super(Bilibili, self).__init__(*args, life=life, **kargs)
def make_requests_from_query(self, query):
query = json.loads(query)
for req in {
'bangumi': self.make_bangumi_requests,
'user': self.make_user_requests,
'username': self.make_username_requests,
'query': self.make_query_requests,
}[query['method']](query):
yield req
def make_username_requests(self, query):
yield Request(
make_username_search_uri(query['username']),
callback=self.parse_username_prepare,
meta=dict(query=query),
dont_filter=True,
)
def make_query_requests(self, query):
yield Request(
make_query_uri(query['query']),
callback=self.parse_query,
meta=dict(query=query),
dont_filter=True,
)
def make_bangumi_requests(self, query):
yield Request(
'http://www.bilibili.tv/index/bangumi.json',
callback=self.parse_bangumi,
meta=dict(query=query),
dont_filter=True,
)
def make_user_requests(self, query):
yield Request(
'http://space.bilibili.tv/' + query['user_id'],
callback=self.parse_user,
meta=dict(query=query),
dont_filter=True,
)
def parse_bangumi(self, response):
query = response.meta['query']
try:
return Bangumi(
query=query,
content=json.loads(response.body_as_unicode())
)
except:
log.msg('parse failed', level=log.ERROR)
return Result(ok=False, query=query)
def parse_user(self, response):
query = response.meta['query']
try:
sel = Selector(response)
return User(
user_uri=response.url,
query=query,
posts=[make_post(sub) for sub in sel.xpath('//div[@class="main_list"]/ul/li')]
)
except Exception as e:
return failed(query, str(e))
def parse_query(self, response):
query = response.meta['query']
try:
sel = Selector(response)
return QueryResult(
uri=response.url,
query=query,
posts=[make_search_post(sub) for sub in sel.xpath('//ul[@class="result"]/li')]
)
except Exception as e:
return failed(query, str(e))
def parse_username_prepare(self, response):
query = response.meta['query']
try:
sel = Selector(response)
posts = []
for li in sel.xpath('//ul[@class="result"]/li'):
post = make_search_post(li)
if query['username'] == post['upper']:
return Request(
post['user_uri'],
callback=self.parse_user,
meta=dict(query=query),
dont_filter=True,
)
posts.append(post)
return SearchResult(
query=query,
posts=[],
recommendations=make_recommendations(posts),
)
except Exception as e:
return failed(query, str(e))
def make_recommendations(posts):
def gen():
names = {}
for p in posts:
r = make_recommendation(p)
if r['username'] not in names:
yield r
names[r['username']] = 1
return list(gen())
def make_recommendation(post):
return Recommendation(
user_uri=post['user_uri'],
username=post['upper'],
)
def failed(query, message):
log.msg('parse failed: %s' % message, level=log.ERROR)
return Result(ok=False, query=query, message=message)
class SearchResultPostLoader(ItemLoader):
default_item_class = SearchResultPost
default_input_processor = Identity()
default_output_processor = TakeFirst()
def date_in(self, values):
for s in values:
yield s.strip()
def make_search_post(sel):
loader = SearchResultPostLoader(selector=sel)
loader.add_xpath('title', 'string(.//div[@class="t"])')
loader.add_xpath('upper', 'string(.//a[@class="upper"])')
loader.add_xpath('kind', 'string(.//div[@class="t"]/span)')
loader.add_xpath('date', 'string(.//i[@class="date"])')
loader.add_xpath('intro', 'string(.//i[@class="intro"])')
# mylist entries don't have a title <a>, so use the first <a> instead
# loader.add_xpath('uri', './/a[@class="title"]/@href')
loader.add_xpath('uri', './/a/@href')
loader.add_xpath('user_uri', './/a[@class="upper"]/@href')
loader.add_xpath('cover', './/a[@class="title"]//img/@src')
post = loader.load_item()
if post.get('title', '') and post['title'].startswith(post.get('kind', '')):
post['title'] = post['title'][len(post.get('kind', '')):]
return post
class PostLoader(ItemLoader):
default_item_class = Post
default_input_processor = Identity()
default_output_processor = TakeFirst()
def ctime_in(self, values):
for s in values:
yield s[5:]
def make_post(sel):
loader = PostLoader(selector=sel)
loader.add_xpath('title', 'string(.//a[@class="title"])')
loader.add_xpath('uri', './/a[@class="title"]/@href')
loader.add_xpath('cover', './/img/@src')
loader.add_xpath('kind', 'string(.//a[@class="l"])')
loader.add_xpath('ctime', 'string(.//div[@class="c"])')
loader.add_xpath('desc', 'string(.//div[@class="q"])')
return loader.load_item()
def make_username_search_uri(username):
return make_query_uri(u'@author %s' % username)
def make_query_uri(query):
return 'http://www.bilibili.tv/search?' + urlencode({
'keyword': query.encode('utf-8'),
'orderby': 'senddate',
})
|
xiaonanln/myleetcode-python
|
src/96. Unique Binary Search Trees.py
|
Python
|
apache-2.0
| 560
| 0.05
|
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 1: return 1
nt = [0] * (n+1)
nt[0] = 1
nt[1] = 1
for i in xrange(2, n+1):
# i numbers
total = 0
for k in xrange(i):
# let kth number be the root, left has k numbers, right has i-k-1 numbers
total += nt[k] * nt[i-k-1]
# print n, total
nt[i] = total
return nt[n]
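# Editor's note: nt[i] is the i-th Catalan number (1, 1, 2, 5, 14, 42, ...);
# choosing the k-th value as root splits the rest into independent subtrees
# of k and i-k-1 nodes, so nt[i] = sum(nt[k] * nt[i-1-k] for k in range(i)).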
# print Solution().numTrees(0)
# print Solution().numTrees(1)
# print Solution().numTrees(2)
print Solution().numTrees(3)
# print Solution().numTrees(4)
|
FutureMind/drf-friendly-errors
|
runtests.py
|
Python
|
mit
| 2,452
| 0.001223
|
#! /usr/bin/env python
"""
based on https://github.com/tomchristie/django-rest-framework/blob/master/runtests.py
"""
from __future__ import print_function
import pytest
import sys
import os
import subprocess
PYTEST_ARGS = {
'default': ['tests'],
'fast': ['tests', '-q'],
}
FLAKE8_ARGS = ['rest_framework_friendly_errors', 'tests', '--ignore=E501']
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
else:
run_flake8 = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = ['tests'] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = ['tests', '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
e-democracy/edem.content.logo | setup.py | Python | gpl-3.0 | 1,382 | 0.014472
# -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()
setup(name='edem.content.logo',
version=version,
description="Logos for forums.e-democracy.org",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers for values
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Zope2",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux"
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Bill Bushey',
author_email='bill.bushey@e-democracy.org',
url='http://www.e-democracy.org/',
license='GPL 3',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['edem', 'edem.content'],
include_package_data=True,
zip_safe=True,
install_requires=[
'setuptools',
'edem.skin',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",)
zhongliliu/muse | muse/Calculators/DirectOpt.py | Python | gpl-2.0 | 2,930 | 0.021843
"""
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
import os
from muse.Readwrite import Read_Write
from muse.Symmetry import Findspg
from muse.Calculators import Submit
from muse.Readwrite.ReadInput import indict
def DirectOpt(BigDict,Old_cry,nu,ng):
with open('../log.muse','a') as logfile: print >>logfile
all_enthfile = open('../all-enthalpy-'+str(nu),'a')
if int(indict['Num_Keep'][0]) > 0:
i = 0
nn = 1
nkept = 1
spglist = []
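# Walk the Num_Keep best structures, deduplicating by space group;
# each kept structure is either re-optimized or recorded as-is.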
while nkept <= int(indict['Num_Keep'][0]):
if int(indict['IfReOptKept'][0]):
with open('../log.muse','a') as logfile: print >>logfile, "Direct reopt. ..."
spgnum = Findspg.Findspg(Old_cry[i][1])
if spgnum[0] not in spglist:
spglist.append(spgnum[0])
Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+str(ng)+'-'+str(nn),direct=True,sort=True,vasp5=True)
nk,enth,BigDict = Submit.Submit(BigDict,nu,ng,nn,Old_cry)
nn += 1
nkept +=1
else:
spgnum = Findspg.Findspg(Old_cry[i][1])
if spgnum[0] not in spglist:
with open('../log.muse','a') as logfile: print >>logfile, "-"*23,"%d-%d"%(ng,nn),"-"*23
spglist.append(spgnum[0])
with open('../log.muse','a') as logfile:
print >>logfile, "%02d: %s, %s %10.4f kept, not reopt."%(i+1,spgnum[0],spgnum[1],Old_cry[i][0])
print >>logfile
BigDict[nu][ng][Old_cry[i][0]] = Old_cry[i][1].copy()
ifexist = os.system("grep %02d-%02d: %s"%(ng,nn,"../all-enthalpy-"+str(nu)))
if ifexist != 0:
all_enthfile.write(" %02d-%02d:%11s%9s%14.6f%14.6f%14s"%(ng,nn,spgnum[0],spgnum[1],Old_cry[i][0],Old_cry[i][1].get_volume(),'----')+'\n')
Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+"%02d-%02d"%(ng,nn)+' '+spgnum[0]+' '+str(spgnum[1])+' '+str(Old_cry[i][0]),direct=True,sort=True,vasp5=True)
os.system("cat POSCAR >> ../poscars-%d"%nu)
nn += 1
nkept +=1
i +=1
all_enthfile.close()
return BigDict
dpogue/korman | korman/ui/ui_toolbox.py | Python | gpl-3.0 | 2,031 | 0.003447
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
class ToolboxPanel:
bl_category = "Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
@classmethod
def poll(cls, context):
return context.object and context.scene.render.engine == "PLASMA_GAME"
class PlasmaToolboxPanel(ToolboxPanel, bpy.types.Panel):
bl_context = "objectmode"
bl_label = "Plasma"
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
col.label("Plasma Objects:")
enable_all = col.operator("object.plasma_toggle_all_objects", icon="OBJECT_DATA", text="Enable All")
enable_all.enable = True
all_plasma_objects = all((i.plasma_object.enabled for i in bpy.context.selected_objects))
col.operator("object.plasma_toggle_selected_objects", icon="VIEW3D", text="Disable Selection" if all_plasma_objects else "Enable Selection")
disable_all = col.operator("object.plasma_toggle_all_objects", icon="OBJECT_DATA", text="Disable All")
disable_all.enable = False
col.label("Convert:")
col.operator("object.plasma_convert_plasma_objects", icon="OBJECT_DATA", text="Plasma Objects")
col.operator("texture.plasma_enable_all_textures", icon="TEXTURE")
col.operator("texture.plasma_convert_layer_opacities", icon="IMAGE_RGB_ALPHA", text="Layer Opacities")
domenicosolazzo/TweetMining | tests/test_tweetMining.py | Python | mit | 13,682 | 0.007528
import unittest
from tweetMining import TweetMining, TweetProxy, TestProxy, HttpProxy
import nltk
class TweetMiningTestCase(unittest.TestCase):
def setUp(self):
self.tweetMining = TweetMining(proxy='test')
self.search = self.tweetMining.search(q="twitter")
self.userInfoResponse = self.tweetMining.userInfo(username="fakeusername")
def tearDown(self):
self.tweetMining = None
def test_instanceIsNotNone(self):
self.assertIsNotNone(self.tweetMining)
def test_tweetMiningIsInstanceOf(self):
self.assertIsInstance(self.tweetMining, TweetMining)
# setProxy
def test_setProxy_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "setProxy")))
def test_setProxy_Raises_ExceptionWithWrongInput(self):
self.assertRaises(Exception, self.tweetMining.setProxy, 1)
self.assertRaises(Exception, self.tweetMining.setProxy, "wrong")
def test_setProxy_Returns_TweetProxyInstance(self):
actual = self.tweetMining.setProxy('test')
self.assertIsInstance(actual, TweetProxy)
def test_setProxy_Returns_TestProxyInstance(self):
actual = self.tweetMining.setProxy('test')
self.assertIsInstance(actual, TestProxy)
def test_setProxy_Returns_HttpProxyInstance(self):
actual = self.tweetMining.setProxy('http')
self.assertIsInstance(actual, HttpProxy)
# Trends
def test_Trends_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "trends")))
def test_Trends_returnsADict(self):
self.assertIsInstance(self.tweetMining.trends(), type({}))
def test_Trends_containsTrendsKey(self):
result = self.tweetMining.trends()
actual = 'trends' in result.keys()
self.assertTrue(actual)
def test_TrendsKeyIsAnArray(self):
result = self.tweetMining.trends()
actual = result['trends']
self.assertTrue(isinstance(actual, list))
def test_Trends_containsAs_OfKey(self):
result = self.tweetMining.trends()
actual = 'as_of' in result.keys()
self.assertTrue(actual)
def test_As_OfKeyIsAString(self):
result = self.tweetMining.trends()
actual = str(result['as_of'])
self.assertTrue(isinstance(actual, str))
# Search
def test_search_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "search")))
def test_search_returnsADict(self):
self.assertIsInstance(self.search, type({}))
def test_search_containsResultsKey(self):
actual = 'results' in self.search.keys()
self.assertTrue(actual)
def test_ResultsKeyIsAnArray(self):
actual = self.search['results']
self.assertTrue(isinstance(actual, list))
def test_search_containsSince_IdKey(self):
actual = 'since_id' in self.search.keys()
self.assertTrue(actual)
def test_Since_IdKeyIsAnInt(self):
actual = self.search['since_id']
self.assertTrue(isinstance(actual, int))
def test_search_containsQueryKey(self):
actual = 'query' in self.search.keys()
self.assertTrue(actual)
def test_QueryKeyIsAString(self):
actual = self.search['query']
self.assertTrue(isinstance(actual, (str, unicode)))
def test_search_containsResults_per_pageKey(self):
actual = 'results_per_page' in self.search.keys()
self.assertTrue(actual)
def test_Results_Per_PageKeyIsAnInt(self):
actual = self.search['results_per_page']
self.assertTrue(isinstance(actual, int))
def test_search_containsMaxIdKey(self):
actual = 'max_id' in self.search.keys()
self.assertTrue(actual)
def test_Max_IdKeyIsAnInteger(self):
actual = self.search['max_id']
self.assertTrue(isinstance(actual, (int, long)))
def test_search_containsPageKey(self):
actual = 'page' in self.search.keys()
self.assertTrue(actual)
def test_PageKeyIsAnInt(self):
actual = self.search['page']
self.assertTrue(isinstance(actual, int))
def test_search_containsNextPageKey(self):
actual = 'next_page' in self.search.keys()
self.assertTrue(actual)
def test_NextPageKeyIsAString(self):
actual = self.search['next_page']
self.assertTrue(isinstance(actual, (str, unicode)))
def test_search_containsCompleted_InKey(self):
actual = 'completed_in' in self.search.keys()
self.assertTrue(actual)
def test_CompletedInKeyIsFloat(self):
actual = self.search['completed_in']
self.assertTrue(isinstance(actual, (float)))
def test_search_containsRefreshUrlKey(self):
actual = 'refresh_url' in self.search.keys()
self.assertTrue(actual)
def test_RefreshUrlKeyIsAString(self):
actual = self.search['refresh_url']
self.assertTrue(isinstance(actual, (str, unicode)))
# Words
def test_words_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "words")))
def test_words_raisesAnExceptionWithWrongInput(self):
self.assertRaises(Exception, self.tweetMining.words, 1)
self.assertRaises(Exception, self.tweetMining.words, "1")
self.assertRaises(Exception, self.tweetMining.words, (1,))
self.assertRaises(Exception, self.tweetMining.words, {1:1})
def test_words_acceptsAListAsInput(self):
self.assertIsInstance(self.tweetMining.words([]), list)
def test_words_returnsAnArray(self):
actual = self.tweetMining.words(self.search['results'])
self.assertIsInstance(actual, list)
# FreqDist
def test_freqDist_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "freqDist")))
def test_freqDist_raisesAnExceptionWithWrongInput(self):
self.assertRaises(Exception, self.tweetMining.freqDist, 1)
self.assertRaises(Exception, self.tweetMining.freqDist, "1")
self.assertRaises(Exception, self.tweetMining.freqDist, (1,))
self.assertRaises(Exception, self.tweetMining.freqDist, {1:1})
def test_freqDist_acceptsAListAsInput(self):
self.assertEquals(type(self.tweetMining.freqDist([])), nltk.probability.FreqDist)
def test_freqDist_returnsAnArray(self):
words = self.tweetMining.words(self.search['results'])
actual = self.tweetMining.freqDist(words)
self.assertEquals(type(actual), nltk.probability.FreqDist)
# _get_rt_sources
def test_getRTSources_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "_getRTSources")))
def test_getRTSources_returnsAList(self):
actual = self.tweetMining._getRTSources('RT @user la la la')
self.assertIsInstance(actual, list)
def test_getRTSources_raisesAnExceptionWithWrongInput(self):
self.assertRaises(Exception, self.tweetMining._getRTSources, 1)
self.assertRaises(Exception, self.tweetMining._getRTSources, [])
self.assertRaises(Exception, self.tweetMining._getRTSources, {})
# buildRetweetGraph
def test_buildRetweetGraph_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "buildRetweetGraph")))
def test_buildRetweetGraph_ReturnsADict(self):
actual = self.tweetMining.buildRetweetGraph(self.search['results'])
self.assertIsInstance(actual, dict)
def test_buildRetweetGraph_Dict_containsGraphKey(self):
actual = self.tweetMining.buildRetweetGraph(self.search['results'])
self.assertTrue('graph' in actual.keys())
self.assertIsNotNone(actual['graph'])
def test_buildRetweetGraph_RaisesAnExceptionWithWrongInput(self):
self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, 1)
self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, "1")
self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, {})
# userInfo
def test_userInfo_exists(self):
self.assertTrue(callable(getattr(self.tweetMining, "userInfo")))
def test_userInfo_ReturnsADict(self):
actual = self.userInfoResponse
self.assertIsInstance(actual, dict)
def test_userInfo_Dict_ContainsAProfile_Background_TileKey(self):
actual = 'profile_background_tile' in self.userInfoResponse.keys()
self.assertTrue(actual)
mgeorgehansen/FIFE_Technomage | demos/shooter/scripts/common/baseobject.py | Python | lgpl-2.1 | 7,040 | 0.035938
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2010 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from fife import fife
from math import sqrt  # sqrt is used in _setMaxVelocity below
from fife.fife import FloatRect as Rect
SHTR_DEFAULT = 0
SHTR_PLAYER = 1
SHTR_LASTBOSS = 2
SHTR_PROJECTILE = 3
SHTR_ENEMYSHIP = 4
SHTR_POWERUP = 5
class SpaceObject(object):
"""
Space Object is the base class for all game objects.
"""
def __init__(self, scene, name, findInstance=True):
"""
@param scene: A reference to the Scene
@type scene: L{Scene}
@param name: The name of the space object
@type name: C{string}
@param findInstance: True if the instance you are looking for is already loaded
False if you want to load the instance yourself
@type findInstance: C{boolean}
"""
self._scene = scene
self._model = self._scene.model
self._layer = self._scene.objectlayer
self._name = name
self._xscale = self._layer.getCellGrid().getXScale()
self._yscale = self._layer.getCellGrid().getYScale()
self._velocity = fife.DoublePoint(0,0)
self._maxvelocity = 1.25
self._boundingBox = Rect(0,0,0,0)
self._running = False
self._changedPosition = False
self._scenenodeid = -1
self._type = SHTR_DEFAULT
if findInstance:
self._instance = self._layer.getInstance(self._name)
self._instance.thisown = 0
else:
self._instance = None
def start(self):
"""
You must execute this function for the object to be updated
"""
if self._instance:
self._running = True
def update(self):
"""
If the object is running this updates the FIFE instance location based on
the objects current velocity and time since last frame
"""
if self._running:
shiploc = self.location
exactloc = shiploc.getExactLayerCoordinates()
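# Integrate velocity over the elapsed frame time (timedelta is in ms),
# converting from world units to layer cells via the grid scale.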
exactloc.x += self._velocity.x * (self._scene.timedelta/1000.0)/self._xscale
exactloc.y += self._velocity.y * (self._scene.timedelta/1000.0)/self._yscale
self._boundingBox.x = (exactloc.x * self._xscale - self._boundingBox.w/2)
self._boundingBox.y = (exactloc.y * self._yscale - self._boundingBox.h/2)
shiploc.setExactLayerCoordinates(exactloc)
if shiploc == self.location:
self._changedPosition = False
else:
self._changedPosition = True
self.location = shiploc
def stop(self):
"""
Stops the object from being updated.
"""
self._running = False
def destroy(self):
"""
You are meant to override this function to specify what happens when the object
gets destroyed
"""
self._running = False
def applyThrust(self, vector):
"""
Applies a thrust vector to the object.
@note: Objects do not have mass and therefore no inertia.
@param vector: A vector specifying the direction and intensity of thrust.
@type vector: L{fife.DoublePoint}
"""
self._velocity.x += (vector.x * (self._scene.timedelta/1000.0))/self._xscale
self._velocity.y += (vector.y * (self._scene.timedelta/1000.0))/self._yscale
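# Clamp the speed to _maxvelocity while preserving direction.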
if self._velocity.length() > self._maxvelocity:
norm = fife.DoublePoint(self._velocity)
norm.normalize()
self._velocity.x = norm.x * self._maxvelocity
self._velocity.y = norm.y * self._maxvelocity
def applyBrake(self, brakingForce):
"""
Applies a braking thrust in the opposite direction of the current velocity
@param brakingForce: a floating point value specifying how fast the object should decelerate
@type brakingForce: C{float}
"""
if self._velocity.length() <= .01:
self._velocity.x = 0
self._velocity.y = 0
return
#first normalize to get a unit vector of the direction we are traveling
norm = fife.DoublePoint(self._velocity)
norm.normalize()
if norm.length() == 0:
self._velocity.x = 0
self._velocity.y = 0
return
#negate to get opposite direction
norm.x = norm.x * -1
norm.y = norm.y * -1
#apply braking deceleration
norm.x *= brakingForce
norm.y *= brakingForce
self._velocity.x += (norm.x * (self._scene.timedelta/1000.0))/self._xscale
self._velocity.y += (norm.y * (self._scene.timedelta/1000.0))/self._yscale
def removeFromScene(self):
"""
Queues this object to be removed from the scene. The scene will remove the object
next time the garbage collection routines are called.
"""
self._scene.queueObjectForRemoval(self)
def _isRunning(self):
return self._running
def _getMaxVelocity(self):
return self._maxvelocity
def _setMaxVelocity(self, maxvel):
self._maxvelocity = maxvel/sqrt(self._xscale * self._yscale)
def _getLocation(self):
return self._instance.getLocation()
def _setLocation(self, loc):
self._instance.setLocation(loc)
def _getInstance(self):
return self._instance
def _setInstance(self, instance):
self._instance = instance
if self._instance:
self._instance.thisown = 0
def _getVelocity(self):
return self._velocity
def _setVelocity(self, velocity):
self._velocity = velocity
def _getBoundingBox(self):
return self._boundingBox
def _getW(self):
return self._boundingBox.w
def _getH(self):
return self._boundingBox.h
def _setW(self, w):
self._boundingBox.w = w
def _setH(self, h):
self._boundingBox.h = h
def _changedPosition(self):
return self._changedPosition
def _getNodeId(self):
return self._scenenodeid
def _setNodeId(self, id):
self._scenenodeid = id
def _getType(self):
return self._type
def _setType(self, objtype):
self._type = objtype
type = property(_getType, _setType)
width = property(_getW, _setW)
height = property(_getH, _setH)
boundingbox = property(_getBoundingBox)
location = property(_getLocation,_setLocation)
instance = property(_getInstance, _setInstance)
velocity = property(_getVelocity, _setVelocity)
maxvelocity = property(_getMaxVelocity, _setMaxVelocity)
running = property(_isRunning)
changedposition = property(_changedPosition)
scenenodeid = property(_getNodeId, _setNodeId)
vpetersson/docker-py | tests/integration/models_services_test.py | Python | apache-2.0 | 7,604 | 0
import unittest
import docker
from .. import helpers
from .base import TEST_API_VERSION
class ServiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
client = docker.from_env(version=TEST_API_VERSION)
helpers.force_leave_swarm(client)
client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())
@classmethod
def tearDownClass(cls):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_create(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
# create arguments
name=name,
labels={'foo': 'bar'},
# ContainerSpec arguments
image="alpine",
command="sleep 300",
container_labels={'container': 'label'}
)
assert service.name == name
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
def test_create_with_network(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(
helpers.random_name(), driver='overlay'
)
service = client.services.create(
# create arguments
name=name,
# ContainerSpec arguments
image="alpine",
command="sleep 300",
networks=[network.id]
)
assert 'Networks' in service.attrs['Spec']['TaskTemplate']
networks = service.attrs['Spec']['TaskTemplate']['Networks']
assert len(networks) == 1
assert networks[0]['Target'] == network.id
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
name=name,
image="alpine",
command="sleep 300"
)
service = client.services.get(service.id)
assert service.name == name
def test_list_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
assert service in client.services.list()
service.remove()
assert service not in client.services.list()
def test_tasks(self):
client = docker.from_env(version=TEST_API_VERSION)
service1 = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
service2 = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
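# Poll until the swarm scheduler has created a task for each service.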
tasks = []
while len(tasks) == 0:
tasks = service1.tasks()
assert len(tasks) == 1
assert tasks[0]['ServiceID'] == service1.id
tasks = []
while len(tasks) == 0:
tasks = service2.tasks()
assert len(tasks) == 1
assert tasks[0]['ServiceID'] == service2.id
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert container_spec['Command'] == ["sleep", "600"]
def test_update_retains_service_labels(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
labels={'service.label': 'SampleLabel'},
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
labels = service.attrs['Spec']['Labels']
assert labels == {'service.label': 'SampleLabel'}
def test_update_retains_container_labels(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300",
container_labels={'container.label': 'SampleLabel'}
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert container_spec['Labels'] == {'container.label': 'SampleLabel'}
def test_update_remove_service_labels(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
labels={'service.label': 'SampleLabel'},
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
labels={},
# ContainerSpec argument
command="sleep 600"
)
service.reload()
assert not service.attrs['Spec'].get('Labels')
def test_scale_service(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
tasks = []
while len(tasks) == 0:
tasks = service.tasks()
assert len(tasks) == 1
service.update(
mode=docker.types.ServiceMode('replicated', replicas=2),
)
while len(tasks) == 1:
tasks = service.tasks()
assert len(tasks) >= 2
# check that the container spec is not overridden with None
service.reload()
spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert spec.get('Command') == ['sleep', '300']
@helpers.requires_api_version('1.25')
def test_restart_service(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
initial_version = service.version
service.update(
# create argument
name=service.name,
# task template argument
force_update=10,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
assert service.version > initial_version
hone5t/pyquick | basic/string1.py | Python | apache-2.0 | 3,606 | 0.013588
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
# +++your code here+++
if count < 10 : return 'Number of donuts: %d' % count
else: return 'Number of donuts: many'
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
# +++your code here+++
if len(s) < 2 : return ""
return s[:2]+s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# +++your code here+++
first=s[0]
return first+s[1:].replace(first,'*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
# +++your code here+++
return b[:2]+a[2:] + ' ' + a[:2]+b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
alex/sqlalchemy | test/orm/test_froms.py | Python | mit | 95,771 | 0.010076
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import operator
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util
from sqlalchemy.sql import compiler, table, column
from sqlalchemy.engine import default
from sqlalchemy.orm import *
from sqlalchemy.orm import attributes
from sqlalchemy.testing import eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, engines
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
from sqlalchemy.orm.util import join, outerjoin, with_parent
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def setup_mappers(cls):
Node, composite_pk_table, users, Keyword, items, Dingaling, \
order_items, item_keywords, Item, User, dingalings, \
Address, keywords, CompositePk, nodes, Order, orders, \
addresses = cls.classes.Node, \
cls.tables.composite_pk_table, cls.tables.users, \
cls.classes.Keyword, cls.tables.items, \
cls.classes.Dingaling, cls.tables.order_items, \
cls.tables.item_keywords, cls.classes.Item, \
cls.classes.User, cls.tables.dingalings, \
cls.classes.Address, cls.tables.keywords, \
cls.classes.CompositePk, cls.tables.nodes, \
cls.classes.Order, cls.tables.orders, cls.tables.addresses
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', order_by=addresses.c.id),
'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o
})
mapper(Address, addresses, properties={
'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o
})
mapper(Dingaling, dingalings)
mapper(Order, orders, properties={
'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m
'address':relationship(Address), # m2o
})
mapper(Item, items, properties={
'keywords':relationship(Keyword, secondary=item_keywords) #m2m
})
mapper(Keyword, keywords)
mapper(Node, nodes, properties={
'children':relationship(Node,
backref=backref('parent', remote_side=[nodes.c.id])
)
})
mapper(CompositePk, composite_pk_table)
configure_mappers()
class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL):
query_correlated = "SELECT users.name AS users_name, " \
"(SELECT count(addresses.id) AS count_1 FROM addresses " \
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
query_not_correlated = "SELECT users.name AS users_name, " \
"(SELECT count(addresses.id) AS count_1 FROM addresses, users " \
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
def test_as_scalar_select_auto_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = select(
[func.count(addresses.c.id)],
addresses.c.user_id==users.c.id
).as_scalar()
query = select([users.c.name.label('users_name'), query])
self.assert_compile(query, self.query_correlated,
dialect=default.DefaultDialect()
)
def test_as_scalar_select_explicit_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = select(
[func.count(addresses.c.id)],
addresses.c.user_id==users.c.id
).correlate(users).as_scalar()
query = select([users.c.name.label('users_name'), query])
self.assert_compile(query, self.query_correlated,
dialect=default.DefaultDialect()
)
def test_as_scalar_select_correlate_off(self):
addresses, users = self.tables.addresses, self.tables.users
query = select(
[func.count(addresses.c.id)],
addresses.c.user_id==users.c.id
).correlate(None).as_scalar()
query = select([ users.c.name.label('users_name'), query])
self.assert_compile(query, self.query_not_correlated,
dialect=default.DefaultDialect()
)
def test_as_scalar_query_auto_correlate(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = sess.query(func.count(Address.id))\
.filter(Address.user_id==User.id)\
.as_scalar()
query = sess.query(User.name, query)
self.assert_compile(query, self.query_correlated,
dialect=default.DefaultDialect()
)
def test_as_scalar_query_explicit_correlate(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = sess.query(func.count(Address.id))\
.filter(Address.user_id==User.id)\
.correlate(self.tables.users)\
.as_scalar()
query = sess.query(User.name, query)
self.assert_compile(query, self.query_correlated,
dialect=default.DefaultDialect()
)
def test_as_scalar_query_correlate_off(self):
sess = create_session()
Address, User = self.classes.Address, self.classes.User
query = sess.query(func.count(Address.id))\
.filter(Address.user_id==User.id)\
.correlate(None)\
.as_scalar()
query = sess.query(User.name, query)
self.assert_compile(query, self.query_not_correlated,
dialect=default.DefaultDialect()
)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
"""compare a bunch of select() tests with the equivalent Query using
straight table/columns.
Results should be the same as Query should act as a select() pass-
thru for ClauseElement entities.
"""
__dialect__ = 'default'
def test_select(self):
addresses, users = self.tables.addresses, self.tables.users
sess = create_session()
self.assert_compile(sess.query(users).select_entity_from(
users.select()).with_labels().statement,
"SELECT users.id AS users_id, users.name AS users_name FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) AS anon_1",
)
self.assert_compile(sess.query(users, exists([1], from_obj=addresses)
).with_labels().statement,
"SELECT users.id AS users_id, users.name AS users_name, EXISTS "
"(SELECT 1 FROM addresses) AS anon_1 FROM users",
)
# a little tedious here, adding labels to work around Query's
# auto-labelling.
s = sess.query(addresses.c.id.label('id'),
addresses.c.email_address.label('email')).\
filter(addresses.c.user_id == users.c.id).correlate(users).\
statement.alias()
self.assert_compile(sess.query(users, s.c.email).select_entity_from(
users.join(s, s.c.id == users.c.id)
).with_labels().statement,
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email AS anon_1_email "
"FROM users JOIN (SELECT addresses.id AS id, "
"addresses.email_address AS email FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 "
"ON anon_1.id = users.id",
)
x = func.lala(users.c.id).label('foo')
self.assert_compile(sess.query(x).filter(x == 5).statement,
"SELECT lala(users.id) AS foo FROM users WHERE "
"lala(users.id) = :param_1")
self.assert_compile(sess.query(func.sum(x).label('bar')).statement,
"SELECT sum(lala(users.id)) AS bar FROM users")
class FromSelfTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_filter(self):
User = self.classes.User
eq_(
[User(id=8), User(id=
dwadler/QGIS | python/plugins/processing/algs/saga/SagaAlgorithm.py | Python | gpl-2.0 | 20,651 | 0.003099
# -*- coding: utf-8 -*-
"""
***************************************************************************
SagaAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import importlib
from qgis.core import (Qgis,
QgsApplication,
QgsProcessingUtils,
QgsProcessingException,
QgsMessageLog,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterBoolean,
QgsProcessingParameterNumber,
QgsProcessingParameterEnum,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterMatrix,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterFile,
QgsProcessingParameterExtent,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterVectorDestination)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.parameters import getParameterFromString
from processing.algs.help import shortHelp
from processing.tools.system import getTempFilename
from processing.algs.saga.SagaNameDecorator import decoratedAlgorithmName, decoratedGroupName
from . import SagaUtils
from .SagaAlgorithmBase import SagaAlgorithmBase
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
sessionExportedLayers = {}
class SagaAlgorithm(SagaAlgorithmBase):
OUTPUT_EXTENT = 'OUTPUT_EXTENT'
def __init__(self, descriptionfile):
super().__init__()
self.hardcoded_strings = []
self.allow_nonmatching_grid_extents = False
self.description_file = descriptionfile
self.undecorated_group = None
self._name = ''
self._display_name = ''
self._group = ''
self._groupId = ''
self.params = []
self.defineCharacteristicsFromFile()
def createInstance(self):
return SagaAlgorithm(self.description_file)
def initAlgorithm(self, config=None):
for p in self.params:
self.addParameter(p)
def name(self):
return self._name
def displayName(self):
return self._display_name
def group(self):
return self._group
def groupId(self):
return self._groupId
def shortHelpString(self):
return shortHelp.get(self.id(), None)
def icon(self):
return QgsApplication.getThemeIcon("/providerSaga.svg")
def svgIconPath(self):
return QgsApplication.iconPath("providerSaga.svg")
def flags(self):
# TODO - maybe it's safe to background thread this?
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading
def defineCharacteristicsFromFile(self):
with open(self.description_file, encoding="utf-8") as lines:
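# Description file layout: line 1 is the algorithm name (optionally
# 'name|saga_cmd_name'), line 2 the group, then one parameter or
# 'Hardcoded|...' line per row until a blank line.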
line = lines.readline().strip('\n').strip()
self._name = line
if '|' in self._name:
tokens = self._name.split('|')
self._name = tokens[0]
# cmdname is the name of the algorithm in SAGA, that is, the name to use to call it in the console
self.cmdname = tokens[1]
else:
self.cmdname = self._name
self._display_name = self.tr(str(self._name))
self._name = decoratedAlgorithmName(self._name)
self._display_name = self.tr(str(self._name))
self._name = self._name.lower()
validChars = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:'
self._name = ''.join(c for c in self._name if c in validChars)
line = lines.readline().strip('\n').strip()
self.undecorated_group = line
self._group = self.tr(decoratedGroupName(self.undecorated_group))
validChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:'
grpName = decoratedGroupName(self.undecorated_group).lower()
self._groupId = ''.join(c for c in grpName if c in validChars)
line = lines.readline().strip('\n').strip()
while line != '':
if line.startswith('Hardcoded'):
self.hardcoded_strings.append(line[len('Hardcoded|'):])
elif line.startswith('QgsProcessingParameter') or line.startswith('Parameter'):
self.params.append(getParameterFromString(line))
elif line.startswith('AllowUnmatching'):
self.allow_nonmatching_grid_extents = True
else:
pass # TODO
#self.addOutput(getOutputFromString(line))
line = lines.readline().strip('\n').strip()
def processAlgorithm(self, parameters, context, feedback):
commands = list()
self.exportedLayers = {}
self.preProcessInputs()
extent = None
crs = None
# 1: Export rasters to sgrd and vectors to shp
# Tables must be in dbf format. We check that.
for param in self.parameterDefinitions():
if isinstance(param, QgsProcessingParameterRasterLayer):
if param.name() not in parameters or parameters[param.name()] is None:
continue
if isinstance(parameters[param.name()], str):
if parameters[param.name()].lower().endswith('sdat'):
self.exportedLayers[param.name()] = parameters[param.name()][:-4] + 'sgrd'
if parameters[param.name()].lower().endswith('sgrd'):
self.exportedLayers[param.name()] = parameters[param.name()]
else:
layer = self.parameterAsRasterLayer(parameters, param.name(), context)
exportCommand = self.exportRasterLayer(param.name(), layer)
if exportCommand is not None:
commands.append(exportCommand)
else:
if parameters[param.name()].source().lower().endswith('sdat'):
self.exportedLayers[param.name()] = parameters[param.name()].source()[:-4] + 'sgrd'
if parameters[param.name()].source().lower().endswith('sgrd'):
self.exportedLayers[param.name()] = parameters[param.name()].source()
else:
exportCommand = self.exportRasterLayer(param.name(), parameters[param.name()])
if exportCommand is not None:
commands.append(exportCommand)
elif isinstance(param, QgsProcessingParameterFeatureSource):
if param.name() not in parameters or parameters[param.name()] is None:
continue
if not crs:
source = self.parameterAsSource(parameters, param.name()
mdrasmus/spimap | test/all_terms.py | Python | gpl-2.0 | 4,112 | 0.009971
import sys, os
import pygsl
import pygsl.sf
while "python" not in os.listdir("."):
os.chdir("..")
sys.path.append("python")
import spidir
from rasmus.common import *
from rasmus.bio import phylo
from test import *
if os.system("which xpdf 2>/dev/null") != 0:
rplot_set_viewer("display")
def exc_default(func, val, exc=Exception):
"""Specify a default value for when an exception occurs"""
try:
return func()
except exc:
return val
class TestAllTerms (unittest.TestCase):
def test_all_terms(self):
"""Test all terms"""
prep_dir("test/output/all_terms")
out = open("test/output/all_terms/flies.txt", "w")
#out = sys.stderr
treeids = os.listdir("test/data/flies")[:100]
#treeids = ["0"]
for treeid in treeids:
tree = read_tree("test/data/flies/%s/%s.nt.tree" % (treeid, treeid))
align = read_fasta("test/data/flies/%s/%s.nt.align" % (treeid, treeid))
print >>out, treeid
draw_tree(tree, out=out)
stree = read_tree("test/data/flies.norm.stree")
gene2species = phylo.read_gene2species("test/data/flies.smap")
params = spidir.read_params("test/data/flies.nt.param")
birth = .4
death = .39
pretime = 1.0
nsamples = 100
maxdoom = 20
bgfreq = [.258,.267,.266,.209]
kappa = 1.59
recon = phylo.reconcile(tree, stree, gene2species)
events = phylo.label_events(tree, recon)
branchp, topp, seqlk = spidir.calc_joint_prob(
align, tree, stree, recon, events, params,
birth, death, pretime,
bgfreq, kappa, maxdoom=maxdoom, terms=True)
joint = topp + branchp + seqlk
print >>out, "topp ", topp
print >>out, "branchp", branchp
print >>out, "seqlk ", seqlk
print >>out, "joint ", joint
out.close()
def test_search(self):
"""Test all terms"""
prep_dir("test/output/all_terms_search")
out = open("test/output/all_terms_search/flies.txt", "w")
#out = sys.stderr
treeids = os.listdir("test/data/flies")
#treeids = ["3"]
for treeid in treeids:
tree_correct = read_tree("test/data/flies.nt/%s/%s.tree" %
(treeid, treeid))
align = read_fasta("test/data/flies.nt/%s/%s.align" %
(treeid, treeid))
phylo.hash_order_tree(tree_correct)
print >>out, treeid
print >>out, "correct"
drawTree(tree_correct, out=out)
stree = read_tree("test/data/flies.norm.stree")
gene2species = phylo.read_gene2species("test/data/flies.smap")
params = spidir.read_params("test/data/flies.nt.param")
birth = .4
death = .39
pretime = 1.0
maxdoom = 20
bgfreq = [.258,.267,.266,.209]
kappa = 1.59
genes = align.keys()
seqs = align.values()
tree = spidir.search_climb(genes, seqs,
stree, gene2species,
params, birth, death, pretime,
bgfreq, kappa,
maxdoom=maxdoom,
niter=50, quickiter=100,
nsamples=100, branch_approx=True)
phylo.hash_order_tree(tree)
print >>out, "constructed"
drawTree(tree, out=out)
print >>out, "is_correct:", (phylo.hash_tree(tree) ==
phylo.hash_tree(tree_correct))
out.close()
if __name__ == "__main__":
unittest.main(testRunner=TestRunner(), argv=sys.argv)
agharibi/linchpin | linchpin/provision/InventoryFilters/OpenstackInventory.py | Python | gpl-3.0 | 1,255 | 0
#!/usr/bin/env python
import StringIO
from InventoryFilter import InventoryFilter
class OpenstackInventory(InventoryFilter):
def get_host_ips(self, topo):
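# An 'openstack' entry may be a list of servers or a single server dict;
# collect the accessIPv4 address from either shape.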
host_public_ips = []
for group in topo['os_server_res']:
grp = group.get('openstack', [])
if isinstance(grp, list):
for server in grp:
host_public_ips.append(str(server['accessIPv4']))
if isinstance(grp, dict):
host_public_ips.append(str(grp['accessIPv4']))
return host_public_ips
def get_inventory(self, topo, layout):
if len(topo['os_server_res']) == 0:
return ""
inven_hosts = self.get_host_ips(topo)
# adding sections to respective host groups
host_groups = self.get_layout_host_groups(layout)
self.add_sections(host_groups)
# set children for each host group
self.set_children(layout)
# set vars for each host group
self.set_vars(layout)
# add ip addresses to each host
self.add_ips_to_groups(inven_hosts, layout)
self.add_common_vars(host_groups, layout)
output = StringIO.StringIO()
self.config.write(output)
return output.getvalue()
CarterBain/Medici | ib/client/msg_wrapper.py | Python | bsd-3-clause | 6,312 | 0.003961
__author__ = 'oglebrandon'
import logging as logger
import types
from ib.ext.EWrapper import EWrapper
def showmessage(message, mapping):
try:
del(mapping['self'])
except (KeyError, ):
pass
items = mapping.items()
items.sort()
print '### %s' % (message, )
for k, v in items:
print ' %s:%s' % (k, v)
class Observable(object):
"""
Sender -> dispatches messages to interested callables
"""
def __init__(self):
self.listeners = {}
self.logger = logger.getLogger()
def register(self,listener,events=None):
"""
register a listener function
Parameters
-----------
listener : external listener function
events : tuple or list of relevant events (default=None)
"""
if events is not None and type(events) not in \
(types.TupleType,types.ListType):
events = (events,)
self.listeners[listener] = events
def dispatch(self,event=None, msg=None):
"""notify listeners """
for listener,events in self.listeners.items():
if events is None or event is None or event in events:
try:
listener(self,event,msg)
except (Exception,):
self.unregister(listener)
errmsg = "Exception in message dispatch: Handler '{0}' " \
"unregistered for event " \
"'{1}' ".format(listener.func_name,event)
self.logger.exception(errmsg)
def unregister(self,listener):
""" unregister listener function """
del self.listeners[listener]
class ReferenceWrapper(EWrapper,Observable):
# contract = None
# tickerId
# field
# price
def __init__ (self,subs={}):
super(ReferenceWrapper, self).__init__()
self.orderID = None
self.subscriptions = subs
def setSubscriptions (self,subs):
self.subscriptions = subs
def tickGeneric(self, tickerId, field, price):
pass
def tickPrice(self, tickerId, field, price, canAutoExecute):
showmessage('tickPrice', vars())
def tickSize(self, tickerId, field, size):
showmessage('tickSize', vars())
def tickString(self, tickerId, tickType, value):
#showmessage('tickString', vars())
pass
def tickOptionComputation(self, tickerId, field,
impliedVolatility, delta,
x, c, q, w, e, r):
#showmessage('tickOptionComputation', vars())
pass
def openOrderEnd(self):
pass
def orderStatus(self, orderId, status, filled, remaining,
avgFillPrice, permId, parentId, lastFillPrice,
clientId, whyHeId):
if filled:
self.dispatch(event='execution',msg=[1,2,3])
showmessage('orderStatus', vars())
def openOrder(self, orderId, contract, order, state):
showmessage('openOrder', vars())
def connectionClosed(self):
showmessage('connectionClosed', {})
def updateAccountValue(self, key, value, currency, accountName):
showmessage('updateAccountValue', vars())
def updatePortfolio(self, contract, position, marketPrice,
marketValue, averageCost, unrealizedPNL,
realizedPNL, accountName):
showmessage('updatePortfolio', vars())
def updateAccountTime(self, timeStamp):
showmessage('updateAccountTime', vars())
def nextValidId(self, orderId):
self.orderID = orderId
showmessage('nextValidId', vars())
def contractDetails(self, reqId, contractDetails):
showmessage('contractDetails', vars())
print contractDetails.__dict__
def bondContractDetails(self, reqId, contractDetails):
showmessage('bondContractDetails', vars())
def execDetails(self, orderId, contract, execution):
showmessage('execDetails', vars())
def error(self, id=None, errorCode=None, errorMsg=None):
showmessage('error', vars())
def updateMktDepth(self, tickerId, position, operation, side, price, size):
showmessage('updateMktDepth', vars())
def updateMktDepthL2(self, tickerId, position,
marketMaker, operation,
side, price, size):
showmessage('updateMktDepthL2', vars())
def updateNewsBulletin(self, msgId, msgType, message, origExchange):
showmessage('updateNewsBulletin', vars())
def managedAccounts(self, accountsList):
showmessage('managedAccounts', vars())
def receiveFA(self, faDataType, xml):
showmessage('receiveFA', vars())
def historicalData(self, reqId, date,
open, high, low, close,
volume, count, WAP, hasGaps):
showmessage('historicalData', vars())
def scannerParameters(self, xml):
showmessage('scannerParameters', vars())
def scannerData(self, reqId, rank, contractDetails,
distance, benchmark, projection, legsStr):
showmessage('scannerData', vars())
def accountDownloadEnd(self, accountName):
showmessage('accountDownloadEnd', vars())
def contractDetailsEnd(self, reqId):
showmessage('contractDetailsEnd', vars())
def currentTime(self):
showmessage('currentTime', vars())
def deltaNeutralValidation(self):
showmessage('deltaNeutralValidation', vars())
def error_0(self):
showmessage('error_0', vars())
def error_1(self):
showmessage('error_1', vars())
def execDetailsEnd(self):
showmessage('execDetailsEnd', vars())
def fundamentalData(self):
showmessage('fundamentalData', vars())
def realtimeBar(self):
showmessage('realtimeBar', vars())
def scannerDataEnd(self):
showmessage('scannerDataEnd', vars())
def tickEFP(self):
showmessage('tickEFP', vars())
def tickSnapshotEnd(self):
showmessage('tickSnapshotEnd', vars())
def marketDataType(self):
showmessage('marketDataType', vars())
def commissionReport(self, commissionReport):
showmessage('commissionReport', vars())
quisas/albus | cli_tools/openpyxl/tests/test_named_range.py | Python | agpl-3.0 | 6,909 | 0.002461
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
import os.path
# 3rd-party imports
from nose.tools import eq_, assert_raises, ok_
# package imports
from openpyxl.tests.helper import DATADIR, TMPDIR, clean_tmpdir, make_tmpdir
from openpyxl.namedrange import split_named_range, NamedRange
from openpyxl.reader.workbook import read_named_ranges
from openpyxl.shared.exc import NamedRangeException
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.workbook import write_workbook
from openpyxl.workbook import Workbook
def test_split():
eq_([('My Sheet', '$D$8'), ], split_named_range("'My Sheet'!$D$8"))
def test_split_no_quotes():
eq_([('HYPOTHESES', '$B$3:$L$3'), ], split_named_range('HYPOTHESES!$B$3:$L$3'))
def test_bad_range_name():
assert_raises(NamedRangeException, split_named_range, 'HYPOTHESES$B$3')
def test_range_name_worksheet_special_chars():
class DummyWs(object):
title = 'My Sheeet with a , and \''
def __str__(self):
return self.title
ws = DummyWs()
class DummyWB(object):
def get_sheet_by_name(self, name):
if name == ws.title:
return ws
handle = open(os.path.join(DATADIR, 'reader', 'workbook_namedrange.xml'))
try:
content = handle.read()
named_ranges = read_named_ranges(content, DummyWB())
eq_(1, len(named_ranges))
ok_(isinstance(named_ranges[0], NamedRange))
eq_([(ws, '$U$16:$U$24'), (ws, '$V$28:$V$36')], named_ranges[0].destinations)
finally:
handle.close()
def test_read_named_ranges():
class DummyWs(object):
title = 'My Sheeet'
def __str__(self):
return self.title
class DummyWB(object):
def get_sheet_by_name(self, name):
return DummyWs()
handle = open(os.path.join(DATADIR, 'reader', 'workbook.xml'))
try:
content = handle.read()
named_ranges = read_named_ranges(content, DummyWB())
eq_(["My Sheeet!$D$8"], [str(range) for range in named_ranges])
finally:
handle.close()
def test_oddly_shaped_named_ranges():
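# nose-style generator test: each yielded tuple runs as its own test case.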
ranges_counts = ((4, 'TEST_RANGE'),
(3, 'TRAP_1'),
(13, 'TRAP_2'))
def check_ranges(ws, count, range_name):
eq_(count, len(ws.range(range_name)))
wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'),
use_iterators = False)
ws = wb.worksheets[0]
for count, range_name in ranges_counts:
yield check_ranges, ws, count, range_name
def test_merged_cells_named_range():
wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'),
use_iterators = False)
ws = wb.worksheets[0]
cell = ws.range('TRAP_3')
eq_('B15', cell.get_coordinate())
eq_(10, cell.value)
def test_print_titles():
wb = Workbook()
ws1 = wb.create_sheet()
ws2 = wb.create_sheet()
ws1.add_print_title(2)
ws2.add_print_title(3, rows_or_cols='cols')
def mystr(nr):
return ','.join(['%s!%s' % (sheet.title, name) for sheet, name in nr.destinations])
actual_named_ranges = set([(nr.name, nr.scope, mystr(nr)) for nr in wb.get_named_ranges()])
expected_named_ranges = set([('_xlnm.Print_Titles', ws1, 'Sheet1!$1:$2'),
('_xlnm.Print_Titles', ws2, 'Sheet2!$A:$C')])
assert(actual_named_ranges == expected_named_ranges)
class TestNameRefersToValue(object):
def setup(self):
self.wb = load_workbook(os.path.join(DATADIR, 'genuine', 'NameWithValueBug.xlsx'))
self.ws = self.wb.get_sheet_by_name("Sheet1")
make_tmpdir()
def tearDown(self):
clean_tmpdir()
def test_has_ranges(self):
ranges = self.wb.get_named_ranges()
eq_(['MyRef', 'MySheetRef', 'MySheetRef', 'MySheetValue', 'MySheetValue', 'MyValue'], [range.name for range in ranges])
def test_workbook_has_normal_range(self):
normal_range = self.wb.get_named_range("MyRef")
eq_("MyRef", normal_range.name)
def test_workbook_has_value_range(self):
value_range = self.wb.get_named_range("MyValue")
eq_("MyValue", value_range.name)
eq_("9.99", value_range.value)
def test_worksheet_range(self):
range = self.ws.range("MyRef")
def test_worksheet_range_error_on_value_range(self):
assert_raises(NamedRangeException, self.ws.range, "MyValue")
def range_as_string(self, range, include_value=False):
def scope_as_string(range):
if range.scope:
return range.scope.title
else:
return "Workbook"
retval = "%s: %s" % (range.name, scope_as_string(range))
if include_value:
if isinstance(range, NamedRange):
retval += "=[range]"
else:
retval += "=" + range.value
return retval
def test_handles_scope(self):
ranges = self.wb.get_named_ranges()
eq_(['MyRef: Workbook', 'MySheetRef: Sheet1', 'MySheetRef: Sheet2', 'MySheetValue: Sheet1', 'MySheetValue: Sheet2', 'MyValue: Workbook'],
[self.range_as_string(range) for range in ranges])
def test_can_be_saved(self):
FNAME = os.path.join(TMPDIR, "foo.xlsx")
self.wb.save(FNAME)
wbcopy = load_workbook(FNAME)
eq_(['MyRef: Workbook=[range]', 'MySheetRef: Sheet1=[range]', 'MySheetRef: Sheet2=[range]', 'MySheetValue: Sheet1=3.33', 'MySheetValue: Sheet2=14.4', 'MyValue: Workbook=9.99'],
[self.range_as_string(range, include_value=True) for range in wbcopy.get_named_ranges()])
|
everAspiring/Sentiment-Analysis
|
PickleAlgos.py
|
Python
|
gpl-3.0
| 5,592
| 0.007332
|
import nltk
import random
import pickle
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, SGDClassifier
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes)
return conf
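# A minimal usage sketch (hypothetical classifiers and feature dict, not part
# of this script's flow): each wrapped classifier casts one vote, classify()
# returns the modal label, and confidence() the fraction of agreeing voters.
#   vc = VoteClassifier(clf_a, clf_b, clf_c)
#   label = vc.classify({'good': True, 'dull': False})        # e.g. 'pos'
#   agreement = vc.confidence({'good': True, 'dull': False})  # e.g. 2/3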
short_pos = open("IPL_Positive.txt","r").read()
short_neg = open("IPL_Negative.txt","r").read()
#print(short_pos)
# move this up here
all_words = []
documents = []
# J is adjective, R is adverb, and V is verb
#allowed_word_types = ["J","R","V"]
allowed_word_types = ["J"]
for p in short_pos.split('\n'):
documents.append( (p, "pos") )
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
for p in short_neg.split('\n'):
documents.append( (p, "neg") )
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
save_documents = open("documents.pickle","wb")
pickle.dump(documents, save_documents, protocol=2)
save_documents.close()
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())[:100]
save_word_features = open("word_features5k.pickle","wb")
pickle.dump(word_features, save_word_features, protocol=2)
save_word_features.close()
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
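# Illustration (hypothetical vocabulary): if word_features were ['good', 'bad'],
# find_features("a good movie") would return {'good': True, 'bad': False} --
# a fixed-length presence vector keyed by the most frequent adjectives.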
featuresets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(featuresets)
print(len(featuresets))
testing_set = featuresets[250:]
training_set = featuresets[:250]
#print("testinggggg:")
#print(testing_set)
#print("\n")
#print("trainggggggggggggggggg:::")
#print(training_set)
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Original Naive Bayes Algo accuracy percent:", (nltk.classify.accuracy(classifier, testing_set))*100)
classifier.show_most_informative_features(15)
###############
save_classifier = open("originalnaivebayes5k.pickle","wb")
pickle.dump(classifier, save_classifier, protocol=2)
save_classifier.close()
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)
save_classifier = open("MNB_classifier5k.pickle","wb")
pickle.dump(MNB_classifier, save_classifier, protocol=2)
save_classifier.close()
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)
save_classifier = open("BernoulliNB_classifier5k.pickle","wb")
pickle.dump(BernoulliNB_classifier, save_classifier, protocol=2)
save_classifier.close()
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)
save_classifier = open("LinearSVC_classifier5k.pickle","wb")
pickle.dump(LinearSVC_classifier, save_classifier, protocol=2)
save_classifier.close()
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)
save_classifier = open("LogisticRegression_classifier5k.pickle","wb")
pickle.dump(LogisticRegression_classifier, save_classifier, protocol=2)
save_classifier.close()
##NuSVC_classifier = SklearnClassifier(NuSVC())
##NuSVC_classifier.train(training_set)
##print("NuSVC_classifier accuracy percent:", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100)
SGDC_classifier = SklearnClassifier(SGDClassifier())
SGDC_classifier.train(training_set)
print("SGDClassifier accuracy percent:",nltk.classify.accuracy(SGDC_classifier, testing_set)*100)
save_classifier = open("SGDC_classifier5k.pickle","wb")
pickle.dump(SGDC_classifier, save_classifier, protocol=2)
save_classifier.close()
voted_classifier = VoteClassifier(
classifier,
LinearSVC_classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier)
print("voted_classifier accuracy percent:", (nltk.classify.accuracy(voted_classifier, testing_set))*100)
def sentiment(text):
feats = find_features(text)
return voted_classifier.classify(feats)
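# Example call (assuming the classifiers above trained without error):
#   sentiment("What a great innings by the openers")  # -> 'pos' or 'neg'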
|
ScriptingBeyondCS/CS-35
|
week_0_to_2/tree_analysis/recipe_analysis_examples.py
|
Python
|
mit
| 4,728
| 0.006768
|
import os
import os.path
import random
import recipe_generator
import subprocess
import shutil
#Comparing all recipes, which uses the fewest ingredients? ...kinda hacky
def fewest_ingredients(path):
""" Takes a path and returns the recipe txt file with the fewest ingredients
in the tree specified by that path.
We take advantage of the recipe structure
"""
fewest_ingredients = 50
fewest_ingredients_file_path = ""
for root, directories, files in os.walk(path):
for f in files:
with open(os.path.join(root, f), 'r') as f_in:
lines = f_in.readlines()
i = 0
                while lines[i] != "Instructions:\n":
                    i += 1
                if i < fewest_ingredients:
                    fewest_ingredients = i
                    fewest_ingredients_file_path = os.path.join(root, f)
return fewest_ingredients_file_path, (fewest_ingredients-7)
#Check if a given recipe is a savory pie
def is_savory(recipe):
""" Takes a recipe and determines if it is Savory
"""
r = recipe.read()
if "Savory" in r:
return True
else:
return False
#Check if a given recipe is a sweet pie
def is_sweet(recipe):
""" Takes a recipe and determines if it is Sweet
"""
return not is_savory(recipe)
#Check if a given recipe is vegetarian i.e. no chicken, pork, or beef.
def is_vegetarian(recipe):
""" Takes a recipe and determines if it is vegetarian
"""
r = recipe.read()
if not (("chicken" in r) or ("beef" in r) or("pork" in r)):
return True
else:
return False
#List all of the vegetarian recipes
def list_recipes_by_condition(path, condition):
""" Takes a path and a condition function and returns a list of the paths of all recipes
at or below that path that satisfy the given condition
"""
recipes = []
for root, directories, files in os.walk(path):
for f in files:
with open(os.path.join(root, f), 'r') as f_in:
if(condition(f_in)):
recipes.append(os.path.join(root, f))
return recipes
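# Usage sketch (hypothetical tree): pair the walker with any predicate above,
# e.g. veggie_paths = list_recipes_by_condition(os.getcwd(), is_vegetarian)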
#Move all of the vegetarian recipes to a directory called vegetarian_recipes
def move_recipes_by_condition(path, directory_name, condition):
""" Moves the recipes that satisfy conditon to a new directory called directory_name
"""
os.mkdir(directory_name)
recipe_list = list_recipes_by_condition(path, condition)
for recipe in recipe_list:
shutil.move(recipe, os.getcwd()+"/"+directory_name)
#Remove all empty directories
def remove_empty_directories(path):
""" Remove empty directories at or below path
"""
for root, directories, files in os.walk(path):
if not os.listdir(root):
os.rmdir(root)
#Across all recipes, which crust uses the most butter?
#Across all recipes, which recipe calls for the most kilograms of one ingredient?
#What is the ingredient and how much of it does the recipe call for?
def most_kilograms_of_one_ingredient(path):
most_kilos = 0
    most_kilos_ingredient = ""
    most_kilos_file = ""
for root, directories, files in os.walk(path):
for f in files:
with open(os.path.join(root, f), 'r') as f_in:
lines = f_in.readlines()
for l in lines:
if "kilograms" in l:
l_split = l.split(" ")
kilos = int(l_split[0])
if kilos > most_kilos:
most_kilos = kilos
most_kilos_ingredient = l_split[3]
most_kilos_file = f
return most_kilos, most_kilos_ingredient, most_kilos_file
#Across all recipes, how many use the metric system, how many use the imperial system,
# and how many use a mix of both?
def main():
# Generate a tree of recipes for testing
ls = os.listdir()
if "recipes" in ls:
shutil.rmtree("recipes")
os.mkdir("recipes")
os.chdir("recipes")
recipe_generator.generate_recipes(50)
for i in range(5):
os.mkdir("recipes"+str(i))
os.chdir("recipes"+str(i))
recipe_generator.generate_recipes(60+(i*10), 50+(i*10))
os.chdir("..")
#test questions
path = os.getcwd()
fewest_ingredients_answer = fewest_ingredients(path)
print(fewest_ingredients_answer)
move_recipes_by_condition(path, "savory_recipes", is_savory)
move_recipes_by_condition(path, "sweet_recipes", is_sweet)
move_recipes_by_condition(path+"/savory_recipes","savory_recipes/vegetarian_recipes", is_vegetarian)
remove_empty_directories(path)
print(most_kilograms_of_one_ingredient(path))
if __name__ == '__main__':
main()
|
FrodoTheTrue/safeurl
|
tests/tests.py
|
Python
|
mit
| 1,050
| 0
|
from unittest import TestCase
from safeurl.core import getRealURL
class MainTestCase(TestCase):
def test_decodeUrl(self):
self.assertEqual(getRealURL('http://bit.ly/1gaiW96'),
'https://www.yandex.ru/')
def test_decodeUrlArray(self):
self.assertEqual(
getRealURL(['http://bit.ly/1gaiW96', 'http://bit.ly/1gaiW96']),
['https://www.yandex.ru/', 'https://www.yandex.ru/'])
def test_errorDecodeUrl(self):
        self.assertEqual(getRealURL('http://bit.ly.wrong/wrong'),
                         'Failed')
def test_errorDecodeUrlArray(self):
self.assertEqual(
getRealURL(
['http://bit.ly.wrong/wrong', 'http://bit.ly.wrong/wrong']),
['Failed', 'Failed'])
def test_errorWithOkDecodeUrlArray(self):
self.assertEqual(
            getRealURL(['http://bit.ly.wrong/wrong', 'http://bit.ly/1gaiW96',
'http://bit.ly.wrong/wrong']),
['Failed', 'https://www.yandex.ru/', 'Failed'])
|
Valloric/ycmd
|
ycmd/identifier_utils.py
|
Python
|
gpl-3.0
| 8,517
| 0.019493
|
# Copyright (C) 2014-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from ycmd.utils import re, SplitLines
C_STYLE_COMMENT = '/\\*(?:\n|.)*?\\*/'
CPP_STYLE_COMMENT = '//.*?$'
PYTHON_STYLE_COMMENT = '#.*?$'
# Anything inside single quotes, '...', but mind:
# 1. that the starting single quote is not escaped
# 2. the escaped slash (\\)
# 3. the escaped single quote inside the string
SINGLE_QUOTE_STRING = r"(?<!\\)'(?:\\\\|\\'|.)*?'"
# Anything inside double quotes, "...", but mind:
# 1. that the starting double quote is not escaped
# 2. the escaped slash (\\)
# 3. the escaped double quote inside the string
DOUBLE_QUOTE_STRING = r'(?<!\\)"(?:\\\\|\\"|.)*?"'
# Anything inside back quotes, `...`, but mind:
# 1. that the starting back quote is not escaped
# 2. the escaped slash (\\)
# 3. the escaped back quote inside the string
BACK_QUOTE_STRING = r'(?<!\\)`(?:\\\\|\\`|.)*?`'
# Python-style multiline single-quote string
MULTILINE_SINGLE_QUOTE_STRING = "'''(?:\n|.)*?'''"
# Python-style multiline double-quote string
MULTILINE_DOUBLE_QUOTE_STRING = '"""(?:\n|.)*?"""'
DEFAULT_COMMENT_AND_STRING_REGEX = re.compile( "|".join( [
C_STYLE_COMMENT,
CPP_STYLE_COMMENT,
PYTHON_STYLE_COMMENT,
MULTILINE_SINGLE_QUOTE_STRING,
MULTILINE_DOUBLE_QUOTE_STRING,
SINGLE_QUOTE_STRING,
DOUBLE_QUOTE_STRING ] ), re.MULTILINE )
FILETYPE_TO_COMMENT_AND_STRING_REGEX = {
# Spec:
# http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3690.pdf
'cpp': re.compile( "|".join( [ C_STYLE_COMMENT,
CPP_STYLE_COMMENT,
SINGLE_QUOTE_STRING,
DOUBLE_QUOTE_STRING ] ), re.MULTILINE ),
# Spec:
# https://golang.org/ref/spec#Comments
# https://golang.org/ref/spec#String_literals
# https://golang.org/ref/spec#Rune_literals
'go': re.compile( "|".join( [ C_STYLE_COMMENT,
CPP_STYLE_COMMENT,
SINGLE_QUOTE_STRING,
DOUBLE_QUOTE_STRING,
BACK_QUOTE_STRING ] ), re.MULTILINE ),
# Spec:
# https://docs.python.org/3.6/reference/lexical_analysis.html#comments
# https://docs.python.org/3.6/reference/lexical_analysis.html#literals
'python': re.compile( "|".join( [ PYTHON_STYLE_COMMENT,
MULTILINE_SINGLE_QUOTE_STRING,
MULTILINE_DOUBLE_QUOTE_STRING,
SINGLE_QUOTE_STRING,
DOUBLE_QUOTE_STRING ] ), re.MULTILINE ),
# Spec:
# https://doc.rust-lang.org/reference.html#comments
# https://doc.rust-lang.org/reference.html#character-and-string-literals
'rust': re.compile( "|".join( [ CPP_STYLE_COMMENT,
SINGLE_QUOTE_STRING,
DOUBLE_QUOTE_STRING ] ), re.MULTILINE )
}
for filetype in [ 'c', 'cuda', 'objc', 'objcpp', 'javascript', 'typescript' ]:
FILETYPE_TO_COMMENT_AND_STRING_REGEX[ filetype ] = (
FILETYPE_TO_COMMENT_AND_STRING_REGEX[ 'cpp' ] )
# At least c++ and javascript support unicode identifiers, and identifiers may
# start with unicode character, e.g. ålpha. So we need to accept any identifier
# starting with an 'alpha' character or underscore. i.e. not starting with a
# 'digit'. The following regex will match:
# - A character which is alpha or _. That is a character which is NOT:
# - a digit (\d)
# - non-alphanumeric
# - not an underscore
# (The latter two come from \W which is the negation of \w)
# - Followed by any alphanumeric or _ characters
DEFAULT_IDENTIFIER_REGEX = re.compile( r"[^\W\d]\w*", re.UNICODE )
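# For instance, DEFAULT_IDENTIFIER_REGEX fully matches 'ålpha' and '_private1',
# but in '2fast' it can only match 'fast', since the first character of an
# identifier must not be a digit.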
FILETYPE_TO_IDENTIFIER_REGEX = {
# Spec:
# http://www.ecma-international.org/ecma-262/6.0/#sec-names-and-keywords
# Default identifier plus the dollar sign.
'javascript': re.compile( r"(?:[^\W\d]|\$)[\w$]*", re.UNICODE ),
# Spec: https://www.w3.org/TR/css-syntax-3/#ident-token-diagram
'css': re.compile( r"-?[^\W\d][\w-]*", re.UNICODE ),
# Spec: http://www.w3.org/TR/html5/syntax.html#tag-name-state
# But not quite since not everything we want to pull out is a tag name. We
# also want attribute names (and probably unquoted attribute values).
# And we also want to ignore common template chars like `}` and `{`.
'html': re.compile( r"[a-zA-Z][^\s/>='\"}{\.]*", re.UNICODE ),
# Spec: http://cran.r-project.org/doc/manuals/r-release/R-lang.pdf
# Section 10.3.2.
# Can be any sequence of '.', '_' and alphanum BUT can't start with:
# - '.' followed by digit
# - digit
# - '_'
'r': re.compile( r"(?!(?:\.\d|\d|_))[\.\w]+", re.UNICODE ),
# Spec: http://clojure.org/reader
# Section: Symbols
'clojure': re.compile(
r"[-\*\+!_\?:\.a-zA-Z][-\*\+!_\?:\.\w]*/?[-\*\+!_\?:\.\w]*",
re.UNICODE ),
# Spec: http://www.haskell.org/onlinereport/lexemes.html
# Section 2.4
'haskell': re.compile( r"[_a-zA-Z][\w']+", re.UNICODE ),
# Spec: ?
# Colons are often used in labels (e.g. \label{fig:foobar}) so we accept
# them in the middle of an identifier but not at its extremities. We also
# accept dashes for compound words.
'tex': re.compile( r"[^\W\d](?:[\w:-]*\w)?", re.UNICODE ),
# Spec: http://doc.perl6.org/language/syntax
'perl6': re.compile( r"[_a-zA-Z](?:\w|[-'](?=[_a-zA-Z]))*", re.UNICODE ),
# https://www.scheme.com/tspl4/grammar.html#grammar:symbols
'scheme': re.compile( r"\+|\-|\.\.\.|"
r"(?:->|(:?\\x[0-9A-Fa-f]+;|[!$%&*/:<=>?~^]|[^\W\d]))"
r"(?:\\x[0-9A-Fa-f]+;|[-+.@!$%&*/:<=>?~^\w])*",
re.UNICODE ),
}
FILETYPE_TO_IDENTIFIER_REGEX[ 'typescript' ] = (
FILETYPE_TO_IDENTIFIER_REGEX[ 'javascript' ] )
FILETYPE_TO_IDENTIFIER_REGEX[ 'scss' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ]
FILETYPE_TO_IDENTIFIER_REGEX[ 'sass' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ]
FILETYPE_TO_IDENTIFIER_REGEX[ 'less' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ]
FILETYPE_TO_IDENTIFIER_REGEX[ 'elisp' ] = (
FILETYPE_TO_IDENTIFIER_REGEX[ 'clojure' ] )
FILETYPE_TO_IDENTIFIER_REGEX[ 'lisp' ] = (
FILETYPE_TO_IDENTIFIER_REGEX[ 'clojure' ] )
def CommentAndStringRegexForFiletype( filetype ):
return FILETYPE_TO_COMMENT_AND_STRING_REGEX.get(
    filetype, DEFAULT_COMMENT_AND_STRING_REGEX )
def IdentifierRegexForFiletype( filetype ):
  return FILETYPE_TO_IDENTIFIER_REGEX.get( filetype, DEFAULT_IDENTIFIER_REGEX )
def ReplaceWithEmptyLines( regex_match ):
return '\n' * ( len( SplitLines( regex_match.group( 0 ) ) ) - 1 )
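# For example, a C-style comment spanning three lines is replaced by two
# newlines, so the line numbering of the remaining code is preserved.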
def RemoveIdentifierFreeText( text, filetype = None ):
return CommentAndStringRegexForFiletype( filetype ).sub(
ReplaceWithEmptyLines, text )
def ExtractIdentifiersFromText( text, filetype = None ):
return re.findall( IdentifierRegexForFiletype( filetype ), text )
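# Illustration of filetype awareness:
#   ExtractIdentifiersFromText( "$scope.fooBar = 1;", "javascript" )
# yields [ '$scope', 'fooBar' ], while the default regex would drop the '$'
# and yield [ 'scope', 'fooBar' ].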
def IsIdentifier( text, filetype = None ):
if not text:
return False
regex = IdentifierRegexForFiletype( filetype )
match = regex.match( text )
return match and match.end() == len( text )
# index is 0-based and EXCLUSIVE, so ("foo.", 3) -> 0
# Works with both unicode and str objects.
# Returns the index on bad input.
def StartOfLongestIdentifierEndingAtIndex( text, index, filetype = None ):
if not text or index < 1 or index > len( text ):
return index
for i in range( index ):
if IsIdentifier( text[ i : index ], filetype ):
return i
return index
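# For example, StartOfLongestIdentifierEndingAtIndex( "foo.bar", 7 ) returns 4,
# because "bar" (text[ 4 : 7 ]) is the longest identifier ending at index 7.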
# If the index is not on
|
OpenFurry/submitify
|
submitify/views/test_calls.py
|
Python
|
mit
| 1,704
| 0
|
from submitify.tests import (
TestCase,
# CallMixin,
# GuidelineMixin,
# NotificationMixin,
# ReviewMixin,
# SubmissionMixin,
# UserMixin,
)
class TestListCalls(TestCase):
def test_lists_open_calls(self):
self.assertTrue(True)
def test_lists_other_calls_if_asked(self):
pass
class TestViewCall(TestCase):
def test_view_call(self):
pass
def test_lists_notifications(self):
pass
def test_can_submit_call_open_only(self):
pass
def test_can_submit_invite_only(self):
pass
    def test_can_submit_if_reader(self):
pass
class TestCreateCall(TestCase):
def test_form_renders(self):
pass
def test_form_saves(self):
pass
def test_guidelines_save(self):
pass
class TestEditCall(TestCase):
def test_owner_only(self):
pass
def test_form_renders(self):
pass
def test_form_saves(self):
pass
def test_guidelines_save(self):
pass
class TestInviteReader(TestCase):
def test_reader_invited(self):
pass
def test_cant_invite_owner(self):
pass
class TestInviteWriter(TestCase):
def test_writer_invited(self):
pass
def test_cant_invite_owner(self):
pass
def test_cant_invite_unless_invite_only(self):
pass
class TestNextStep(TestCase):
def test_owner_only(self):
pass
def test_call_advanced(self):
pass
def test_cant_proceed_beyond_max(self):
pass
    def test_cant_proceed_to_finished_with_unreviewed_submissions(self):
        pass
    def test_moves_submissions_to_review_if_closing(self):
        pass
|
justacec/bokeh
|
examples/app/timeout.py
|
Python
|
bsd-3-clause
| 1,560
| 0.001923
|
''' Present a plot updating according to a set of fixed timeout
intervals.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve timeout.py
at your command prompt. Then navigate to the URL
http://localhost:5006/timeout
in your browser.
'''
import numpy as np
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
N = 50
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
p.rect(x=50, y=50, width=80, height=80,
line_alpha=0.5, line_color="darkgrey", fill_color=None)
r = p.text(x=[], y=[], text=[], text_color=[],
text_font_size="20pt", text_baseline="middle", text_align="center")
# Add plot to document
curdoc().add(p)
def make_callback(i):
ds = r.data_source
def func():
if i == N-1:
ds.data['x'].append(50)
ds.data['y'].append(95)
ds.data['text'].append("DONE")
ds.data['text_color'].append("white")
else:
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
func.interval = i * 100
return func
callbacks = [make_callback(i) for i in range(N)]
for callback in callbacks:
curdoc().add_timeout_callback(callback, callback.interval)
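# Net effect: N one-shot timeouts staggered 100 ms apart, so the plot gains
# one numbered glyph at a time and finishes by drawing "DONE".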
|
SKA-ScienceDataProcessor/integration-prototype
|
sip/examples/flask_processing_controller/app/api/scheduling_block_list.py
|
Python
|
bsd-3-clause
| 2,351
| 0
|
# -*- coding: utf-8 -*-
"""Scheduling Block Instance List API resource."""
import logging
from http import HTTPStatus
from random import choice
from flask import Blueprint, request
from .utils import add_scheduling_block, get_root_url, missing_db_response
from ..db.client import ConfigDb
BP = Blueprint("scheduling-blocks", __name__)
LOG = logging.getLogger('SIP.EC.PCI')
DB = ConfigDb()
@BP.route('/scheduling-blocks', methods=['GET'])
@missing_db_response
def get():
    """Return the list of Scheduling Block Instances known to SDP."""
LOG.debug('GET list of SBIs.')
# Construct response object.
_url = get_root_url()
response = dict(scheduling_blocks=[],
links=dict(home='{}'.format(_url)))
    # Get ordered list of SBI IDs.
    block_ids = DB.get_sched_block_instance_ids()
# Loop over SBIs and add summary of each to the list of SBIs in the
# response.
for block in DB.get_block_details(block_ids):
block_id = block['id']
        LOG.debug('Adding SBI %s to list', block_id)
LOG.debug(block)
block['num_processing_blocks'] = len(block['processing_block_ids'])
temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2
block['status'] = choice(temp)
try:
del block['processing_block_ids']
except KeyError:
pass
block['links'] = {
'detail': '{}/scheduling-block/{}' .format(_url, block_id)
}
response['scheduling_blocks'].append(block)
return response, HTTPStatus.OK
@BP.route('/scheduling-blocks', methods=['POST'])
@missing_db_response
def create():
"""Create / register a Scheduling Block instance with SDP."""
config = request.data
return add_scheduling_block(config)
@BP.route('/scheduling-blocks/table')
@missing_db_response
def get_table():
"""Provides table of scheduling block instance metadata for use with AJAX
tables"""
response = dict(blocks=[])
block_ids = DB.get_sched_block_instance_ids()
for index, block_id in enumerate(block_ids):
block = DB.get_block_details([block_id]).__next__()
info = [
index,
block['id'],
block['sub_array_id'],
len(block['processing_blocks'])
]
response['blocks'].append(info)
return response, HTTPStatus.OK
|
pixies/academic
|
membro_profile/views.py
|
Python
|
gpl-3.0
| 4,644
| 0.004741
|
#-*- encoding: utf-8 -*-
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, RequestContext, render
from membro_profile.forms import MembroForm, MembroProfileForm, EditProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from membro_profile.models import MembroProfile
from submissao.models import Submissao
def some_view(request):
    if not request.user.is_authenticated():
        return HttpResponse("You are not logged in.")
    else:
        return HttpResponse("You are logged in.")
# Create your views here.
def register(request):
context = RequestContext(request)
registered = False
if request.method == 'POST':
membro_form = MembroForm(data=request.POST)
membro_profile_form = MembroProfileForm(data=request.POST)
if membro_form.is_valid() and membro_profile_form.is_valid():
membro = membro_form.save()
membro.set_password(membro.password)
membro.save()
membro_profile = membro_profile_form.save(commit=False)
membro_profile.user = membro
if 'avatar' in request.FILES:
membro_profile.picture = request.FILES['avatar']
membro_profile.save()
registered = True
else:
print (membro_form.errors, membro_profile_form.errors)
else:
membro_form = MembroForm()
membro_profile_form = MembroProfileForm()
return render_to_response(
'profile/register.html',
# {'membro_form': membro_form, 'registered': registered},
{'membro_form': membro_form, 'membro_profile_form': membro_profile_form, 'registered': registered},
context)
def membro_login(request):
context = RequestContext(request)
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
membro = authenticate(username=username,password=password)
if membro:
if membro.is_active:
login(request, membro)
return HttpResponseRedirect('/')
else:
                return HttpResponse('Your account has not been activated yet.')
else:
            print ("Invalid login or password: {0}, {1}".format(username, password))
            return HttpResponse("Invalid login or password")
else:
# return render_to_response('profile/404.html', {}, context)
return render_to_response('profile/login.html', {}, context)
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/')
@login_required
def profile(request):
context = RequestContext(request)
print (context)
usuario = User.objects.get(username=request.user)
membro = MembroProfile.objects.get(user=usuario)
if membro:
return render_to_response('profile/profile.html', {'m':membro}, context)
else:
        return HttpResponse('Registration not found')
@login_required
def edit_profile(request):
membro = request.user
form = EditProfileForm(
request.POST or None,
initial={
'first_name': membro.first_name,
'last_name': membro.last_name,
'cpf': membro.membroprofile.cpf,
}
)
if form.is_valid():
membro.first_name = request.POST['first_name']
membro.last_name = request.POST['last_name']
        membro.membroprofile.cpf = request.POST['cpf']
        membro.membroprofile.save()
        membro.save()
return HttpResponseRedirect('%s'%(reverse('profile')))
context = {
"form": form
}
return render(request, 'profile/editar.html', context)
#from submissao.models import Submissao
def index(request):
context = RequestContext(request)
print (str(request.user) == 'AnonymousUser')
if str(request.user) == 'AnonymousUser':
return render_to_response('profile/login.html', context)
else:
queryset = Submissao.objects.filter(autor_id=request.user.membroprofile.id or None)
if request.user.is_authenticated():
membro = MembroProfile.objects.filter(user__username=request.user).latest('user').user
context["membro"] = membro
context['lista_resumos'] = queryset
return render_to_response('profile/index.html', context)
else:
return render_to_response('profile/login.html', context)
|
hudvin/brighteye
|
facenet_experiments/vgg_utils/vgg_downloader.py
|
Python
|
apache-2.0
| 2,983
| 0.004358
|
import Image
import argparse
from StringIO import StringIO
from urlparse import urlparse
from threading import Thread
import httplib, sys
from Queue import Queue
import numpy as np
from scipy import misc
import os
def doWork():
while True:
task_data = q.get()
print task_data
url = task_data["url"]
image_path = task_data["image_path"]
error_path = task_data["error_path"]
try:
url = urlparse(url)
conn = httplib.HTTPConnection(url.netloc)
conn.request("GET", url.path)
res = conn.getresponse()
if res.status == 200:
img = res.read()
img = np.array(Image.open(StringIO(img)))
misc.imsave(image_path, img)
else:
                save_error(error_path, str(res.status) + " " + res.reason)
except Exception as e:
save_error(error_path, str(e))
q.task_done()
def save_error(error_path, error_message):
with open(error_path, "w") as textfile:
textfile.write(error_message)
concurrent = 200
q = Queue(concurrent * 2)
def main(args):
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
try:
textfile_names = os.listdir(args.dataset_descriptor)
for textfile_name in textfile_names:
if textfile_name.endswith('.txt'):
with open(os.path.join(args.dataset_descriptor, textfile_name), 'rt') as f:
lines = f.readlines()
dir_name = textfile_name.split('.')[0]
class_path = os.path.join(args.output_dir, dir_name)
if not os.path.exists(class_path):
os.makedirs(class_path)
for line in lines:
x = line.split(' ')
filename = x[0]
url = x[1]
image_path = os.path.join(args.output_dir, dir_name, filename + '.' + args.output_format)
error_path = os.path.join(args.output_dir, dir_name, filename + '.err')
q.put({
"url": url.strip(),
"image_path":image_path,
"error_path":error_path
})
q.join()
except KeyboardInterrupt:
sys.exit(1)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('dataset_descriptor', type=str,
help='Directory containing the text files with the image URLs. Image files will also be placed in this directory.')
parser.add_argument('output_dir', type=str,
help='Directory to store fetched images grouped by person name')
parser.add_argument('--output_format', type=str, help='Format of the output images', default='png', choices=['png', 'jpg'])
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
blossomica/airmozilla
|
airmozilla/comments/tests/test_jinja_helpers.py
|
Python
|
bsd-3-clause
| 1,016
| 0
|
from nose.tools import eq_, ok_
from django.test import TestCase
from airmozilla.comments.templatetags.jinja_helpers import (
gravatar_src,
obscure_email,
)
class TestHelpers(TestCase):
def test_gravatar_src_http(self):
email = 'peterbe@mozilla.com'
result = gravatar_src(email, False)
ok_(result.startswith('//www.gravatar.com'))
# case insensitive
eq_(result, gravatar_src(email.upper(), False))
def test_gravatar_src_with_size(self):
result = gravatar_src('peterbe@mozilla.com', False, size=50)
ok_(result.startswith('//www.gravatar.com'))
        ok_('s=50' in result)
eq_(result.count('?'), 1)
def test_gravatar_src_https(self):
email = 'peterbe@mozilla.com'
result = gravatar_src(email, True)
ok_(result.startswith('//secure.gravatar.com'))
def test_obscure_email(self):
email = 'peterbe@mozilla.com'
        result = obscure_email(email)
eq_(result, 'pete...@...illa.com')
|
RobotsAndPencils/terrible
|
tests/test_run.py
|
Python
|
bsd-3-clause
| 3,415
| 0.000586
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from preggy import expect
import click
from click.testing import CliRunner
from terrible.run import compile_template
from tests.base import TestCase
import os
class CompileTemplateTestCase(TestCase):
def test_compile_template(self):
base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
template_path = "%stests_resources/" % base_dir
template = "ansible-inventory.j2"
tfstate = "%stests_resources/terraform.tfstate" % base_dir
inventory_output = "%stests_resources/test_output" % base_dir
# Empty any previous test output
open(inventory_output, 'w').close()
runner = CliRunner()
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(hasattr(runner, 'exception')).to_equal(False)
expect(result.exit_code).to_equal(0)
output = open(inventory_output).read()
expect(output).to_include("1.2.3.4")
def test_missing_required_params(self):
base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
template_path = "%stests_resources/" % base_dir
template = "ansible-inventory.j2"
tfstate = "%stests_resources/terraform.tfstate" % base_dir
inventory_output = "%stests_resources/test_output" % base_dir
runner = CliRunner()
# Missing --template-path arg
result = runner.invoke(compile_template, [
'--template', template,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --template arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--tfstate', tfstate,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --tfstate arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
# Missing --inventory-output arg
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate])
        expect(result.exit_code).to_be_greater_than(0)
# Give a file instead of a directory for template path
result = runner.invoke(compile_template, [
'--template-path', tfstate])
        expect(result.exit_code).to_be_greater_than(0)
        # Give a path instead of an actual template for --template
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template_path])
expect(result.exit_code).to_be_greater_than(0)
        # Give an invalid path for tfstate
result = runner.invoke(compile_template, [
'--template-path', template_path,
'--template', template,
'--tfstate', tfstate + "blahblahdoesnotexist",
'--inventory-output', inventory_output])
expect(result.exit_code).to_be_greater_than(0)
|
ar4s/django
|
tests/timezones/tests.py
|
Python
|
bsd-3-clause
| 58,810
| 0.002041
|
import datetime
import re
import sys
from contextlib import contextmanager
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
try:
import pytz
except ImportError:
pytz = None
from django.contrib.auth.models import User
from django.core import serializers
from django.db import connection
from django.db.models import F, Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, ignore_warnings,
override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.urls import reverse
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.timezone import timedelta
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, DailyEvent, Event, MaybeEvent, Session, SessionEvent,
Timestamp,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# time zones, which don't have daylight saving time, so we can represent them easily
# with fixed offset timezones and use them directly as tzinfo in the
# constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
ZONE_CONSTRUCTORS = (zoneinfo.ZoneInfo,)
if pytz is not None:
ZONE_CONSTRUCTORS += (pytz.timezone,)
def get_timezones(key):
return [constructor(key) for constructor in ZONE_CONSTRUCTORS]
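# e.g. get_timezones('Africa/Nairobi') returns one tzinfo per available
# implementation (zoneinfo always, plus pytz when installed), so each test
# can exercise both timezone providers.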
@contextmanager
def override_database_connection_timezone(timezone):
try:
orig_timezone = connection.settings_dict['TIME_ZONE']
connection.settings_dict['TIME_ZONE'] = timezone
        # Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
yield
finally:
connection.settings_dict['TIME_ZONE'] = orig_timezone
# Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
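# Usage sketch: run ORM queries as if the database connection were configured
# for another zone, e.g.
#   with override_database_connection_timezone('Asia/Bangkok'):
#       ...  # connection-level TIME_ZONE is 'Asia/Bangkok' inside the block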
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
msg = 'backend does not support timezone-aware datetimes when USE_TZ is False.'
with self.assertRaisesMessage(ValueError, msg):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
|
markovmodel/thermotools
|
test/test_callback.py
|
Python
|
lgpl-3.0
| 2,723
| 0.002203
|
# This file is part of thermotools.
#
# Copyright 2015, 2016 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# thermotools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import thermotools.wham as wham
import thermotools.mbar as mbar
import thermotools.dtram as dtram
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import assert_true, assert_raises
from thermotools.callback import CallbackInterrupt, generic_callback_stop
# ************************************************************************************************
# test generic_callback_stop
# ************************************************************************************************
def test_callback_interrupt():
assert_raises(CallbackInterrupt, generic_callback_stop)
try:
generic_callback_stop()
except CallbackInterrupt as ci:
assert_true(ci.msg == "STOP")
assert_true(ci.__str__() == "[CALLBACKINTERRUPT] STOP")
def test_wham_stop():
T = 5
M = 10
therm_energies, conf_energies, increments, loglikelihoods = wham.estimate(
np.ones(shape=(T, M), dtype=np.intc),
np.zeros(shape=(T, M), dtype=np.float64),
maxiter=10, maxerr=-1.0, save_convergence_info=1,
callback=generic_callback_stop)
assert_allclose(therm_energies, 0.0, atol=1.0E-15)
assert_allclose(conf_energies, np.log(M), atol=1.0E-15)
assert_true(increments.shape[0] == 1)
assert_true(loglikelihoods.shape[0] == 1)
def test_dtram_stop():
T = 5
M = 10
    therm_energies, conf_energies, log_lagrangian_mult, increments, loglikelihoods = dtram.estimate(
np.ones(shape=(T, M, M), dtype=np.intc),
np.zeros(shape=(T, M), dtype=np.float64),
maxiter=10, maxerr=-1.0, save_convergence_info=1,
        callback=generic_callback_stop)
assert_allclose(therm_energies, 0.0, atol=1.0E-15)
assert_allclose(conf_energies, np.log(M), atol=1.0E-15)
assert_allclose(log_lagrangian_mult, np.log(M + dtram.get_prior()), atol=1.0E-15)
assert_true(increments.shape[0] == 1)
assert_true(loglikelihoods.shape[0] == 1)
|
niallrmurphy/simlir
|
instrumentation.py
|
Python
|
gpl-2.0
| 8,970
| 0.014939
|
#!/usr/bin/env python
# encoding: utf-8
"""
instrumentation.py
This file defines the various 'events' that can happen in the simlir system. Every
time an object in the simulation does something significant, it sends a message
to a global instrumentation object, which currently has a mild wrapping around them
for textual display purposes, and for later driving of a GUI.
Typical use case:
eventp = instrumentation.event_processor()
eventp.ReceiveEvent("FINISHED_SETUP")
Created by Niall Murphy on 2007-05-08.
"""
# TODO(niallm): do this with env variable passing from make
# at some point.
import constants
import logging
import logging.handlers
import os
import pprint
import sys
_EVENTS = { 'ADD_ROUTE': 'AddRouteEvent',
'ADD_PREFIX': 'AddPrefixEvent',
'REMOVE_ROUTE': 'RemoveRouteEvent',
'REQUEST_SPACE': 'RequestSpaceEvent',
'NEEDS_SPACE': 'NeedsSpaceEvent',
'GETS_SPACE': 'GetsSpaceEvent',
'TRADE_SPACE': 'TradeSpaceEvent',
'TAKE_STARTUP_SPACE': 'TakeStartupSpaceEvent',
'GENERATE_NAME': 'GenerateNameEvent',
'SET_NAME': 'SetNameEvent',
'SET_DATE': 'SetDateEvent',
'FIND_UNUSED': 'FindUnusedEvent',
'UNIT_TEST': 'JustReturnArgs',
'CREATE_LIR': 'CreateLIREvent',
'CREATE_RIR': 'CreateRIREvent',
'CREATE_IANA': 'CreateIANAEvent',
'LIR_INITIAL' : 'LIRInitialEvent',
'GET_NEXT_UNUSED' : 'GetNextUnusedEvent',
'CONSIDER_PREFIX' : 'ConsiderPrefixEvent',
'FOUND_GAP' : 'FoundGapEvent',
'CALC_REQS' : 'CalculateReqsEvent',
'NEXT_TIMELINE' : 'NextTimelineEvent',
'ADD_TIMELINE' : 'AddTimelineEvent',
'IANA_FREE_SPACE_CHANGE' : 'LostSpaceEvent',
'RIR_FREE_SPACE_CHANGE' : 'LostSpaceEvent',
'IANA_EXHAUSTED' : 'EntityExhaustedEvent',
'RIR_EXHAUSTED' : 'EntityExhaustedEvent',
'LIR_EXHAUSTED' : 'EntityExhaustedEvent',
'LIR_BLOCKED' : 'EntityBlockedEvent',
'RIR_BLOCKED' : 'EntityBlockedEvent',
'FINISHED_READIN' : 'FinishedReadinEvent',
'FINISHED_SETUP' : 'FinishedSetupEvent'}
class EventError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class event_processor:
"""The event_processor class is a way to instrument the internal operations
of the LIR/tree etc classes in an extendable way. A class holds
an event_processor object and sends various events to it. These events can
be processed in a text-based logging fashion or can in turn send events to
drive a gui, etc."""
def __init__(self,
mode = constants.defines._INSTRUMENTATION_DEFAULT_MODE,
verbosity = constants.defines._INSTRUMENTATION_DEFAULT_VERBOSITY):
progname = os.path.basename(sys.argv[0])
self.args = {}
self.mode = mode
self.proc = None
if self.mode == constants.defines._INSTRUMENTATION_MODES['stdout']:
self.proc = text_event_processor(verbosity)
elif self.mode == constants.defines._INSTRUMENTATION_MODES['syslog']:
self.logger = logging.getLogger(progname)
self.syslog_hndlr = logging.handlers.SysLogHandler(
facility = logging.handlers.SysLogHandler.LOG_DAEMON)
self.formatter = logging.Formatter('%(filename)s: %(levelname)s: %(message)s')
self.syslog_hndlr.setFormatter(self.formatter)
self.logger.addHandler(self.syslog_hndlr)
self.proc = syslog_event_processor
elif self.mode == constants.defines._INSTRUMENTATION_MODES['gui']:
raise ValueError, "gui mode not implemented yet"
else:
raise ValueError, "event_processor without defined mode!"
def ReceiveEvent(self, event, *varargs):
"""Receive an event from related objects. Check the event is something we know about.
If so, record it or log it or similar. If not, discard with error. """
if self.mode == constants.defines._INSTRUMENTATION_MODES['stdout']:
func = getattr(self.proc, _EVENTS[event])
return func(varargs)
elif self.mode == constants.defines._INSTRUMENTATION_MODES['syslog']:
return getattr(self.proc,_EVENTS[event])(varargs)
elif self.mode == constants.defines._INSTRUMENTATION_MODES['gui']:
raise ValueError, "gui mode not implemented yet"
else:
raise ValueError, "mode not implemented yet"
class text_event_processor:
"""The default, stdio output class."""
def __init__(self, supplied_verbosity):
self.verbosity = supplied_verbosity
def AddRouteEvent(self, args):
if self.verbosity > 1:
print "*** ADD ROUTE EVENT with route '%s', owner '%s' and note '%s'" % \
(args[0], args[1], args[2])
return args
def AddPrefixEvent(self, args):
if self.verbosity > 1:
print "*** ADD PREFIX EVENT for '%s' with prefix '%s'" % (args[0], args[1])
return args
def RemovePrefixEvent(self, args):
if self.verbosity > 1:
print "*** REMOVE PREFIX EVENT with route '%s', owner '%s' and note '%s'" % \
(args[0], args[1], args[2])
return args
def NeedsSpaceEvent(self, args): # TODO(niallm): implement
return args
def GetsSpaceEvent(self, args): # TODO(niallm): implement
return args
  def TradeSpaceEvent(self, args): # TODO(niallm): implement
return args
def FindUnusedEvent(self, args): #
if self.verbosity > 1:
print "*** FIND UNUSED EVENT called to find a '/%s'" % args[0]
return args
def RequestSpaceEvent(self, args): # FIXME
if self.verbosity > 0:
print "*** REQUEST SPACE EVENT from '%s' for '/%s' fulfilling via '%s'" % \
(args[0], args[1], args[2])
return args
def GenerateNameEvent(self, args): # FIXME
if self.verbosity > 1:
print "*** GENERATE NAME EVENT generated '%s'" % args[0]
return args
def SetNameEvent(self, args):
if self.verbosity > 1:
print "*** SET NAME EVENT to '%s'" % args[0]
return args
def SetDateEvent(self, args):
if self.verbosity > 1:
print "*** SET DATE EVENT FOR '%s' TO '%s/%s/%s'" % \
(args[0], args[1], args[2], args[3])
return args
def CreateLIREvent(self, args):
if self.verbosity > 1:
print "*** CREATE LIR EVENT generated LIR '%s'" % args[0]
return args
def CreateRIREvent(self, args):
if self.verbosity > 1:
print "*** CREATE RIR EVENT generated RIR '%s'" % args[0]
return args
def CreateIANAEvent(self, args):
if self.verbosity > 1:
print "*** CREATE IANA EVENT"
return args
def GetNextUnusedEvent(self, args): # TODO(niallm): implement
return args
def TakeStartupSpaceEvent(self, args): # TODO(niallm): probably deprecated now
if self.verbosity > 0:
print "*** TAKE STARTUP SPACE for '%s' from '%s' gets prefix '%s'" % \
(args[0], args[1], args[2])
return args
def ConsiderPrefixEvent(self, args):
if self.verbosity > 0:
print "*** CONSIDER PREFIX looks at '%s' trying to find gap" % args[1]
return args
def FoundGapEvent(self, args):
if self.verbosity > 0:
print "*** FOUND GAP found a gap at '%s' length '%s'" % (args[0], args[1])
return args
def EntityExhaustedEvent(self, args):
if self.verbosity > 0:
print "*** ENTITY [%s] IS EXHAUSTED of space of prefix length '%s' on date '%s'" % \
(args[0], args[1], args[2])
return args
def EntityBlockedEvent(self, args):
if self.verbosity > 0:
print "*** ENTITY [%s] IS BLOCKED: wants space [%s] on '%s'" % \
(args[0], args[1], args[2])
return args
def CalculateReqsEvent(self, args):
if self.verbosity > 0:
print "*** ADDRESS REQUIREMENTS CALCULATED to be '%s' for '%s'" % \
(args[1], args[0])
return args
def NextTimelineEvent(self, args):
if self.verbosity > 0:
print "*** NEXT TIMELINE EVENT at '%s' is '%s'" % (args[0], args[1])
return args
def AddTimelineEvent(self, args):
if self.verbosity > 0:
print "*** ADD EVENT TO TIMELINE at date [%s]" % args[0]
return args
def
|
detiber/lib_openshift
|
test/test_v1_project.py
|
Python
|
apache-2.0
| 1,236
| 0.003236
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_project import V1Project
class TestV1Project(unittest.TestCase):
""" V1Project unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Project(self):
"""
Test V1Project
"""
model = lib_openshift.models.v1_project.V1Project()
if __name__ == '__main__':
unittest.main()
|
AndreyBalabanov/python_training
|
test/test_del_contact.py
|
Python
|
apache-2.0
| 633
| 0.00316
|
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.add(Contact(firstname="test"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
assert len(old_contacts) - 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Common/Core/Testing/Python/TestOverloads.py
|
Python
|
gpl-3.0
| 2,562
| 0.005074
|
"""Test overloaded method resolution in VTK-Python
The wrappers should call overloaded C++ methods using similar
overload resolution rules as C++. Python itself does not have
method overloading.
Created on Feb 15, 2015 by David Gobbi
"""
import sys
import vtk
from vtk.test import Testing
class TestOverloads(Testing.vtkTest):
def testMethods(self):
"""Test overloaded methods"""
# single-argument method vtkTransform::SetMatrix()
t = vtk.vtkTransform()
m = vtk.vtkMatrix4x4()
m.SetElement(0, 0, 2)
t.SetMatrix(m)
self.assertEqual(t.GetMatrix().GetElement(0, 0), 2)
t.SetMatrix([0,1,0,0, 1,0,0,0, 0,0,-1,0, 0,0,0,1])
self.assertEqual(t.GetMatrix().GetElement(0, 0), 0)
# mixed number of arguments
fd = vtk.vtkFieldData()
fa = vtk.vtkFloatArray()
fa.SetName("Real")
ia = vtk.vtkIntArray()
ia.SetName("Integer")
fd.AddArray(fa)
fd.AddArray(ia)
a = fd.GetArray("Real")
self.assertEqual(id(a), id(fa))
i = vtk.mutable(0)
a = fd.GetArray("Integer", i)
self.assertEqual(id(a), id(ia))
self.assertEqual(i, 1)
def testConstructors(self):
"""Test overloaded constructors"""
# resolve by number of arguments
v = vtk.vtkVector3d(3, 4, 5)
self.assertEqual((v[0], v[1], v[2]), (3, 4, 5))
v = vtk.vtkVector3d(6)
self.assertEqual((v[0], v[1], v[2]), (6, 6, 6))
# resolve by argument type
v = vtk.vtkVariant(3.0)
self.assertEqual(v.GetType(), vtk.VTK_DOUBLE)
v = vtk.vtkVariant(1)
self.assertEqual(v.GetType(), vtk.VTK_INT)
v = vtk.vtkVariant("hello")
self.assertEqual(v.GetType(), vtk.VTK_STRING)
v = vtk.vtkVariant(vtk.vtkObject())
self.assertEqual(v.GetType(), vtk.VTK_OBJECT)
def testArgumentConversion(self):
"""Test argument conversion via implicit constructors"""
# automatic conversion to vtkVariant
a = vtk.vtkVariantArray()
a.InsertNextValue(2.5)
        a.InsertNextValue(vtk.vtkObject())
self.assertEqual(a.GetValue(0), vtk.vtkVariant(2.5))
self.assertEqual(a.GetValue(1).GetType(), vtk.VTK_OBJECT)
# same, but this one is via "const vtkVariant&" argument
a = vtk.vtkDenseArray[float]()
a.Resize(1)
a.SetVariantValue(0, 2.5)
self.assertEqual(a.GetVariantValue(0).ToDouble(), 2.5)
if __name__ == "__main__":
Testing.main([(TestOverloads, 'test')])
|
rbarrois/factory_boy
|
tests/cyclic/self_ref.py
|
Python
|
mit
| 467
| 0
|
# -*- coding: utf-8 -*-
# Copyright: See the LICENSE file.
"""Helper to test circular factory dependencies."""
import factory
class TreeElement(object):
def __init__(self, name, parent):
self.parent = parent
self.name = name
class TreeElementFactory(factory.Factory):
class Meta:
model = TreeElement
name = factory.Sequence(lambda n: "tree%s" % n)
parent = factory.SubFactory('tests.cyclic.self_ref.TreeElementFactory')
|
kamitchell/py2app
|
py2app/simpleio.py
|
Python
|
mit
| 5,394
| 0.002225
|
"""
A simple file-system like interface that supports
both the regular filesystem and zipfiles
"""
__all__ = ('FileIO', 'ReadOnlyIO')
import os, time, zipfile
class FileIO (object):
"""
A simple interface that makes it possible
to write simple filesystem structures using
the interface that's exposed by the zipfile
module.
"""
def __init__(self, prefix):
self.prefix = prefix
def writestr(self, path, data):
"""
Write 'data' into file at 'path',
using read-only file permissions.
"""
while path.startswith('/'):
path = path[1:]
        fname = os.path.join(self.prefix, path)
        dirname = os.path.dirname(fname)
        if not os.path.exists(dirname):
            os.makedirs(dirname, mode=0755)
fp = open(fname, 'wb')
fp.write(data)
fp.close()
os.chmod(fname, 0444)
class ReadOnlyIO (object):
"""
A minimal read-only interface to the filesystem.
This interface transparently deals with zipfiles
(that is, ``io.read('/foo.zip/bar')`` extracts
    the contents of ``bar`` from the zipfile).
This interface is designed to be useful for py2app
and is not intended to be fast or generally useful.
"""
def read(self, path):
"""
Return the contents of ``path``
"""
zf, zp = self._zippath(path)
if zf is None:
fp = open(path, 'rb')
data = fp.read()
fp.close()
return data
else:
zf = zipfile.ZipFile(zf, 'r')
return zf.read(zp)
def get_mtime(self, path):
"""
Return the ``mtime`` attribute of ``path``.
"""
zf, zp = self._zippath(path)
if zf is None:
return os.stat(path).st_mtime
else:
zf = zipfile.ZipFile(zf)
info = zf.getinfo(zp)
return time.mktime(info.date_time + (0, 0, 0))
def exists(self, path):
"""
Return True if ``path`` exists
"""
return self.is_file(path) or self.is_dir(path) or self.is_symlink(path)
def is_dir(self, path):
"""
Return True if ``path`` exists and is a directory
"""
zf, zp = self._zippath(path, strict=False)
if zf is None:
return os.path.isdir(path)
        return bool(self.listdir(path))
def is_symlink(self, path):
"""
Return True if ``path`` exists and is a symbolic link
"""
zf, zp = self._zippath(path, strict=False)
if zf is not None:
return False
return os.path.islink(path)
def readlink(self, path):
zf, zp = self._zippath(path)
if zf is None:
return os.readlink(path)
raise IOError("%r is not a symlink"%(path,))
def is_file(self, path):
"""
Return True if ``path`` exists and is a regular file
"""
try:
            zf, zp = self._zippath(path, strict=True)
except IOError:
return False
if zf is None:
            return os.path.isfile(path)
else:
# 'strict==True' hence the object must
# exist in the zipfile and should therefore
# be a file and not a directory or link.
return True
def listdir(self, path):
"""
Return the contents of directory at ``path``.
NOTE: if ``path`` is in a zipfile this will
not raise an error if the directory does not
exist.
"""
zf, zp = self._zippath(path, strict=False)
if zf is None:
return os.listdir(path)
else:
_zf = zf
zf = zipfile.ZipFile(zf, 'r')
            rest = zp + '/'
result = set()
            for nm in zf.namelist():
                if nm == zp:
raise IOError("%r is not a directory in %r"%(path, _zf))
if nm.startswith(rest):
result.add(nm[len(rest):].split('/')[0])
return list(result)
def _zippath(self, path, strict=True):
"""
        Return either ``(zipfilename, zippath)`` or ``(None, path)``.
        If ``zipfilename`` is not None it points to a zipfile
that may contain the file as ``zippath``. Otherwise
the file is definitely not in a zipfile
Raises ``IOError`` when the file doesn't exist, but won't
check if the file exists in the zipfile unless ``strict``
is True.
"""
if os.path.exists(path):
return (None, path)
else:
            curpath = path
            rest = ''
            while curpath and not os.path.exists(curpath):
curpath, r = os.path.split(curpath)
rest = os.path.join(r, rest)
if not curpath:
raise IOError("file %r does not exist"%(path,))
try:
zf = zipfile.ZipFile(curpath)
except zipfile.BadZipfile:
raise IOError("bad zipfile %r for %r"%(curpath, path))
if rest.endswith('/'):
rest = rest[:-1]
if strict:
try:
zf.getinfo(rest)
except KeyError:
raise IOError("file %r does not exist in %r", path, curpath)
return curpath, rest
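# Usage sketch (hypothetical paths, not from the original source): the same
# calls work whether or not part of the path is actually a zipfile:
#
#   io = ReadOnlyIO()
#   data = io.read('/opt/app/lib.zip/pkg/__init__.py')
#   if io.is_dir('/opt/app'):
#       print io.listdir('/opt/app')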
bqbn/addons-server | src/olympia/bandwagon/tasks.py | Python | bsd-3-clause | 1,228 | 0.002443
from datetime import datetime
from django.db.models import Count
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from .models import Collection, CollectionAddon
log = olympia.core.logger.getLogger('z.task')
@task
@use_primary_db
def collection_meta(*ids, **kw):
log.info(
'[%s@%s] Updating collection metadata.' % (len(ids), collection_meta.rate_limit)
)
qs = CollectionAddon.objects.filter(collection__in=ids).values_list('collection')
counts = dict(qs.annotate(Count('id')))
now = datetime.now()
for collection_id, old_count in Collection.objects.filter(id__in=ids).values_list(
'pk', 'addon_count'
):
addon_count = counts.get(collection_id, 0)
        if addon_count == old_count:
continue
# We want to set addon_count & modified without triggering post_save
# as it would cause an infinite loop (this task is called on
        # post_save). So we use queryset.update() and set modified ourselves
# instead of relying on auto_now behaviour.
        Collection.objects.filter(id=collection_id).update(
addon_count=addon_count, modified=now
)
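# Usage sketch (an assumption about the caller, which is not shown in this
# file): as a celery task this would typically be queued from a post_save
# handler, e.g. collection_meta.delay(instance.collection_id)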
rskwan/mt | mt/mt/settings/base.py | Python | apache-2.0 | 2,992 | 0.003008
import os
from unipath import Path
from django.core.exceptions import ImproperlyConfigured
import dj_database_url
def env_var(var_name):
"""Get the environment variable var_name or return an exception."""
try:
return os.environ[var_name]
except KeyError:
msg = "Please set the environment variable {}".format(var_name)
raise ImproperlyConfigured(msg)
SECRET_KEY = env_var("MT_SECRET_KEY")
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# ADMIN_PATH controls where the admin urls are.
# e.g. if ADMIN_PATH == 'adminsitemilktea', then the admin site
# should be available at /adminsitemilktea/ instead of /admin/.
ADMIN_PATH = env_var("MT_ADMIN_PATH")
DJANGO_CORE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'djmoney',
'nested_admin',
]
CUSTOM_APPS = [
'core',
]
INSTALLED_APPS = DJANGO_CORE_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mt.urls'
WSGI_APPLICATION = 'mt.wsgi.application'
BASE_DIR = Path(__file__).ancestor(3)
MEDIA_ROOT = BASE_DIR.child("media")
STATIC_ROOT = BASE_DIR.child("static")
STATICFILES_DIRS = (
BASE_DIR.child("assets"),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (BASE_DIR.child("templates"),),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {'default': dj_database_url.parse(env_var("MT_MYSQL_URL"), conn_max_age = 600)}
DATABASES['default']['ATOMIC_REQUESTS'] = True
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py | Python | bsd-3-clause | 2,415 | 0.001656
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
class SuggestReviewersTest(unittest.TestCase):
def test_disabled(self):
step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=False))
OutputCapture().assert_outputs(self, step.run, [{}])
def test_basic(self):
capture = OutputCapture()
step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=True, git_commit=None))
expected_stdout = "The following reviewers have recently modified files in your patch:\nFoo Bar\n"
expected_logs = "Would you like to CC them?\n"
capture.assert_outputs(self, step.run, [{"bug_id": "123"}], expected_stdout=expected_stdout, expected_logs=expected_logs)
teeple/pns_server | work/install/node-v0.10.25/deps/v8/tools/run-tests.py | Python | gpl-2.0 | 13,499 | 0.010371
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import optparse
import os
from os.path import join
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"]
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
# Use this to run several variants of the tests.
VARIANT_FLAGS = [[],
["--stress-opt", "--always-opt"],
["--nocrankshaft"]]
MODE_FLAGS = {
"debug" : ["--nobreak-on-ab
|
ort", "--nodead-code-elimination",
"--enable-slow-asserts", "--debug-code", "--verify-heap"],
"release" : ["--nobreak-on-abort", "--nodead-code-elimination"]}
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
"mipsel",
"x64"]
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="ia32,x64,arm")
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
default=(utils.GuessOS() != "linux"),
dest="no_network", action="store_true")
result.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks',
default=False, dest="no_presubmit", action="store_true")
result.add_option("--no-stress", "--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, dest="no_stress", action="store_true")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
return result
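# Usage sketch (a hypothetical invocation; the flag names are the ones
# defined by BuildOptions() above):
#
#   python tools/run-tests.py --arch=x64 --mode=release --progress=color -j 8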
def ProcessOptions(options):
global VARIANT_FLAGS
# Architecture and mode related stuff.
if options.arch_and_mode:
tokens = options.arch_and_mode.split(".")
options.arch = tokens[0]
options.mode = tokens[1]
options.mode = options.mode.split(",")
for mode in options.mode:
if not mode.lower() in ["debug", "release"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
options.arch = ARCH_GUESS
options.arch = options.arch.split(",")
for arch in options.arch:
if not arch in SUPPORTED_ARCHS:
print "Unknown architecture %s" % arch
return False
# Special processing of other options, sorted alphabetically.
if options.buildbot:
# Buildbots run presubmit tests as a separate step.
options.no_presubmit = True
options.no_network = True
if options.command_prefix:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
options.no_network = True
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.no_stress:
VARIANT_FLAGS = [[], ["--nocrankshaft"]]
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
options.shell_dir = os.path.dirname(options.shell)
if options.stress_only:
VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running,
zhengxinxing/bespeak_meal | __init__.py | Python | mit | 120 | 0.01087
# Mainly so the app name is displayed in Chinese in the admin interface
default_app_config = 'bespeak_meal.apps.Bespeak_meal_config'
jiocloudservices/jcsclient | src/jcsclient/compute_api/instance.py | Python | apache-2.0 | 7,005 | 0.001713
# Copyright (c) 2016 Jiocloud.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import binascii
import requests
from jcsclient import exception
from jcsclient import utils
from jcsclient import requestify
def describe_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=False)
# Right now filters functionality is broken, it works only
# for cases like --filters "Name=abc,Values=def"
parser.add_argument('--filters', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def start_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def stop_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def reboot_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def terminate_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-ids', nargs='+', required=True)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def describe_instance_types(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-type-ids', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
def run_instances(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-type-id', required=True)
parser.add_argument('--image-id', required=True)
parser.add_argument('--subnet-id', required=False)
parser.add_argument('--security-group-ids', nargs='+', required=False)
parser.add_argument('--key-name', required=False)
parser.add_argument('--instance-count', type=int, required=False)
parser.add_argument('--private-ip-address', required=False)
parser.add_argument('--block-device-mappings', nargs='+', required=False)
args = parser.parse_args(args)
utils.populate_params_from_cli_args(params, args)
return requestify.make_request(url, verb, headers, params)
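# Usage sketch (hypothetical values; url/verb/headers/version come from the
# surrounding client plumbing): args[0] is the dashed action name and the
# rest are CLI-style flags parsed above, e.g.:
#
#   run_instances(url, 'GET', headers, '2016-03-01',
#                 ['run-instances', '--instance-type-id', 't2.small',
#                  '--image-id', 'img-1234', '--instance-count', '2'])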
def decrypt_instance_password(password, private_key_file, passphrase):
key = utils.import_ssh_key(private_key_file, passphrase)
encrypted_data = base64.b64decode(base64.b64decode(password))
ciphertext = int(binascii.hexlify(encrypted_data), 16)
plaintext = key.decrypt(ciphertext)
decrypted_data = utils.long_to_bytes(plaintext)
unpadded_data = utils.pkcs1_unpad(decrypted_data)
return unpadded_data
def get_password_data(url, verb, headers, version, args):
params = {}
params['Action'] = utils.dash_to_camelcase(args[0])
params['Version'] = version
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--instance-id', required=True)
processed, remaining = parser.parse_known_args(args)
utils.populate_params_from_cli_args(params, processed)
response = requestify.make_request(url, verb, headers, params)
parser = utils.get_argument_parser()
parser.add_argument('--private-key-file', required=False, default=None)
parser.add_argument('--key-passphrase', required=False, default=None)
processed = parser.parse_args(remaining)
processed = vars(processed)
private_key_file = processed.get('private_key_file')
passphrase = processed.get('key_passphrase')
response_json = utils.web_response_to_json(response)
try:
response_body = response_json['GetPasswordDataResponse']
encrypted_password = response_body['passwordData']
if not private_key_file or not encrypted_password:
return response
decrypted_password = decrypt_instance_password(encrypted_password,
private_key_file,
passphrase)
response_json['GetPasswordDataResponse']['passwordData'] = \
decrypted_password
return response_json
except KeyError as ke:
raise exception.UnknownOutputFormat()
IT-SeanWANG/CodeJam | 2017_2nd/Q2_Refer2.py | Python | apache-2.0 | 1,200 | 0.003333
import copy
max_pro = 0
def find(list_foot_pmt, max_pmt):
max_pmtn = max_pmt
a = list_foot_pmt.pop(0)
for i in range(0, len(list_foot_pmt)):
max_pmt = max_pmtn
        list_foot_pmt1 = copy.deepcopy(list_foot_pmt)
        b = list_foot_pmt1.pop(i)
        max_pmt += pro_matrix[a][b]
        if len(list_foot_pmt1) > 0:
find(list_foot_pmt1, max_pmt)
else:
global max_pro
if max_pmt > max_pro:
max_pro = max_pmt
return
N = int(input())
pro_matrix = []
for j in range(0, N):
str_tmp = input()
pro_row = str_tmp.split(" ")
pro_matrix.append(pro_row)
for i in range(0, N):
for j in range(0, N):
pro_matrix[i][j] = int(pro_matrix[i][j])
list_foot = []
for i in range(0, N):
list_foot.append(i)
max = 0
max_pro_odd = 0
if len(list_foot) % 2 == 0:
find(list_foot, max)
print(max_pro)
else:
for i in range(0, N):
list_foot_tmp = copy.deepcopy(list_foot)
list_foot_tmp.pop(i)
find(list_foot_tmp, max)
if max_pro > max_pro_odd:
max_pro_odd = max_pro
print(max_pro_odd)
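# Worked example (not part of the original submission): for N=2 and the
# matrix
#   0 5
#   5 0
# the only pairing is (0, 1), so find() accumulates pro_matrix[0][1] = 5
# and the program prints 5. For odd N, each element is left out in turn and
# the best pairing over the remaining even-sized set is kept.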
juancarlospaco/vagrant | main.py | Python | gpl-3.0 | 23,005 | 0.005651
# -*- coding: utf-8 -*-
# PEP8:OK, LINT:OK, PY3:OK
#############################################################################
## This file may be used under the terms of the GNU General Public
## License version 2.0 or 3.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http:#www.fsf.org/licensing/licenses/info/GPLv2.html and
## http:#www.gnu.org/copyleft/gpl.html.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#############################################################################
# metadata
' Vagrant Ninja '
__version__ = ' 2.6 '
__license__ = ' GPL '
__author__ = ' juancarlospaco '
__email__ = ' juancarlospaco@ubuntu.com '
__url__ = 'github.com/juancarlospaco'
__date__ = '10/10/2013'
__prj__ = 'vagrant'
__docformat__ = 'html'
__source__ = ''
__full_licence__ = ''
# imports
from os import environ, linesep, chmod, remove, path, chdir, makedirs
from sip import setapi
from datetime import datetime
from subprocess import check_output as getoutput
from random import choice
from getpass import getuser
try:
from os import startfile
except ImportError:
from subprocess import Popen
from PyQt4.QtGui import (QLabel, QCompleter, QDirModel, QPushButton, QMenu,
QDockWidget, QVBoxLayout, QLineEdit, QIcon, QCheckBox, QColor, QMessageBox,
QGraphicsDropShadowEffect, QGroupBox, QComboBox, QTabWidget, QButtonGroup,
QAbstractButton, QScrollArea, QSpinBox)
from PyQt4.QtCore import Qt, QDir, QProcess, QUrl
from PyQt4.QtNetwork import QNetworkProxy
try:
from PyKDE4.kdeui import KTextEdit as QTextEdit
except ImportError:
from PyQt4.QtGui import QTextEdit # lint:ok
from ninja_ide.core import plugin
# API 2
for a in ("QDate", "QDateTime", "QString", "QTime", "QUrl",
          "QTextStream", "QVariant"):
    setapi(a, 2)
# constans
HELPMSG = '''<h3>Vagrant</h3>
Vagrant provides easy to configure, reproducible, and portable work environments
built on top of industry-standard technology and controlled by a single
consistent workflow.<br>Machines are provisioned on top of VirtualBox.
Provisioning tools automatically install and configure software on the machine.
<br><br><b>If you are Developer</b>, Vagrant will isolate dependencies and
configuration within a single disposable, consistent environment, without
sacrificing any of tools you are used to working with (editors, debuggers, etc).
Once you or someone else creates a single Vagrantfile, you just need to vagrant
up and everything is installed and configured for you to work.
Other members of your team create their development environments from the same
configuration, so whether you are working on Linux, OSX, or Windows, all your
team members are running code in the same environment, against the same
dependencies, all configured same way. Say goodbye to "works on my machine" bugs
.<br><br>Visit <a href="http://vagrantup.com">Vagrantup.com</a> and
<a href="http://virtualbox.org">Virtualbox.org</a><br><br>
''' + ''.join((__doc__, __version__, __license__, 'by', __author__, __email__))
VBOXGUI = '''
config.vm.provider :virtualbox do |vb|
vb.gui = true # false for NO GUI
vb.customize ["modifyvm", :id, "--memory", "{}"] # RAM for VM
vb.customize ["modifyvm", :id, "--cpuexecutioncap", "{}"] # CPU for VM
end
'''
APTGET_PROXY = '''# proxy support for the VM
echo "Acquire::http::Proxy 'http://{}';" | tee /etc/apt/apt.conf.d/99proxy
echo "Acquire::https::Proxy 'https://{}';" >> /etc/apt/apt.conf.d/99proxy
echo "Acquire::ftp::Proxy 'ftp://{}';" >> /etc/apt/apt.conf.d/99proxy
export http_proxy='http://{}'
export https_proxy='https://{}'
export ftp_proxy='ftp://{}'
'''
CONFIG = '''
Vagrant.configure("2") do |config|
config.vm.box = "{}"
config.vm.hostname = "{}"
config.vm.box_url = "{}://cloud-images.ubuntu.com/vagrant/{}/current/{}-server-cloudimg-{}-vagrant-disk1.box"
config.vm.provision :shell, :path => "bootstrap.sh"
{}
{}
end
'''
BASE = path.abspath(path.join(path.expanduser("~"), 'vagrant'))
###############################################################################
class Main(plugin.Plugin):
" Main Class "
def initialize(self, *args, **kwargs):
" Init Main
|
Class "
super(Main, self).initialize(*args, **kwargs)
self.completer, self.dirs = QCompleter(self), QDirModel(self)
self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)
self.completer.setModel(self.dirs)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setCompletionMode(QCompleter.PopupCompletion)
self.desktop, self.project, menu = '', '', QMenu('Vagrant')
menu.addAction('UP', lambda: self.vagrant_c('up'))
menu.addAction('HALT', lambda: self.vagrant_c('halt'))
menu.addAction('RELOAD', lambda: self.vagrant_c('reload'))
menu.addAction('STATUS', lambda: self.vagrant_c('status'))
menu.addAction('SUSPEND', lambda: self.vagrant_c('suspend'))
menu.addAction('RESUME', lambda: self.vagrant_c('resume'))
menu.addAction('PROVISION', lambda: self.vagrant_c('provision'))
menu.addAction('PACKAGE', lambda: self.vagrant_c('package'))
menu.addAction('INIT', lambda: self.vagrant_c('init'))
menu.addSeparator()
menu.addAction('DESTROY (!!!)', lambda: self.vagrant_c('destroy'))
self.locator.get_service('explorer').add_project_menu(menu, lang='all')
self.process = QProcess()
self.process.readyReadStandardOutput.connect(self.readOutput)
self.process.readyReadStandardError.connect(self.readErrors)
self.process.finished.connect(self._process_finished)
self.process.error.connect(self._process_finished)
# Proxy support, by reading http_proxy os env variable
proxy_url = QUrl(environ.get('http_proxy', ''))
QNetworkProxy.setApplicationProxy(QNetworkProxy(QNetworkProxy.HttpProxy
if str(proxy_url.scheme()).startswith('http')
else QNetworkProxy.Socks5Proxy, proxy_url.host(), proxy_url.port(),
proxy_url.userName(), proxy_url.password())) \
if 'http_proxy' in environ else None
self.mainwidget = QTabWidget()
self.mainwidget.tabCloseRequested.connect(lambda:
self.mainwidget.setTabPosition(1)
if self.mainwidget.tabPosition() == 0
else self.mainwidget.setTabPosition(0))
self.mainwidget.setStyleSheet('QTabBar{font-weight:bold;}')
self.mainwidget.setMovable(True)
self.mainwidget.setTabsClosable(True)
self.dock, self.scrollable = QDockWidget(), QScrollArea()
self.scrollable.setWidgetResizable(True)
self.scrollable.setWidget(self.mainwidget)
self.dock.setWindowTitle(__doc__)
self.dock.setStyleSheet('QDockWidget::title{text-align: center;}')
self.dock.setWidget(self.scrollable)
self.locator.get_service('misc').add_widget(self.dock,
QIcon.fromTheme("virtualbox"), __doc__)
self.tab1, self.tab2, self.tab3 = QGroupBox(), QGroupBox(), QGroupBox()
self.tab4, self.tab5, self.tab6 = QGroupBox(), QGroupBox(), QGroupBox()
for a, b in ((self.tab1, 'Basics'), (self.tab2, 'General Options'),
(self.tab3, 'VM Package Manager'), (self.tab4, 'VM Provisioning'),
(self.tab5, 'VM Desktop GUI'), (self.tab6, 'Run')):
a.setTitle(b)
a.setToolTip(b)
self.mainwidget.addTab(a, QIcon.fromTheme("virtualbox"), b)
QPushButton(QIcon.fromTheme("help-about"), 'About', self.dock
).clicked.connect(lambda: QMessageBox.information(self.dock, __doc__,
HELPMSG))
self.vmname = QLineEdit(self.get_name())
self.vmname.setPlaceholderText('type_your_VM_name_here_without_spaces')
self.vmname.setToolTip('Type VM
pankajn17/intern | web/db.py | Python | gpl-3.0 | 40,670 | 0.007303
"""
Database API
(part of web.py)
"""
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
import time
try:
import datetime
except ImportError:
datetime = None
try: set
except NameError:
from sets import Set as set
from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
try:
# db module can work independent of web.py
from webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> rep
intenthq/code-challenges | python/connected_graph/test_connected_graph.py | Python | mit | 910 | 0.002198
import unittest
from .connected_graph import Node
class TestConnectedGraph(unittest.TestCase):
def test_acyclic_graph(self):
"""Example graph fr
|
om https://upload.wikimedia.org/wikipedia/commons/0/03/Directed_acyclic_graph_2.svg"""
n9 = Node(9)
n10 = Node(10)
n8 = Node(8, [n9])
n3 = Node(3, [n8, n10])
n2 = Node(2)
n11 = Node(11, [n2, n9, n10])
n5 = Node(5, [n11])
self.assertTrue(n3.connected_to(n9))
self.assertTrue(n11.connected_to(n9))
self.assertTrue(n5.connected_to(n9))
self.assertFalse(n9.connected_to(n5))
self.assertFalse(n9.connected_to(n11))
self.assertFalse(n3.connected_to(n11))
def test_connected_to_self(self):
n1 = Node(1)
self.assertTrue(n1.connected_to(n1))
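# A minimal sketch of the Node interface these tests assume (hypothetical;
# the real implementation lives in .connected_graph and may differ):
#
#   class Node(object):
#       def __init__(self, value, children=None):
#           self.value = value
#           self.children = children or []
#       def connected_to(self, other):
#           # a node reaches itself, or anything reachable via a child
#           return self is other or any(
#               c.connected_to(other) for c in self.children)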
kentya6/swift | utils/benchmark/Graph/generate-data.py | Python | apache-2.0 | 748 | 0.002674
import pygraph.algorithms.generators as gen
import pygraph.algorithms.accessibility as acc
import pygraph.algorithms.minmax as minmax
graph = gen.generate(5000, 10000, weight_range=(50, 2000))
components = acc.connected_components(graph)
nodes = [g for g in graph if components[g] == 1]
print "GRAPH NODES"
for n in graph.nodes():
print n
print "GRAPH EDGES"
for e in graph.edges():
if components[e[0]] == 1:
w = graph.edge_weight(e)
print (e[0], e[1], w)
# MST = minmax.minimal_spanning_tree(graph)
# print "MST NODES"
# for n in MST.keys():
# print n
# print "MST EDGES"
# for k in MST.keys():
#     if MST[k] is not None:
# print "(%d, %d)" % (k, MST[k])
# else:
# print "(%d, %d)" % (k, k)
mmcfarland/model-my-watershed | deployment/cfn/utils/constants.py | Python | apache-2.0 | 369 | 0
EC2_INSTANCE_TYPES = [
't2.micro',
't2.small',
't2.medium'
]
RDS_INSTANCE_TYPES = [
'db.t2.micro'
]
ELASTICACHE_INSTANCE_TYPES = [
'cache.t2.micro'
]
ALLOW_ALL_CIDR = '0.0.0.0/0'
VPC_CIDR = '10.0.0.0/16'
GRAPHITE = 2003
GRAPHITE_WEB = 8080
HTTP = 80
HTTPS = 443
KIBANA = 5601
POSTGRESQL = 5432
REDIS = 6379
RELP = 20514
SSH = 22
STATSITE = 8125
tommybobbins/pipoegusca | test2.py | Python | gpl-2.0 | 347 | 0.002882
#!/usr/bin/python
import time
import datetime
import logging
import os
import syslog
#from os import path, access, R_OK
from time import sleep
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# 22 = Relay 1, 27 = Relay 2, 17 = Relay 3
GPIO.setup(27, GPIO.OUT)
GPIO.output(27, False)
sleep(2)
GPIO.output(27, True)
sleep(2)
GPIO.cleanup()
liveaverage/baruwa | src/baruwa/manage.py | Python | gpl-2.0 | 575 | 0.006957
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in"
" the directory
|
containing %r. It appears you've customized "
|
"things.\nYou'll have to run django-admin.py, passing it your"
" settings module.\n(If the file settings.py does indeed exist,"
" it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
Masood-M/yalih | mechanize/_mechanize.py | Python | apache-2.0 | 31,059 | 0
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
from __future__ import absolute_import
import copy
import os
import re
from . import _request, _response, _rfc3986, _sockettimeout, _urllib2_fork
from ._clientcookie import Cookie
from ._headersutil import normalize_header_name
from ._html import Factory
from ._useragent import UserAgentBase
from .polyglot import pathname2url, HTTPError, is_string, iteritems
class BrowserStateError(Exception):
pass
class LinkNotFoundError(Exception):
pass
class FormNotFoundError(Exception):
pass
def sanepathname2url(path):
urlpath = pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
def __copy__(self):
ans = self.__class__()
ans._history = self._history[:]
return ans
class HTTPRefererProcessor(_urllib2_fork.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
:class:`BrowserStateError` is raised whenever the browser is in the wrong
state to complete the requested operation - e.g., when :meth:`back()` is
called when the browser history is empty, or when :meth:`follow_link()` is
called when the current response does not contain HTML data.
Public attributes:
request: current request (:class:`mechanize.Request`)
form: currently selected form (see :meth:`select_form()`)
:param history: object implementing the :class:`mechanize.History`
interface. Note this interface is still experimental
and may change in future. This object is owned
by the browser instance and must not be shared
among browsers.
:param request_class: Request class to use. Defaults to
:class:`mechanize.Request`
:param content_parser: A function that is responsible for parsing
received html/xhtml content. See the builtin
:func:`mechanize._html.content_parser()` function for details
on the interface this function must support.
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(
self,
history=None,
request_class=None,
content_parser=None,
allow_xhtml=False, ):
"""
Only named arguments should be passed to this constructor.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
request_class = _request.Request
factory = Factory(allow_xhtml=allow_xhtml)
factory.set_request_class(request_class)
if content_parser is not None:
factory.set_content_parser(content_parser)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def __copy__(self):
'''
Clone this browser instance. The clone will share the same, thread-safe
cookie jar, and have all the same handlers/settings, but will not share
any other state, making it safe to use in another thread.
'''
ans = self.__class__()
self._copy_state(ans)
ans._handle_referer = self._handle_referer
for attr in ('_response_type_finder', '_encoding_finder',
'_content_parser'):
setattr(ans._factory, attr, getattr(self._factory, attr))
ans.request_class = self.request_class
ans._history = copy.copy(self._history)
return ans
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1] + (None, )
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self,
url_or_request,
data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
"""Open a URL without visiting it.
Browser state (including request, response, history, forms and links)
is left unchanged by calling this function.
The interface is the same as for :meth:`open()`.
This is useful for things like fetching images.
See also :meth:`retrieve()`
"""
return self._mech_open(
            url_or_request, data, visit=False, timeout=timeout)
def open(self,
url_or_request,
data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
'''
marguslaak/django-xadmin | xadmin/models.py | Python | bsd-3-clause | 4,934 | 0.001216
import json
import django
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.base import ModelBase
from django.utils.encoding import smart_unicode
from django.db.models.signals import post_syncdb
from django.contrib.auth.models import Permission
import sys
import datetime
import decimal
if 4 < django.VERSION[1] < 7:
AUTH_USER_MODEL = django.contrib.auth.get_user_model()
else:
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def add_view_permissions(sender, **kwargs):
"""
    This syncdb hook takes care of adding a view permission to all our
    content types.
"""
argv = sys.argv
permissions_with_tests = getattr(settings, "XADMIN_TEST_VIEW_PERMISSIONS", True)
if not permissions_with_tests and len(argv) > 1 \
and (argv[1] == "test" or argv[1] == "jenkins"):
return
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type, codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
#print "Added view permission for %s" % content_type.name
# check for all our view permissions after a syncdb
post_syncdb.connect(add_view_permissions)
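# Usage sketch (a standard Django permission check, not from this file): once
# the hook has run, code can gate on the synthesized permission, e.g.
#   request.user.has_perm('%s.view_%s' % (opts.app_label, opts.model_name))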
class Bookmark(models.Model):
title = models.CharField(_(u'Title'), max_length=128)
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"), blank=True, null=True)
url_name = models.CharField(_(u'Url Name'), max_length=64)
content_type = models.ForeignKey(ContentType)
query = models.CharField(_(u'Query String'), max_length=1000, blank=True)
is_share = models.BooleanField(_(u'Is Shared'), default=False)
@property
def url(self):
base_url = reverse(self.url_name)
if self.query:
base_url = base_url + '?' + self.query
return base_url
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'Bookmark')
verbose_name_plural = _('Bookmarks')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, ModelBase):
return '%s.%s' % (o._meta.app_label, o._meta.model_name)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
class UserSettings(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"))
key = models.CharField(_('Settings Key'), max_length=256)
value = models.TextField(_('Settings Content'))
def json_value(self):
return json.loads(self.value)
def set_json(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
    def __unicode__(self):
return "%s %s" % (self.user, self.key)
class Meta:
verbose_name = _(u'User Setting')
verbose_name_plural = _('User Settings')
class UserWidget(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"))
page_id = models.CharField(_(u"Page"), max_length=256)
widget_type = models.CharField(_(u"Widget Type"), max_length=50)
value = models.TextField(_(u"Widget Params"))
def get_value(self):
value = json.loads(self.value)
value['id'] = self.id
value['type'] = self.widget_type
return value
def set_value(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
def save(self, *args, **kwargs):
created = self.pk is None
super(UserWidget, self).save(*args, **kwargs)
if created:
try:
portal_pos = UserSettings.objects.get(
user=self.user, key="dashboard:%s:pos" % self.page_id)
portal_pos.value = "%s,%s" % (self.pk, portal_pos.value) if portal_pos.value else self.pk
portal_pos.save()
except Exception:
pass
def __unicode__(self):
return "%s %s widget" % (self.user, self.widget_type)
class Meta:
verbose_name = _(u'User Widget')
verbose_name_plural = _('User Widgets')
braubar/braubar-pi | test/testReadTempSocket.py | Python | gpl-3.0 | 139 | 0.021583
from helper.readtempsocket import ReadTempSocket
class asd:
def __init__(self):
r = ReadTempSocket()
r.run()
asd()
teeple/pns_server | work/install/Python-2.7.4/Tools/freeze/makeconfig.py | Python | gpl-2.0 | 1,676 | 0.002983
import re
import sys
# Write the config.c file
never = ['marshal', '__main__', '__builtin__', 'sys', 'exceptions', '_warnings']
def makeconfig(infp, outfp, modules, with_ifdef=0):
m1 = re.compile('-- ADDMODULE MARKER 1 --')
m2 = re.compile('-- ADDMODULE MARKER 2 --')
while 1:
line = infp.readline()
if not line: break
outfp.write(line)
if m1 and m1.search(line):
m1 = None
for mod in modules:
if mod in never:
continue
if with_ifdef:
outfp.write("#ifndef init%s\n"%mod)
outfp.write('extern void init%s(void);\n' % mod)
if with_ifdef:
outfp.write("#endif\n")
elif m2 and m2.search(line):
m2 = None
for mod in modules:
if mod in never:
continue
outfp.write('\t{"%s", init%s},\n' %
(mod, mod))
if m1:
sys.stderr.write('MARKER 1 never found\n')
elif m2:
sys.stderr.write('MARKER 2 never found\n')
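# Illustration (hypothetical config.c.in fragment, not shipped with this
# script): makeconfig() copies the template through unchanged and expands
# the two markers, so an input containing
#   /* -- ADDMODULE MARKER 1 -- */
#   /* -- ADDMODULE MARKER 2 -- */
# run with modules=['spam'] gains
#   extern void initspam(void);
# after marker 1 and
#   {"spam", initspam},
# after marker 2.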
# Test program.
def test():
if not sys.argv[3:]:
print 'usage: python makeconfig.py config.c.in outputfile',
print 'modulename ...'
sys.exit(2)
if sys.argv[1] == '-':
infp = sys.stdin
else:
infp = open(sys.argv[1])
if sys.argv[2] == '-':
outfp = sys.stdout
else:
outfp = open(sys.argv[2], 'w')
makeconfig(infp, outfp, sys.argv[3:])
if outfp != sys.stdout:
outfp.close()
if infp != sys.stdin:
infp.close()
if __name__ == '__main__':
test()
hiveary/hiveary-agent | setup.py | Python | bsd-3-clause | 3,550 | 0.010986
#!/usr/bin/env python
"""
Hiveary
https://hiveary.com
Licensed under Simplified BSD License (see LICENSE)
(C) Hiveary, Inc. 2013-2014 all rights reserved
"""
import platform
import sys
from hiveary import __version__ as version
current_platform = platform.system()
FROZEN_NAME = 'hiveary-agent'
AUTHOR = "Hiveary"
AUTHOR_EMAIL = "info@hiveary.com"
DESCRIPTION = "Hiveary Monitoring Agent"
LICENSE = "Simplified BSD"
URL = "http://hiveary.com"
# OS-specific setup
if 'bdist_esky' in sys.argv and current_platform == 'Windows':
# Use esky/cxfreeze to build the agent and py2exe to build the service
from esky.bdist_esky import Executable
from glob import glob
import os
import py2exe # noqa
import setuptools
import shutil
modules = [
'kombu.transport.pyamqp',
'kombu.transport.base',
'kombu.transport.amqplib',
]
sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT')
# Add in Visual Studio C++ compiler library
data_files = [
('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')),
r'hiveary\ca-bundle.pem',
('monitors', glob(r'monitors\*.py'))
]
script = Executable('hiveary-agent', gui_only=False)
options = {
'bdist_esky': {
'freezer_module': 'cxfreeze',
'includes': modules,
}
}
# Build the agent
  setuptools.setup(name=FROZEN_NAME,
version=version,
scripts=[script],
options=options,
data_files=data_files,
)
sys.argv.remove('bdist_esky')
  sys.argv.append('py2exe')
# used for the versioninfo resource
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
self.company_name = 'Hiveary'
self.name = "HivearyService"
script = Target(
description='Hiveary Agent Service Launcher',
modules=["HivearyService"],
cmdline_style='pywin32')
data_files = []
# Build the service
setuptools.setup(name='HivearyService',
version=version,
options={'py2exe': {}},
service=[script]
)
# python27.dll will be available at the root once the esky zip is extracted,
# so we can remove it now
os.remove(r'dist\python27.dll')
shutil.rmtree('build')
else:
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Include all files from the package.
install_requires = [
'amqplib>=1.0.2',
'kombu>=3.0.8',
'netifaces-merged>=0.9.0',
'oauth2>=1.5.211',
'psutil>=1.1.0',
'simplejson>=3.0.5',
'Twisted>=13.2.0',
'impala>=0.1.1',
]
data_files = [
('/etc/hiveary', ['hiveary.conf.example', 'README.md']),
('/etc/hiveary/init', ['initd/hiveary-agent']),
('/etc/hiveary/systemd', ['arch/hiveary-agent.service']),
('/usr/lib/hiveary', ['monitors/resources.py']),
]
setup(name=FROZEN_NAME,
version=version,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
include_package_data=True,
data_files=data_files,
install_requires=install_requires,
packages=find_packages(),
scripts=['hiveary-agent']
)
saxix/django-concurrency | setup.py | Python | mit | 2,485 | 0.000805
#!/usr/bin/env python
import ast
import os
import re
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
init = os.path.join(ROOT, 'src', 'concurrency', '__init__.py')
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_name_re = re.compile(r'NAME\s+=\s+(.*)')
with open(init, 'rb') as f:
content = f.read().decode('utf-8')
VERSION = str(ast.literal_eval(_version_re.search(content).group(1)))
NAME = str(ast.literal_eval(_name_re.search(content).group(1)))
base_url = 'https://github.com/saxix/django-concurrency/'
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import sys
import pytest
sys.path.insert(0, os.path.join(ROOT, 'tests', 'demoapp'))
errno = pytest.main(self.test_args)
sys.exit(errno)
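# Note: PyTest is defined here but never wired into the setup() call below via
# cmdclass; the pytest-runner entry in setup_requires supplies the actual test
# command, so this class appears to be vestigial.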
setup(
name=NAME,
version=VERSION,
url='https://github.com/saxix/django-concurrency',
author='Stefano Apostolico',
author_email='s.apostolico@gmail.com',
package_dir={'': 'src'},
packages=find_packages('src'),
include_package_data=True,
description='Optimistic lock implementation for Django. Prevents users from doing concurrent editing.',
long_description=open('README.rst').read(),
license='MIT License',
keywords='django, concurrency, optimistic lock, locking, concurrent editing',
setup_requires=['pytest-runner', ],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Framework :: Django :: 4.0',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# platforms=['any']
)
Perkville/django-tastypie | tastypie/resources.py | Python | bsd-3-clause | 100,856 | 0.001874
from __future__ import unicode_literals
from copy import copy, deepcopy
from datetime import datetime
import logging
import sys
from time import mktime
import traceback
import warnings
from wsgiref.handlers import format_date_time
import django
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import (
ObjectDoesNotExist, MultipleObjectsReturned, ValidationError,
)
from django.core.signals import got_request_exception
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields.related import ForeignKey
try:
from django.contrib.gis.db.models.fields import GeometryField
except (ImproperlyConfigured, ImportError):
GeometryField = None
from django.db.models.constants import LOOKUP_SEP
try:
from django.db.models.fields.related import\
SingleRelatedObjectDescriptor as ReverseOneToOneDescriptor
except ImportError:
from django.db.models.fields.related_descriptors import\
ReverseOneToOneDescriptor
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils import six
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.html import escape
from django.views.decorators.csrf import csrf_exempt
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.compat import NoReverseMatch, reverse, Resolver404, get_script_prefix
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import (
NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError,
ImmediateHttpResponse, Unauthorized, UnsupportedFormat,
UnsupportedSerializationFormat, UnsupportedDeserializationFormat,
)
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import (
    dict_strip_unicode_keys, is_valid_jsonp_callback_value, string_to_python,
trailing_slash,
)
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
from tastypie.compat import get_module_name, atomic_decorator
def sanitize(text):
# We put the single quotes back, due to their frequent usage in exception
# messages.
    return escape(text).replace('&#39;', "'").replace('&quot;', '"')
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = None
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
if six.PY3:
return object.__new__(type('ResourceOptions', (cls,), overrides))
else:
return object.__new__(type(b'ResourceOptions', (cls,), overrides))
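# A hypothetical Meta override consumed by the machinery above (the names are
# illustrative, not from this file):
#
#   class NoteResource(Resource):
#       class Meta:
#           resource_name = 'note'
#           allowed_methods = ['get']  # copied into list/detail defaults above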
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
for field_name, obj in attrs.copy().items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
abstract = getattr(new_class._meta, 'abstract', False)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if 'resource_uri' not in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly_post=True, readonly_patch=True, verbose_name="resource uri")
elif 'resource_uri' in new_class.base_fields and 'resource_uri' not in attrs:
del(new_class.base_fields['resource_uri'])
if abstract and 'resource_uri' not in attrs:
# abstract classes don't have resource_uris unless explicitly provided
if 'resource_uri' in new_class.base_fields:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(six.with_metaclass(DeclarativeMetaclass)):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
def __init__(self, api_name=None):
# this can cause:
# TypeError: object.__new__(method-wrapper) is not safe, use method-wrapper.__new__()
# when trying to copy a generator used as a default. Wrap call to
# generator in lambda to get around this error.
self.fields = {k: copy(v) for k, v in self.base_fields.items()}
if api_name is not None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name == '__setstate__':
raise AttributeError(name)
try:
return self.fields[name]
except KeyError:
raise AttributeError(name)
def wrap_view(self, view):
cyllyq/nutsbp-test | test_case/demo.py | Python | gpl-2.0 | 407 | 0.004914
# _*_ coding:utf-8 _*_
__author__ = 'Y-ling'
__date__ = '2017/9/15 11:11'
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import unittest
import os
import time
import copy
import utils
from elements_path import LOGIN_FORM, TOP_BAR, CENTER, CENTER_PERSONAL, CENTER_RESET_PASSWORD, FIND_WAIT_TIME, MY_BP
for x in range(70):
print utils.random_chinese(1000)
bSr43/capstone | suite/disasm_mc.py | Python | bsd-3-clause | 7,561 | 0.003835
#!/usr/bin/python
# Test tool to disassemble MC files. By Nguyen Anh Quynh, 2017
import array, os.path, sys
from capstone import *
# convert all hex numbers to decimal numbers in a text
def normalize_hex(a):
while(True):
i = a.find('0x')
if i == -1: # no more hex number
break
hexnum = '0x'
for c in a[i + 2:]:
if c in '0123456789abcdefABCDEF':
hexnum += c
else:
break
num = int(hexnum, 16)
a = a.replace(hexnum, str(num))
return a
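# Illustrative behaviour on an assumed input:
#   normalize_hex('ldr r0, [r1, #0x10]') -> 'ldr r0, [r1, #16]'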
def test_file(fname):
print("Test %s" %fname);
f = open(fname)
lines = f.readlines()
f.close()
if not lines[0].startswith('# '):
print("ERROR: decoding information is missing")
return
# skip '# ' at the front, then split line to get out hexcode
# Note: option can be '', or 'None'
#print lines[0]
#print lines[0][2:].split(', ')
(arch, mode, option) = lines[0][2:].split(', ')
mode = mode.replace(' ', '')
option = option.strip()
archs = {
"CS_ARCH_ARM": CS_ARCH_ARM,
"CS_ARCH_ARM64": CS_ARCH_ARM64,
"CS_ARCH_MIPS": CS_ARCH_MIPS,
"CS_ARCH_PPC": CS_ARCH_PPC,
"CS_ARCH_SPARC": CS_ARCH_SPARC,
"CS_ARCH_SYSZ": CS_ARCH_SYSZ,
"CS_ARCH_X86": CS_ARCH_X86,
"CS_ARCH_XCORE": CS_ARCH_XCORE,
"CS_ARCH_M68K": CS_ARCH_M68K,
}
modes = {
"CS_MODE_16": CS_MODE_16,
"CS_MODE_32": CS_MODE_32,
"CS_MODE_64": CS_MODE_64,
"CS_MODE_MIPS32": CS_MODE_MIPS32,
"CS_MODE_MIPS64": CS_MODE_MIPS64,
"0": CS_MODE_ARM,
"CS_MODE_ARM": CS_MODE_ARM,
"CS_MODE_THUMB": CS_MODE_THUMB,
"CS_MODE_ARM+CS_MODE_V8": CS_MODE_ARM+CS_MODE_V8,
"CS_MODE_THUMB+CS_MODE_V8": CS_MODE_THUMB+CS_MODE_V8,
"CS_MODE_THUMB+CS_MODE_MCLASS": CS_MODE_THUMB+CS_MODE_MCLASS,
"CS_MODE_LITTLE_ENDIAN": CS_MODE_LITTLE_ENDIAN,
"CS_MODE_BIG_ENDIAN": CS_MODE_BIG_ENDIAN,
"CS_MODE_64+CS_MODE_LITTLE_ENDIAN": CS_MODE_64+CS_MODE_LITTLE_ENDIAN,
"CS_MODE_64+CS_MODE_BIG_ENDIAN": CS_MODE_64+CS_MODE_BIG_ENDIAN,
"CS_MODE_MIPS32+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO,
"CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN,
"CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN,
"CS_MODE_BIG_ENDIAN+CS_MODE_V9": CS_MODE_BIG_ENDIAN + CS_MODE_V9,
"CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN,
"CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN,
"CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN,
"CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN,
}
options = {
"CS_OPT_SYNTAX_ATT": CS_OPT_SYNTAX_ATT,
"CS_OPT_SYNTAX_NOREGNAME": CS_OPT_SYNTAX_NOREGNAME,
}
mc_modes = {
("CS_ARCH_X86", "CS_MODE_32"): ['-triple=i386'],
("CS_ARCH_X86", "CS_MODE_64"): ['-triple=x86_64'],
("CS_ARCH_ARM", "CS_MODE_ARM"): ['-triple=armv7'],
("CS_ARCH_ARM", "CS_MODE_THUMB"): ['-triple=thumbv7'],
("CS_ARCH_ARM", "CS_MODE_ARM+CS_MODE_V8"): ['-triple=armv8'],
("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_V8"): ['-triple=thumbv8'],
("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_MCLASS"): ['-triple=thumbv7m'],
("CS_ARCH_ARM64", "0"): ['-triple=aarch64'],
("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN"): ['-triple=mips'],
("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO"): ['-triple=mipsel', '-mattr=+micromips'],
("CS_ARCH_MIPS", "CS_MODE_MIPS64"): ['-triple=mips64el'],
("CS_ARCH_MIPS", "CS_MODE_MIPS32"): ['-triple=mipsel'],
("CS_ARCH_MIPS", "CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN"): ['-triple=mips64'],
("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN"): ['-triple=mips', '-mattr=+micromips'],
("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO"): ['-triple=mips', '-mattr=+micromips'],
("CS_ARCH_PPC", "CS_MODE_BIG_ENDIAN"): ['-triple=powerpc64'],
('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN'): ['-triple=sparc'],
('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN+CS_MODE_V9'): ['-triple=sparcv9'],
('CS_ARCH_SYSZ', '0'): ['-triple=s390x', '-mcpu=z196'],
}
#if not option in ('', 'None'):
# print archs[arch], modes[mode], options[option]
#print(arch, mode, option)
md = Cs(archs[arch], modes[mode])
if arch == 'CS_ARCH_ARM' or arch == 'CS_ARCH_PPC' :
md.syntax = CS_OPT_SYNTAX_NOREGNAME
if fname.endswith('3DNow.s.cs'):
md.syntax = CS_OPT_SYNTAX_ATT
for line in lines[1:]:
# ignore all the input lines having # in front.
if line.startswith('#'):
continue
#print("Check %s" %line)
code = line.split(' = ')[0]
asm = ''.join(line.split(' = ')[1:])
hex_code = code.replace('0x', '')
hex_code = hex_code.replace(',', '')
hex_data = hex_code.decode('hex')
#hex_bytes = array.array('B', hex_data)
x = list(md.disasm(hex_data, 0))
if len(x) > 0:
if x[0].op_str != '':
cs_output = "%s %s" %(x[0].mnemonic, x[0].op_str)
else:
cs_output = x[0].mnemonic
else:
cs_output = 'FAILED to disassemble'
cs_output2 = normalize_hex(cs_output)
cs_output2 = cs_output2.replace(' ', '')
if arch == 'CS_ARCH_MIPS':
# normalize register alias names
cs_output2 = cs_output2.replace('$at', '$1')
cs_output2 = cs_output2.replace('$v0', '$2')
cs_output2 = cs_output2.replace('$v1', '$3')
cs_output2 = cs_output2.replace('$a0', '$4')
cs_output2 = cs_output2.replace('$a1', '$5')
cs_output2 = cs_output2.replace('$a2', '$6')
cs_output2 = cs_output2.replace('$a3', '$7')
cs_output2 = cs_output2.replace('$t0', '$8')
cs_output2 = cs_output2.replace('$t1', '$9')
cs_output2 = cs_output2.replace('$t2', '$10')
cs_output2 = cs_output2.replace('$t3', '$11')
cs_output2 = cs_output2.replace('$t4', '$12')
cs_output2 = cs_output2.replace('$t5', '$13')
cs_output2 = cs_output2.replace('$t6', '$14')
cs_output2 = cs_output2.replace('$t7', '$15')
cs_output2 = cs_output2.replace('$t8', '$24')
cs_output2 = cs_output2.replace('$t9', '$25')
cs_output2 = cs_output2.replace('$s0', '$16')
cs_output2 = cs_output2.replace('$s1', '$17')
cs_output2 = cs_output2.replace('$s2', '$18')
cs_output2 = cs_output2.replace('$s3', '$19')
cs_output2 = cs_output2.replace('$s4', '$20')
cs_output2 = cs_output2.replace('$s5', '$21')
cs_output2 = cs_output2.replace('$s6', '$22')
cs_output2 = cs_output2.replace('$s7', '$23')
cs_output2 = cs_output2.replace('$k0', '$26')
cs_output2 = cs_output2.replace('$k1', '$27')
print("\t%s = %s" %(hex_code, cs_output))
if __name__ == '__main__':
if len(sys.argv) == 1:
fnames = sys.stdin.readlines()
for fname in fnames:
test_file(fname.strip())
else:
#print("Usage: ./test_mc.py <input-file.s.cs>")
test_file(sys.argv[1])
falanxia/tileset_baker | merge.py | Python | mit | 9,760 | 0.003381
#!/usr/bin/python
__author__ = 'Martin Samsula'
__email__ = '<martin@falanxia.com>'
import sys
import glob
import math
import xml.etree.ElementTree
import os
import os.path
try:
import simplejson
except:
import json
simplejson = json
def ds(data): return simplejson.dumps(data, indent=4, default=str)
from PIL import Image
print 'usage: python merge.py collision_tile [source_dir(default:src)]'
collision_tile = sys.argv[1]
src_dir = sys.argv[2] if len(sys.argv) > 2 else 'src'
output_dir = 'output'
try:
os.makedirs(output_dir)
except os.error:
pass
src_files = os.path.join(src_dir, '*.tmx')
class TileSet():
"""obrazek mapa tilu"""
def __init__(self):
self.name = ''
self.tile_width = 0
        self.tile_height = 0
self.source = ''
self.width = 0
        self.height = 0
self.first_gid = 0
self.collision_tile = 0
self._image = None
self._cut_tiles = {}
def out(self):
return {'name': self.name,
'tile_width': self.tile_width,
'tile_height': self.tile_height,
'source': self.source,
'width': self.width,
'height': self.height,
'first_gid': self.first_gid,
'collision_tile': self.collision_tile}
@property
def tiles_count(self):
return (self.width / self.tile_width) * (self.height / self.tile_height)
def get_new_tile_image(self):
return Image.new('RGBA', (self.tile_width, self.tile_height), (0, 0, 0, 0))
def get_tile_set_image(self):
if not self._image:
self._image = Image.open(os.path.join(src_dir, self.source))
return self._image
def get_tile_image(self, index):
index -= self.first_gid
left = (self.tile_width * index) % self.width
top = (self.tile_width * index) / self.width * self.tile_height
if index not in self._cut_tiles:
self._cut_tiles[index] = self.get_tile_set_image().crop((left, top, left+32, top+32))
return self._cut_tiles[index]
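    # Note: the crop above is hardcoded to 32x32 pixels; tile sets with a
    # different tile_width/tile_height would presumably need to use
    # self.tile_width/self.tile_height here instead.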
class Tile():
def __init__(self):
self.id = 0
class MergeSet(list):
"""list id tilu ktere se slepuji dohromady"""
def __init__(self, l):
list.__init__(self, l)
self.image = None
self.original_positions = {}
self.collision = 0
def set_position(self, file, pos):
if file not in self.original_positions:
self.original_positions[file] = []
self.original_positions[file].append(pos)
class Layer():
def __init__(self):
self.tiles = [Tile()]
self.name = ''
def out(self):
return {'name': self.name, 'first_100_tiles': ','.join(list(str(item.id) for item in self.tiles[0:100]))}
class LevelMap():
def __init__(self):
self.tile_set = TileSet()
self.layers = [Layer()]
self.file = ''
def out(self):
return {'tile_set': self.tile_set.out(), 'layers': list(item.out() for item in self.layers)}
@property
def tiles_count(self):
return len(self.layers[0].tiles)
def get_merge_sets(self):
merge_sets = []
for i in range(0, self.tiles_count):
merge_set = []
for layer in self.layers:
merge_set.append(layer.tiles[i].id)
merge_set = filter(None, merge_set)
merge_sets.append(merge_set)
return merge_sets
class Composer():
TITLESET_MAX_WIDTH = 1024
def __init__(self):
self.lmap = LevelMap()
self.merge_sets = []
self.final = TileSet()
def _is_included(self, merge_set):
for check in self.merge_sets:
if check[:] == merge_set[:]:
return check
merge_set = MergeSet(merge_set)
self.merge_sets.append(merge_set)
return merge_set
def merge_tiles(self, lmap):
assert isinstance(lmap, LevelMap)
self.lmap = lmap
i = -1
for merge_set in self.lmap.get_merge_sets():
i += 1
# get_merge_sets() nam vraci i prazdne sety, potrebuje si ale drzet index i (pozici v mape levelu)
if not merge_set:
continue
merge_set = self._is_included(merge_set)
merge_set.set_position(self.lmap.file, i)
for tile_id in merge_set:
if tile_id >= self.lmap.tile_set.collision_tile:
merge_set.collision = 1
merge_set.image = self.lmap.tile_set.get_new_tile_image()
for tile_id in merge_set:
if tile_id:
part_image = self.lmap.tile_set.get_tile_image(tile_id)
merge_set.image.paste(part_image, None, part_image)
merge_set.image = merge_set.image.convert('RGB')
def sort_by_collision(self):
self.merge_sets = sorted(self.merge_sets, key=lambda x: x.collision)
i = 0
for merge_set in self.merge_sets:
if merge_set.collision:
break
i += 1
return i
def make_new_tile_set(self):
tiles_count = len(self.merge_sets)
tiles_in_width = int(math.sqrt(tiles_count)) + 1
self.final.width = tiles_in_width * self.lmap.tile_set.tile_width
if self.final.width > self.TITLESET_MAX_WIDTH:
print 'new baked tileset will be larger than 1024x1024px'
# deleni na cela cisla
tiles_in_width = self.TITLESET_MAX_WIDTH / self.lmap.tile_set.tile_width
self.final.width = tiles_in_width * self.lmap.tile_set.tile_width
tiles_in_height = (tiles_count / tiles_in_width) + 1
self.final.height = tiles_in_height * self.lmap.tile_set.tile_height
new_tile_set = Image.new('RGBA', (self.final.width, self.final.height), (0, 0, 0, 0))
# zaciname na druhem tilu, prvni zustava transparentni
left = self.lmap.tile_set.tile_width
top = 0
for merge_set in self.merge_sets:
new_tile_set.paste(merge_set.image, (left, top))
left += self.lmap.tile_set.tile_width
if left >= self.final.width:
left = 0
top += self.lmap.tile_set.tile_height
new_tile_set.save(os.path.join(output_dir, os.path.basename(self.lmap.tile_set.source)))
composer = Composer()
print 'parsing tmx files'
for file in glob.glob(src_files):
print file
lmap_xml = xml.etree.ElementTree.fromstring(open(file).read())
lmap = LevelMap()
lmap.layers = []
lmap.file = file
title_set = lmap_xml.find('tileset')
image = title_set.find('image')
lmap.tile_set = TileSet()
lmap.tile_set.name = title_set.get('name')
lmap.tile_set.collision_tile = int(collision_tile)
lmap.tile_set.tile_height = int(title_set.get('tileheight'))
lmap.tile_set.tile_width = int(title_set.get('tilewidth'))
lmap.tile_set.first_gid = int(title_set.get('firstgid'))
lmap.tile_set.source = image.get('source')
lmap.tile_set.height = int(image.get('height'))
lmap.tile_set.width = int(image.get('width'))
for item in lmap_xml.getiterator('layer'):
layer = Layer()
layer.tiles = []
layer.name = item.get('name')
lmap.layers.append(layer)
data = item.find('data')
if data.get('compression'):
print 'this script does not support tile data compression, pls use XML format'
sys.exit()
if data.get('compression') or data.get('encoding'):
print 'this script does not support tile data encoding, pls use XML format'
sys.exit()
for dtile in data:
tile = Tile()
tile.id = int(dtile.get('gid'))
layer.tiles.append(tile)
# print ds(lmap.out())
if not lmap.layers:
continue
print 'working on level part', lmap.file
print 'tiles in level map:', lmap.tiles_count
print 'tiles in original tile set:', lmap.tile_set.tiles_count
print 'merging layers...',
composer.merge_tiles(lmap)
print 'done'
if not composer.merge_sets:
print 'nothin
jeremiahyan/odoo | addons/account_edi/models/ir_actions_report.py | Python | gpl-3.0 | 1,420 | 0.002113
# -*- coding: utf-8 -*-
import io
from odoo import models
from odoo.tools.pdf import OdooPdfFileReader, OdooPdfFileWriter
class IrActionsReport(models.Model):
_inherit = 'ir.actions.report'
    def _post_pdf(self, save_in_attachment, pdf_content=None, res_ids=None):
# OVERRIDE to embed some EDI documents inside the PDF.
if self.model == 'account.move' and res_ids and len(res_ids) == 1 and pdf_content:
invoice = self.env['account.move'].browse(res_ids)
if invoice.is_sale_document() and invoice.state != 'draft':
to_embed = invoice.edi_document_ids
# Add the attachments to the pdf file
if to_embed:
reader_buffer = io.BytesIO(pdf_content)
reader = OdooPdfFileReader(reader_buffer, strict=False)
writer = OdooPdfFileWriter()
writer.cloneReaderDocumentRoot(reader)
for edi_document in to_embed:
edi_document.edi_format_id._prepare_invoice_report(writer, edi_document)
buffer = io.BytesIO()
writer.write(buffer)
pdf_content = buffer.getvalue()
reader_buffer.close()
buffer.close()
return super(IrActionsReport, self)._post_pdf(save_in_attachment, pdf_content=pdf_content, res_ids=res_ids)
lewixliu/git-repo | subcmds/init.py | Python | apache-2.0 | 20,047 | 0.007333
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import optparse
import os
import platform
import re
import sys
from pyversion import is_python3
if is_python3():
import urllib.parse
else:
import imp
import urlparse
urllib = imp.new_module('urllib')
urllib.parse = urlparse
from color import Coloring
from command import InteractiveCommand, MirrorSafeCommand
from error import ManifestParseError
from project import SyncBuffer
from git_config import GitConfig
from git_command import git_require, MIN_GIT_VERSION_SOFT, MIN_GIT_VERSION_HARD
import platform_utils
from wrapper import Wrapper
class Init(InteractiveCommand, MirrorSafeCommand):
common = True
helpSummary = "Initialize repo in the current directory"
helpUsage = """
%prog [options]
"""
helpDescription = """
The '%prog' command is run once to install and initialize repo.
The latest repo source code and manifest collection is downloaded
from the server and is installed in the .repo/ directory in the
current working directory.
The optional -b argument can be used to select the manifest branch
to checkout and use. If no branch is specified, the remote's default
branch is used.
The optional -m argument can be used to specify an alternate manifest
to be used. If no manifest is specified, the manifest default.xml
will be used.
The --reference option can be used to point to a directory that
has the content of a --mirror sync. This will make the working
directory use as much data as possible from the local reference
directory when fetching from the server. This will make the sync
go a lot faster by reducing data traffic on the network.
The --dissociate option can be used to borrow the objects from
the directory specified with the --reference option only to reduce
network transfer, and stop borrowing from them after a first clone
is made by making necessary local copies of borrowed objects.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
# Switching Manifest Branches
To switch to another manifest branch, `repo init -b otherbranch`
may be used in an existing client. However, as this only updates the
manifest, a subsequent `repo sync` (or `repo sync -d`) is necessary
to update the working directory files.
"""
def _Options(self, p, gitc_init=False):
# Logging
g = p.add_option_group('Logging options')
g.add_option('-v', '--verbose',
dest='output_mode', action='store_true',
help='show all output')
g.add_option('-q', '--quiet',
dest='output_mode', action='store_false',
help='only show errors')
# Manifest
g = p.add_option_group('Manifest options')
g.add_option('-u', '--manifest-url',
dest='manifest_url',
help='manifest repository location', metavar='URL')
g.add_option('-b', '--manifest-branch',
dest='manifest_branch',
help='manifest branch or revision', metavar='REVISION')
cbr_opts = ['--current-branch']
# The gitc-init subcommand allocates -c itself, but a lot of init users
# want -c, so try to satisfy both as best we can.
if not gitc_init:
cbr_opts += ['-c']
g.add_option(*cbr_opts,
dest='current_branch_only', action='store_true',
help='fetch only current manifest branch from server')
g.add_option('-m', '--manifest-name',
dest='manifest_name', default='default.xml',
help='initial manifest file', metavar='NAME.xml')
g.add_option('--mirror',
dest='mirror', action='store_true',
help='create a replica of the remote repositories '
'rather than a client working directory')
g.add_option('--reference',
dest='reference',
help='location of mirror directory', metavar='DIR')
g.add_option('--dissociate',
dest='dissociate', action='store_true',
help='dissociate from reference mirrors after clone')
g.add_option('--depth', type='int', default=None,
dest='depth',
help='create a shallow clone with given depth; see git clone')
g.add_option('--partial-clone', action='store_true',
dest='partial_clone',
help='perform partial clone (https://git-scm.com/'
'docs/gitrepository-layout#_code_partialclone_code)')
g.add_option('--clone-filter', action='store', default='blob:none',
dest='clone_filter',
help='filter for use with --partial-clone [default: %default]')
# TODO(vapier): Expose option with real help text once this has been in the
# wild for a while w/out significant bug reports. Goal is by ~Sep 2020.
g.add_option('--worktree', action='store_true',
help=optparse.SUPPRESS_HELP)
g.add_option('--archive',
dest='archive', action='store_true',
help='checkout an archive instead of a git repository for '
'each project. See git archive.')
g.add_option('--submodules',
dest='submodules', action='store_true',
help='sync any submodules associated with the manifest repo')
g.add_option('-g', '--groups',
dest='groups', default='default',
help='restrict manifest projects to ones with specified '
'group(s) [default|all|G1,G2,G3|G4,-G5,-G6]',
metavar='GROUP')
g.add_option('-p', '--platform',
dest='platform', default='auto',
help='restrict manifest projects to ones with a specified '
'platform group [auto|all|none|linux|darwin|...]',
metavar='PLATFORM')
g.add_option('--clone-bundle', action='store_true',
help='force use of /clone.bundle on HTTP/HTTPS (default if not --partial-clone)')
g.add_option('--no-clone-bundle',
dest='clone_bundle', action='store_false',
help='disable use of /clone.bundle on HTTP/HTTPS (default if --partial-clone)')
g.add_option('--no-tags',
dest='tags', default=True, action='store_false',
help="don't fetch tags in the
|
manifest")
# Tool
g = p.add_option_group('repo Version options')
g.add_option('--repo-url',
dest='repo_url',
help='repo repository location', metavar='URL')
g.add_option('--repo-rev', metavar='REV',
help='repo branch or revision')
g.add_option('--repo-branch', dest='repo_rev',
help=optparse.SUPPRESS_HELP)
g.add_option('--no-repo-verify',
dest='repo_verify', default=True, action='store_false',
help='do not verify repo source code')
# Other
g = p.add_option_group('Other options')
g.add_option('--config-name',
dest='config_name', action="store_true", default=False,
help='Always prompt for name/e-mail')
def _RegisteredEnvironmentOptions(self):
return {'REPO_MANIFEST_URL': 'manifest_url',
'REPO_MIRROR_LOCATION': 'reference'}
def _SyncManifest(self, opt):
m = self.manifest.manifestProject
is_new = not m.Exists
if is
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/plugins/cred_anonymous.py | Python | mit | 968 | 0.003099
# -*- test-case-name: twisted.test.test_strcred -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cred plugin for anonymous logins.
"""
from zope.interface import implementer
from twisted import plugin
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.cred.strcred import ICheckerFactory
from twisted.cred.credentials import IAnonymous
anonymousCheckerFactoryHelp = """
This allows anonymous authentication for servers that support it.
"""
@implementer(ICheckerFactory, plugin.IPlugin)
class AnonymousCheckerFactory(object):
"""
Generates checkers that will authenticate an anonymous request.
"""
authType = 'anonymous'
authHelp = anonymousCheckerFactoryHelp
argStringFormat = 'No argstring required.'
credentialInterfaces = (IAnonymous,)
def generateChecker(self, argstring=''):
return AllowAnonymousAccess()
theAnonymousCheckerFactory = AnonymousCheckerFactory()
Huyuwei/tvm | topi/python/topi/nn/sparse.py | Python | apache-2.0 | 7,132 | 0.00028
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sparse operators"""
from __future__ import absolute_import
import tvm
from ..util import get_const_tuple
@tvm.target.generic_func
def sparse_dense(data, weight_data, weight_indices, weight_indptr):
"""
Computes sparse-dense matrix multiplication of `data` and
`(weight_data, weight_indices, weight_indptr).T`
Parameters
----------
x : tvm.Tensor
2-D with shape [M, K], float32
weight_data : tvm.Tensor
1-D with shape [nnz] (CSR) or
3-D with shape [num_blocks, bs_r, bs_c] (BSR)
weight_indices : tvm.Tensor
1-D with shape [nnz] (CSR) or
1-D with shape [num_blocks] (BSR)
weight_indptr : tvm.Tensor
1-D with shape [N + 1] (CSR) or
1-D with shape [(N + 1) // bs_r] (BSR)
Returns
-------
output : tvm.Tensor
2-D with shape [M, N]
"""
assert len(weight_data.shape) in (1, 3)
if len(weight_data.shape) == 1:
func = _sparse_dense_csrmm
if len(weight_data.shape) == 3:
func = _sparse_dense_bsrmm
return func(data, weight_data, weight_indices, weight_indptr)
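# Minimal usage sketch for the CSR path (shapes and names are assumptions, not
# part of this module):
#
#   data = tvm.placeholder((128, 64), name='data')
#   w_data = tvm.placeholder((512,), name='w_data')
#   w_indices = tvm.placeholder((512,), dtype='int32', name='w_indices')
#   w_indptr = tvm.placeholder((33,), dtype='int32', name='w_indptr')
#   out = sparse_dense(data, w_data, w_indices, w_indptr)  # shape (128, 32)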
def _sparse_dense_csrmm(data, weight_data, weight_indices, weight_indptr):
oshape = (
get_const_tuple(data.shape)[0],
get_const_tuple(weight_indptr.shape)[0] - 1)
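    # f(i, row) computes out[i, row] as a dot product over the nonzeros of CSR
    # row `row`: sum over elem of weight_data[elem] * data[i, indices[elem]].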
def f(i, row):
row_start = weight_indptr[row]
row_end = weight_indptr[row + 1]
row_elems = row_end - row_start
elem_idx = tvm.reduce_axis((0, row_elems), name="elem_idx")
elem = row_start + elem_idx
a_val = weight_data[elem]
weight_val = data[i, weight_indices[elem]]
return tvm.sum(a_val * weight_val, axis=elem_idx)
return tvm.compute(oshape, f, tag="sparse_dense_csrmm")
def _sparse_dense_bsrmm(data, weight_data, weight_indices, weight_indptr):
(m, _) = get_const_tuple(data.shape)
(_, bs_r, bs_c) = get_const_tuple(weight_data.shape)
(num_blocks_plus_1, ) = get_const_tuple(weight_indptr.shape)
num_blocks = num_blocks_plus_1 - 1
def _compute_block(i, nb_j, j):
row_start = weight_indptr[nb_j]
row_end = weight_indptr[nb_j + 1]
row_elems = row_end - row_start
elem_idx = tvm.reduce_axis(
(0, row_elems), name="elem_idx")
block_offset = row_start + elem_idx
c = tvm.reduce_axis((0, bs_c), name="c")
block_j = weight_indices[block_offset]
block_ij_val = weight_data[block_offset][j][c]
x_val = data[i, bs_c * block_j + c]
return tvm.sum(block_ij_val * x_val, axis=[elem_idx, c])
bsrmm_block = tvm.compute(
(m, num_blocks, bs_r), _compute_block,
tag="sparse_dense_bsrmm_block")
return tvm.compute(
(m, num_blocks * bs_r),
lambda m, n: bsrmm_block[m, n // bs_r, n % bs_r],
tag="sparse_dense_bsrmm")
@tvm.target.generic_func
def sparse_transpose(sparse_data, sparse_indices, sparse_indptr):
"""
Transpose a square sparse matrix,
`A` is an n-by-n sparse matrix in the CSR format.
** Currently only support Square Matrices **
Parameters
----------
sparse_data : tvm.Tensor
1-D with shape [nonzeros], dtype of 'float32'
sparse_indices : tvm.Tensor
        1-D with shape [nonzeros], dtype of 'int32'
sparse_indptr : tvm.Tensor
1-D with shape [n+1], dtype of 'int32'
Returns
-------
out_data : tvm.Tensor
1-D with shape [nonzeros], dtype of 'float32'
out_indices : tvm.Tensor
1-D with shape [nonzeros], dtype of 'int32'
out_indptr : tvm.Tensor
        1-D with shape [n+1], dtype of 'int32'
"""
assert len(sparse_data.shape) == 1, "error in data dimension"
assert len(sparse_indices.shape) == 1, "error in indices dimension"
assert len(sparse_indptr.shape) == 1, "error in indptr dimension"
nnz = get_const_tuple(sparse_data.shape)[0]
n = get_const_tuple(sparse_indptr.shape)[0] - 1
output_shape = [(nnz,), (nnz,), (n+1,)]
# TODO: Add BSR transpose support
output_data, output_indices, output_indptr = tvm.extern(
shape=output_shape,
inputs=[sparse_data, sparse_indices, sparse_indptr],
fcompute=lambda ins, outs:
csr_transpose_ir(ins[0], ins[1], ins[2], outs[0], outs[1], outs[2]),
tag="sparse_transpose_csr",
dtype=['float32', 'int32', 'int32'],
name='out')
return [output_data, output_indices, output_indptr]
def csr_transpose_ir(data, indices, indptr, out_data, out_indices, out_indptr):
"""define ir for csr_transpose"""
irb = tvm.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
out_data_ptr = irb.buffer_ptr(out_data)
out_indices_ptr = irb.buffer_ptr(out_indices)
out_indptr_ptr = irb.buffer_ptr(out_indptr)
n = get_const_tuple(indptr.shape)[0] - 1
nnz = get_const_tuple(data.shape)[0]
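    # Pass 1: count the nonzeros in each column of the input; these become the
    # row lengths of the transpose.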
with irb.for_range(0, n, for_type="parallel", name='col') as col:
out_indptr_ptr[col] = 0
with irb.for_range(0, nnz, for_type="serial", name='nz_idx') as nz_idx:
out_indptr_ptr[indices_ptr[nz_idx]] += 1
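    # Pass 2: an exclusive prefix sum turns the per-column counts into starting
    # offsets for each output row.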
cumsum = irb.allocate('int32', (1,), name='cumsum', scope='local')
temp = irb.allocate('int32', (1,), name='temp', scope='local')
cumsum[0] = 0
with irb.for_range(0, n, for_type="serial", name='col') as col:
temp[0] = out_indptr_ptr[col]
out_indptr_ptr[col] = cumsum[0]
cumsum[0] += temp[0]
out_indptr_ptr[n] = nnz
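    # Pass 3: scatter every entry to its transposed position, bumping the
    # per-column write offset as we go.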
with irb.for_range(0, n, for_type="serial", name='row') as row:
offset = indptr_ptr[row]
diff = indptr_ptr[row+1] - indptr_ptr[row]
with irb.for_range(0, diff, for_type="serial", name='idx') as idx:
real_idx = offset + idx
col = indices_ptr[real_idx]
dest = out_indptr_ptr[col]
out_indices_ptr[dest] = row
out_data_ptr[dest] = data_ptr[real_idx]
out_indptr_ptr[col] += 1
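    # Pass 4: the scatter advanced each offset by one row; shift the offsets
    # back to restore out_indptr.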
last = irb.allocate('int32', (1,), name='last', scope='local')
temp2 = irb.allocate('int32', (1,), name='temp2', scope='local')
last[0] = 0
with irb.for_range(0, n, for_type="serial", name="col") as col:
temp2[0] = out_indptr_ptr[col]
out_indptr_ptr[col] = last[0]
last[0] = temp2[0]
return irb.get()
jasonballensky/django-guardian | example_project/posts/urls.py | Python | bsd-2-clause | 222 | 0.009009
from guardian.compat import url, patterns
urlpatterns = patterns('posts.views',
    url(r'^$', view='post_list', name='posts_post_list'),
url(r'^(?P<slug>[-\w]+)/$', view='post_detail', name='posts_post_detail'),
)
matteoferla/Geobacillus | geo_mutagenesis.py | Python | gpl-2.0 | 2,769 | 0.015529
__author__ = 'Matteo'
__doc__='''This could be made into a handy mutagenesis library if I had time.'''
from Bio.Seq import Seq,MutableSeq
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from difflib import Differ
def Gthg01471():
ori=Seq("ATGAGCATAAGTTTATCGGTTCCAAAATGGTTATTAACAGTTTTATCAATTTTATCTTTAGTCGTAGCATTTATTTTCGGTACCGTTTCCAATGCATCAGCAACAATTAACTATGGGGAGGAAGTCGCGGCAGTAGCAAATGACTATGTAGGAAGCCCATATAAATATGGAGGTACAACGCCAAAAGGATTTGATGCGAGTGGCTTTACTCAGTATGTGTATAAAAATGCTGCAACCAAATTGGCTATTCCGCGAACGAGTGCCGCACAGTATAAAGTCGGTAAATTTGTTAAACAAAGTGCGTTACAAAGAGGCGATTTAGTGTTTTATGCAACAGGAGCAAAAGGAAAGGTATCCTTTGTGGGAATTTATAATGGAAATGGTACGTTTATTGGTGCCACATCAAAAGGGGTAAAAGTGGTTAAAATGAGTGATAAATATTGGAAAGACCGGTATATAGGGGCTAAGCGAGTCATTAAGTAA", IUPAC.unambiguous_dna)
mut=MutableSeq("ATGAGCATAAGTTTATCGGTTCCAAAATGGTTATTAACAGTTTTATCAATTTTATCTTTAGTCGTAGCATTTATTTTCGGTACCGTTTCCAATGCATCAGCAACAATTAACTATGGGGAGGAAGTCGCGGCAGTAGCAAATGACTATGTAGGAAGCCCATATAAATATGGAGGTACAACGCCAAAAGGATTTGATGCGAGTGGCTTTACTCAGTATGTGTATAAAAATGCTGCAACCAAATTGGCTATTCCGCGAACGAGTGCCGCACAGTATAAAGTCGGTAAATTTGTTAAACAAAGTGCGTTACAAAGAGGCGATTTAGTGTTTTATGCAACAGGAGCAAAAGGAAAGGTATCCTTTGTGGGAATTTATAATGGAAATGGTACGTTTATTGGTGCCACATCAAAAGGGGTAAAAGTGGTTAAAATGAGTGATAAATATTGGAAAGACCGGTATATAGGGGCTAAGCGAGTCATTAAGTAA", IUPAC.unambiguous_dna)
a="AGTCGA"
b="GACTAG"
for i,v in enumerate([259,277,282,295,299,306]):
print(mut[v-1]+a[i])
mut[v-1]=b[i]
print(ori.translate())
print(mut.toseq().translate())
def Gthg04369():
filepath="Gthg_from_embl_pfamed.gb"
genome = list(SeqIO.parse(open(filepath, "rU"), "genbank"))
z=genome[0].seq[3583975:3585290].translate(to_stop=1)
x=genome[0].seq[3583975:3585290].tomutable()
print(x.pop(895-1))
y=x.toseq().translate(to_stop=1)
print(z)
print(y)
print(list(Differ().compare(str(z),str(y))))
print(len(z),len(y))
def Gthg01115():
filepath="Gthg_from_embl_pfamed.gb"
genome = list(SeqIO.parse(open(filepath, "rU"), "genbank"))
z=genome[0].seq[891404:892205].reverse_complement().translate(to_stop=1)
x=genome[0].seq[891404:892205].reverse_complement().tomutable()
print(x.pop(421-1))
y=x.toseq().translate(to_stop=1)
print(z)
print(y)
print(list(Differ().compare(str(z),str(y))))
print(len(z),len(y))
def Gthg03544():
filepath="Gthg_from_embl_pfamed.gb"
genome = list(SeqIO.parse(open(filepath, "rU"), "genbank"))
z=genome[0].seq[2885410:2887572].reverse_complement().translate(to_stop=1)
    x=genome[0].seq[2885410:2887572].reverse_complement().tomutable()
print(x.pop(1748-1))
    y=x.toseq().translate(to_stop=1)
print(z)
print(y)
print(list(Differ().compare(str(z),str(y))))
print(len(z),len(y))
if __name__ == "main":
pass
theKono/mobile-push | bin/competing_consumer.py | Python | apache-2.0 | 1,384 | 0
#!/usr/bin/env python
# standard library imports
import signal
# third party related imports
import boto.sqs
import ujson
# local library imports
from mobile_push.config import setting
from mobile_push.logger import logger
from mobile_push.message_router import MessageRouter
keep_running = True
def sigterm_handler(signum, _):
    global keep_running
    logger.warn('Receive SIGTERM')
keep_running = False
def get_queue():
conn = boto.sqs.connect_to_region(setting.get('sqs', 'region'))
return conn.get_queue(setting.get('sqs', 'queue'))
def poll_message(queue):
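    # wait_time_seconds=20 enables SQS long polling: read() blocks for up to
    # 20 seconds waiting for a message instead of returning immediately.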
message = queue.read(wait_time_seconds=20)
if message is None:
return
try:
body = message.get_body()
units = ujson.loads(body)
except ValueError:
logger.error('Cannot parse: %s', body)
units = []
if not isinstance(units, list):
units = [units]
for unit in units:
try:
MessageRouter(unit).get_actor().run(unit)
except MessageRouter.BaseError:
logger.error('Cannot route message: %s', ujson.dumps(unit))
except Exception as e:
logger.exception(e)
queue.delete_message(message)
def main():
signal.signal(signal.SIGTERM, sigterm_handler)
q = get_queue()
while keep_running:
poll_message(q)
if __name__ == '__main__':
main()
bzamecnik/ml-playground | ml-playground/boston_dataset_exploration/data_analysis.py | Python | mit | 6,084 | 0.006903
'''
This script analyzes the Boston housing dataset available via scikit-learn. It
generates a textual report and a set of plot images into the 'report' directory.
'''
import logging
import matplotlib
# non-interactive plotting - just outputs the images and doesn't open the window
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import seaborn.linearmodels as snslm
from sklearn.datasets import load_boston
def dataset_to_dataframe(dataset, target_name):
df = pd.DataFrame(dataset.data, columns=dataset.feature_names)
df[target_name] = dataset.target
return df
def print_structure(dataset, file):
logging.debug('Analyzing dataset structure')
print('Number of instances:', dataset.data.shape[0], file=file)
print('Number of attributes:', dataset.data.shape[1], file=file)
print('Attribute names:', ', '.join(dataset.feature_names), file=file)
def summarize_distributions(df, file):
logging.debug('Summarizing attribute distributions')
print('Attribute distribution summary:', file=file)
# pd.set_option('display.width', 200)
desc = df.describe().T
desc['mode'] = df.mode().ix[0]
print(desc, file=file)
# print(df.describe().T[['count','mean','std','min','50%','max']], file=file)
missing_counts = pd.isnull(df).sum()
if missing_counts.any():
print('Missing values:', file=file)
print(missing_counts, file=file)
else:
print('Missing values: NONE', file=file)
def print_correlations(df, file):
logging.debug('Analyzing attribute pairwise correlations')
print("Pearson's correlation:", file=file)
pearson = df.corr(method='pearson')
print(pearson, file=file)
print("Spearman's correlation:", file=file)
spearman = df.corr(method='spearman')
print(spearman, file=file)
def predictivity(correlations):
corrs_with_target = correlations.ix[-1][:-1]
return corrs_with_target[abs(corrs_with_target).argsort()[::-1]]
print('Attribute-target correlations (Pearson):', file=file)
print(predictivity(pearson), file=file)
print('Attribute-target correlations (Spearman):', file=file)
print(predictivity(spearman), file=file)
print('Important attribute correlations (Pearson):', file=file)
attrs = pearson.iloc[:-1,:-1] # all except target
# only important correlations and not auto-correlations
threshold = 0.5
important_corrs = (attrs[abs(attrs) > threshold][attrs != 1.0]) \
.unstack().dropna().to_dict()
unique_important_corrs = pd.DataFrame(
list(set([(tuple(sorted(key)), important_corrs[key]) \
for key in important_corrs])), columns=['attribute pair', 'correlation'])
unique_important_corrs = unique_important_corrs.ix[
abs(unique_important_corrs['correlation']).argsort()[::-1]]
print(unique_important_corrs, file=file)
def attribute_correlations(df, img_file='attr_correlations.png'):
logging.debug('Plotting attribute pairwise correlations')
# custom figure size (in inches) to cotrol the relative font size
fig, ax = plt.subplots(figsize=(10, 10))
# nice custom red-blue diverging colormap with white center
cmap = sns.diverging_palette(250, 10, n=3, as_cmap=True)
# Correlation plot
# - attribute names on diagonal
# - color-coded correlation value in lower triangle
# - values and significance in the upper triangle
# - color bar
# If there a lot of attributes we can disable the annotations:
# annot=False, sig_stars=False, diag_names=False
snslm.corrplot(df, ax=ax, cmap=cmap)
# remove white borders
fig.tight_layout()
fig.savefig(img_file)
plt.close(fig)
def attribute_histograms(df, real_cols, int_cols):
def plot_hist(col, func):
file = 'dist_{}.png'.format(col)
logging.debug('histogram: %s', file)
fig = plt.figure()
func(col)
fig.tight_layout()
fig.savefig(file)
plt.close(fig)
def plot_real(col):
sns.distplot(df[col])
def plot_int(col):
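        # value_counts() yields (value, count) pairs; zip(*...) unzips them
        # into the x positions and bar heights that plt.bar expects.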
plt.bar(*list(zip(*df[col].value_counts().items())), alpha=0.5)
plt.xlabel(col)
logging.debug('Plotting attribute histograms')
for col in real_cols:
        plot_hist(col, plot_real)
for col in int_cols:
plot_hist(col, plot_int)
def pairwise_scatter_matrix(df, img_file='pairwise_scatter_matrix.png'):
    logging.debug('Plotting pairwise scatter matrix')
grid = sns.pairplot(df)
grid.savefig(img_file)
plt.close()
def pairwise_joint_plots(df, cols):
logging.debug('Plotting pairwise joint distributions')
cols = sorted(cols)
for colA, colB in [(a,b) for a in cols for b in cols if a < b]:
file = 'joint_{}_{}.png'.format(colA, colB)
logging.debug('joint plot: %s', file)
fig = plt.figure()
sns.jointplot(df[colA], df[colB], kind='hex')
plt.savefig(file)
plt.close()
def make_report(dataset, df, report_file_name='data_analysis_report.txt'):
report_file = open(report_file_name, 'w')
print_structure(dataset, report_file)
summarize_distributions(df, report_file)
print_correlations(df, report_file)
logging.info('Report is in file: %s', report_file_name)
def visualize(df, int_cols):
sns.set(style='darkgrid')
int_cols = set(int_cols)
real_cols = set(df.columns) - int_cols
attribute_correlations(df)
attribute_histograms(df, real_cols, int_cols)
pairwise_joint_plots(df, real_cols)
pairwise_scatter_matrix(df)
if __name__ == '__main__':
log_format='%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.DEBUG)
# load data
boston = load_boston()
df = dataset_to_dataframe(boston, target_name='MEDV')
report_dir = 'report'
os.makedirs(report_dir, exist_ok=True)
os.chdir(report_dir)
make_report(boston, df)
visualize(df, int_cols=['CHAS', 'RAD'])
logging.debug('Done')
pas256/troposphere | troposphere/s3.py | Python | bsd-2-clause | 11,735 | 0
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
import warnings
from . import AWSHelperFn, AWSObject, AWSProperty, Tags
from .validators import boolean, positive_integer, s3_bucket_name
from .validators import s3_transfer_acceleration_status
try:
from awacs.aws import Policy
policytypes = (dict, Policy)
except ImportError:
policytypes = dict,
Private = "Private"
PublicRead = "PublicRead"
PublicReadWrite = "PublicReadWrite"
AuthenticatedRead = "AuthenticatedRead"
BucketOwnerRead = "BucketOwnerRead"
BucketOwnerFullControl = "BucketOwnerFullControl"
LogDeliveryWrite = "LogDeliveryWrite"
class CorsRules(AWSProperty):
props = {
'AllowedHeaders': ([basestring], False),
'AllowedMethods': ([basestring], True),
'AllowedOrigins': ([basestring], True),
'ExposedHeaders': ([basestring], False),
'Id': (basestring, False),
'MaxAge': (positive_integer, False),
}
class CorsConfiguration(AWSProperty):
props = {
'CorsRules': ([CorsRules], True),
}
class VersioningConfiguration(AWSProperty):
props = {
'Status': (basestring, False),
}
class AccelerateConfiguration(AWSProperty):
props = {
'AccelerationStatus': (s3_transfer_acceleration_status, True),
}
class RedirectAllRequestsTo(AWSProperty):
props = {
'HostName': (basestring, True),
'Protocol': (basestring, False),
}
class RedirectRule(AWSProperty):
props = {
'HostName': (basestring, False),
'HttpRedirectCode': (basestring, False),
'Protocol': (basestring, False),
'ReplaceKeyPrefixWith': (basestring, False),
'ReplaceKeyWith': (basestring, False),
}
class RoutingRuleCondition(AWSProperty):
props = {
'HttpErrorCodeReturnedEquals': (basestring, False),
'KeyPrefixEquals': (basestring, False),
}
class RoutingRule(AWSProperty):
props = {
'RedirectRule': (RedirectRule, True),
'RoutingRuleCondition': (RoutingRuleCondition, False),
}
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
'RedirectAllRequestsTo': (RedirectAllRequestsTo, False),
'RoutingRules': ([RoutingRule], False),
}
class LifecycleRuleTransition(AWSProperty):
props = {
'StorageClass': (basestring, True),
'TransitionDate': (basestring, False),
'TransitionInDays': (positive_integer, False),
}
class AbortIncompleteMultipartUpload(AWSProperty):
props = {
'DaysAfterInitiation': (positive_integer, True),
}
class NoncurrentVersionTransition(AWSProperty):
props = {
'StorageClass': (basestring, True),
'TransitionInDays': (positive_integer, True),
}
class TagFilter(AWSProperty):
props = {
'Key': (basestring, True),
'Value': (basestring, True),
}
class LifecycleRule(AWSProperty):
props = {
'AbortIncompleteMultipartUpload':
(AbortIncompleteMultipartUpload, False),
'ExpirationDate': (basestring, False),
'ExpirationInDays': (positive_integer, False),
'Id': (basestring, False),
'NoncurrentVersionExpirationInDays': (positive_integer, False),
'NoncurrentVersionTransition': (NoncurrentVersionTransition, False),
'NoncurrentVersionTransitions': ([NoncurrentVersionTransition], False),
'Prefix': (basestring, False),
'Status': (basestring, True),
'TagFilters': ([TagFilter], False),
'Transition': (LifecycleRuleTransition, False),
'Transitions': ([LifecycleRuleTransition], False)
}
def validate(self):
if 'Transition' in self.properties:
if 'Transitions' not in self.properties:
# aws moved from a single transition to a list of them
# and deprecated 'Transition', so let's just move it to
# the new property and not annoy the user.
self.properties['Transitions'] = [
self.properties.pop('Transition')]
else:
raise ValueError(
'Cannot specify both "Transition" and "Transitions" '
                    'properties on S3 Bucket Lifecycle Rule. Please use '
'"Transitions" since the former has been deprecated.')
if 'NoncurrentVersionTransition' in self.properties:
if 'NoncurrentVersionTransitions' not in self.properties:
warnings.warn(
'NoncurrentVersionTransition has been deprecated in '
'favour of NoncurrentVersionTransitions.'
)
                # Translate the old transition format to the new format
self.properties['NoncurrentVersionTransitions'] = [
self.properties.pop('NoncurrentVersionTransition')]
else:
raise ValueError(
'Cannot specify both "NoncurrentVersionTransition" and '
'"NoncurrentVersionTransitions" properties on S3 Bucket '
'Lifecycle Rule. Please use '
'"NoncurrentVersionTransitions" since the former has been '
'deprecated.')
if 'ExpirationInDays' in self.properties and 'ExpirationDate' in \
self.properties:
raise ValueError(
'Cannot specify both "ExpirationDate" and "ExpirationInDays"'
)
class LifecycleConfiguration(AWSProperty):
props = {
'Rules': ([LifecycleRule], True),
}
class LoggingConfiguration(AWSProperty):
props = {
'DestinationBucketName': (s3_bucket_name, False),
'LogFilePrefix': (basestring, False),
}
class Rules(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True)
}
class S3Key(AWSProperty):
props = {
'Rules': ([Rules], True)
}
class Filter(AWSProperty):
props = {
'S3Key': (S3Key, True)
}
class LambdaConfigurations(AWSProperty):
props = {
'Event': (basestring, True),
'Filter': (Filter, False),
'Function': (basestring, True),
}
class QueueConfigurations(AWSProperty):
props = {
'Event': (basestring, True),
'Filter': (Filter, False),
'Queue': (basestring, True),
}
class TopicConfigurations(AWSProperty):
props = {
'Event': (basestring, True),
'Filter': (Filter, False),
'Topic': (basestring, True),
}
class MetricsConfiguration(AWSProperty):
props = {
'Id': (basestring, True),
'Prefix': (basestring, False),
'TagFilters': ([TagFilter], False),
}
class NotificationConfiguration(AWSProperty):
props = {
'LambdaConfigurations': ([LambdaConfigurations], False),
'QueueConfigurations': ([QueueConfigurations], False),
'TopicConfigurations': ([TopicConfigurations], False),
}
class AccessControlTranslation(AWSProperty):
props = {
'Owner': (basestring, True),
}
class EncryptionConfiguration(AWSProperty):
props = {
'ReplicaKmsKeyID': (basestring, True),
}
class ReplicationConfigurationRulesDestination(AWSProperty):
props = {
'AccessControlTranslation': (AccessControlTranslation, False),
'Account': (basestring, False),
'Bucket': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
'StorageClass': (basestring, False),
}
class SseKmsEncryptedObjects(AWSProperty):
props = {
'Status': (basestring, True),
}
class SourceSelectionCriteria(AWSProperty):
props = {
'SseKmsEncryptedObjects': (SseKmsEncryptedObjects, True),
}
class ReplicationConfigurationRules(AWSProperty):
props = {
'Destination': (ReplicationConfigurationRulesDestination, True),
'Id': (basestring, False),
'Prefix': (basestring, True),
'SourceSelectionCriteria': (SourceSelectionCriteria,
rogerwang/chromium | tools/checkdeps/checkdeps.py | Python | bsd-3-clause | 17,591 | 0.008925
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" commands in source files. Any source file including something not
permitted by the DEPS files will fail.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you have
any additional include rules. You can add (using "+") or subtract (using "-")
from the previously specified rules (including module-level deps).
include_rules = {
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter"
}
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
import os
import optparse
import pipes
import re
import sys
import copy
# Variable name used in the DEPS file to specify module-level deps.
DEPS_VAR_NAME = "deps"
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = "include_rules"
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = "skip_child_includes"
# The maximum number of lines to check in each source file before giving up.
MAX_LINES = 150
# The maximum line length; this keeps the check efficient in the case of very
# long lines (which can't be #includes).
MAX_LINE_LENGTH = 128
# Set to true for more output. This is set by the command line options.
VERBOSE = False
# This regular expression will be used to extract filenames from include
# statements.
EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
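# e.g. (illustrative, not in the original file): this pattern matches the line
# '#include "base/foo.h"' and captures 'base/foo.h'.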
# In lowercase, using forward slashes as directory separators, ending in a
# forward slash. Set by the command line options.
BASE_DIRECTORY = ""
# The directories which contain the sources managed by git.
GIT_SOURCE_DIRECTORY = set()
# Specifies a single rule for an include, which can be either allow or disallow.
class Rule(object):
def __init__(self, allow, dir, source):
self._allow = allow
self._dir = dir
self._source = source
def __str__(self):
if (self._allow):
return '"+%s" from %s.' % (self._dir, self._source)
return '"-%s" from %s.' % (self._dir, self._source)
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + "/")
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + "/")
def ParseRuleString(rule_string, source):
"""Returns a tuple of a boolean indicating whether the directory is an allow
rule, and a string holding the directory name.
"""
if len(rule_string) < 1:
raise Exception('The rule string "%s" is too short\nin %s' %
(rule_string, source))
if rule_string[0] == "+":
return (True, rule_string[1:])
if rule_string[0] == "-":
return (False, rule_string[1:])
raise Exception('The rule string "%s" does not begin with a "+" or a "-"' %
rule_string)
class Rules:
def __init__(self):
"""Initializes the current rules with an empty rule list."""
self._rules = []
def __str__(self):
ret = "Rules = [\n"
ret += "\n".join([" %s" % x for x in self._rules])
ret += "]\n"
return ret
def AddRule(self, rule_string, source):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
"""
(add_rule, rule_dir) = ParseRuleString(rule_string, source)
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)]
self._rules.insert(0, Rule(add_rule, rule_dir, source))
def DirAllowed(self, allowed_dir):
"""Returns a tuple (success, message), where success indicates if the given
directory is allowed given the current set of rules, and the message tells
why if the comparison failed."""
for rule in self._rules:
if rule.ChildOrMatch(allowed_dir):
# This rule applies.
if rule._allow:
return (True, "")
return (False, rule.__str__())
# No rules apply, fail.
return (False, "no rule applying")
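# Hedged usage sketch (not part of the original checkdeps.py): composing rules
# and querying them; the rule strings and sources below are illustrative.
#
#   rules = Rules()
#   rules.AddRule("+base", "example DEPS")
#   rules.AddRule("-base/evil", "example DEPS")
#   rules.DirAllowed("base/strings")   # -> (True, "")
#   rules.DirAllowed("base/evil/sub")  # -> (False, '"-base/evil" from example DEPS.')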
def ApplyRules(existing_rules, deps, includes, cur_dir):
"""Applies the given deps and include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
deps: The list of imports from the "deps" section of the DEPS file.
include: The list of rules from the "include_rules" section of DEPS.
cur_dir: The current directory. We will create an implicit rule that
allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.copy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
if cur_dir.lower().startswith(BASE_DIRECTORY):
relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:]
# Normalize path separators to slashes.
relative_dir = relative_dir.replace("\\", "/")
source = relative_dir
if len(source) == 0:
source = "top level" # Make the help string a little more meaningful.
rules.AddRule("+" + relative_dir, "Default rule for " + source)
else:
raise Exception("Internal error: base directory is not at the beginning" +
" for\n %s and base dir\n %s" %
(cur_dir, BASE_DIRECTORY))
# Next apply the DEPS additions, these are all allowed. Note that DEPS start
# out with "src/" which we want to trim.
for (index, key) in enumerate(deps):
if key.startswith("src/"):
key = key[4:]
rules.AddRule("+" + key, relative_dir + "'s deps for " + key)
# Last, apply the additional explicit rules.
for (index, rule_str) in enumerate(includes):
if not len(relative_dir):
rule_description = "the top level include_rules"
else:
rule_description = relative_dir + "'s include_rules"
rules.AddRule(rule_str, rule_description)
return rules
def ApplyDirectoryRules(existing_rules, dir_name):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Toplevel DEPS files can contain
module dependencies
|
asidev/aybu-manager
|
tests/test_activity_log.py
|
Python
|
apache-2.0
| 7,363
| 0.001087
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import namedtuple
import os
import shutil
import stat
import tempfile
import unittest
from aybu.manager.activity_log import ActivityLog
from aybu.manager.activity_log.fs import (mkdir,
create,
copy,
mv,
rm,
rmdir,
rmtree)
from aybu.manager.activity_log.exc import TransactionError
from aybu.manager.activity_log.template import render
class ActivityLogTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_create(self):
al = ActivityLog()
# test rollback
        file_ = os.path.join(self.tempdir, 'test.txt')
al.add(create, file_)
self.assertTrue(os.path.exists(file_))
al.rollback()
self.assertFalse(os.path.exists(file_))
        # test successful create
al.add(create, file_)
al.commit()
self.assertTrue(os.path.exists(file_))
        # test unsuccessful create
with self.assertRaises(OSError):
al.add(create, file_)
self.assertTrue(os.path.exists(file_))
def test_transaction_status(self):
al = ActivityLog(autobegin=False)
with self.assertRaises(TransactionError):
al.commit()
with self.assertRaises(TransactionError):
al.rollback()
al.begin()
al.commit()
with self.assertRaises(TransactionError):
al.commit()
def test_transaction(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
join = os.path.join
def dostuff():
al.add(mkdir, dir_)
al.add(create, join(dir_, 'testfile.txt'), content="Test")
al.add(copy, join(dir_, 'testfile.txt'), join(dir_, 'test2.txt'))
dostuff()
al.rollback()
self.assertFalse(os.path.exists(join(dir_, 'test2.txt')))
self.assertFalse(os.path.exists(join(dir_, 'testfile.txt')))
self.assertFalse(os.path.exists(dir_))
dostuff()
al.commit()
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.exists(join(dir_, 'testfile.txt')))
self.assertTrue(os.path.exists(join(dir_, 'test2.txt')))
def test_failed_rollback(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
inner_dir = os.path.join(dir_, 'inner')
al.add(mkdir, dir_)
al.add(mkdir, inner_dir)
os.chmod(dir_, stat.S_IRUSR|stat.S_IXUSR)
with self.assertRaises(OSError):
al.rollback()
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.exists(inner_dir))
os.chmod(dir_, stat.S_IRWXU | stat.S_IRWXG)
def test_error_on_exists(self):
al = ActivityLog()
dir_ = os.path.join(self.tempdir, 'test')
al.add(mkdir, dir_)
al.commit()
al.add(mkdir, dir_, error_on_exists=False)
al.rollback()
self.assertTrue(os.path.exists(dir_))
def test_render(self):
al = ActivityLog()
instance = namedtuple('Instance', ['paths', 'environment'])(
paths=namedtuple('Paths', ['pyramid_config', 'alembic_config'])(
pyramid_config='MYDUMMYCONFIG',
alembic_config='MYDUMMYCONFIG'
),
environment= namedtuple('Environment', ['settings',
'smtp_config',
'uwsgi_config',
'os_config'])(
smtp_config=None,
uwsgi_config=None,
os_config=None,
settings=None
)
)
template_name = 'main.py.mako'
target = os.path.join(self.tempdir, 'main.py')
al.add(render, template_name, target, instance=instance)
self.assertTrue(os.path.exists(target))
with open(target) as f:
self.assertIn('MYDUMMYCONFIG', f.read())
al.rollback()
self.assertFalse(os.path.exists(target))
        al.add(render, template_name, target, deferred=True, instance=instance)
        self.assertFalse(os.path.exists(target))
al.commit()
self.assertTrue(os.path.exists(target))
def test_delete(self):
al = ActivityLog()
testfile = os.path.join(self.tempdir, 'test.txt')
with self.assertRaises(OSError):
al.add(rm, testfile)
al.add(rm, testfile, error_on_not_exists=False)
al.commit()
with open(testfile, "w") as f:
f.write("###")
al.add(rm, testfile)
self.assertFalse(os.path.exists(testfile))
al.rollback()
self.assertTrue(os.path.exists(testfile))
al.add(rm, testfile)
self.assertFalse(os.path.exists(testfile))
al.commit()
self.assertFalse(os.path.exists(testfile))
testdir = os.path.join(self.tempdir, 'test')
al.add(mkdir, testdir)
al.commit()
# test rmdir
al.add(rmdir, testdir)
self.assertFalse(os.path.exists(testdir))
al.rollback()
self.assertTrue(os.path.exists(testdir))
al.add(rmdir, testdir)
al.commit()
self.assertFalse(os.path.exists(testdir))
# test rmtree
al.add(mkdir, testdir)
inner = os.path.join(testdir, 'inner')
al.add(mkdir, inner)
al.commit()
al.add(rmtree, testdir)
self.assertFalse(os.path.exists(testdir))
al.rollback()
self.assertTrue(os.path.exists(testdir))
al.add(rmtree, testdir)
al.commit()
self.assertFalse(os.path.exists(testdir))
def test_mv(self):
al = ActivityLog()
source = os.path.join(self.tempdir, "source")
destination = os.path.join(self.tempdir, "destination")
os.mkdir(source)
os.mkdir(destination)
with self.assertRaises(OSError):
al.add(mv, source, destination)
shutil.rmtree(destination)
al.add(mv, source, destination)
self.assertFalse(os.path.exists(source))
self.assertTrue(os.path.exists(destination))
al.rollback()
self.assertTrue(os.path.exists(source))
self.assertFalse(os.path.exists(destination))
al.add(mv, source, destination)
al.commit()
self.assertFalse(os.path.exists(source))
self.assertTrue(os.path.exists(destination))
|
penny4860/SVHN-deep-digit-detector
|
tests/cifar_loader.py
|
Python
|
mit
| 487
| 0.008214
|
import cPickle
import numpy as np
import cv2
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
files = ['../../datasets/svhn/cifar-10-batches-py/data_batch_1']
dict = unpickle(files[0])
images = dict['data'].reshape(-1, 3, 32, 32)
labels = np.array(dict['labels'])
images = np.swapaxes(images, 1, 3)
#images[0] = cv2.cvtColor(images[0], cv2.COLOR_RGB2BGR)
cv2.imshow("", images[1000])
cv2.waitKey(0)
cv2.destroyAllWindows()
|
danriti/python-traceview
|
docs/conf.py
|
Python
|
mit
| 8,222
| 0.006324
|
# -*- coding: utf-8 -*-
#
# traceview documentation build configuration file, created by
# sphinx-quickstart on Fri May 2 20:12:10 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import traceview
from traceview import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'traceview'
copyright = u'2016, Dan Riti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'traceviewdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'traceview.tex', u'traceview Documentation',
u'Dan Riti', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'traceview', u'traceview Documentation',
[u'Dan Riti'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'traceview', u'traceview Documentation',
u'Dan Riti', 'traceview', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
the5fire/wechat
|
setup.py
|
Python
|
apache-2.0
| 542
| 0.009225
|
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
readme = open('README.rst').read()
setup(
name='wecha',
version='${version}',
description='',
long_description=readme,
author='the5fire',
author_email='thefivefire@gmail.com',
url='http://chat.the5fire.com',
packages=['src',],
package_data={
'src':['*.py', 'static/*', 'templates/*'],
},
include_package_data = True,
install_requires=[
'web.py',
'jinja2',
        'gunicorn',
],
)
|
arm-hpc/allinea_json_analysis
|
PR_JSON_Scripts/plot_pr_bar.py
|
Python
|
apache-2.0
| 5,284
| 0.005678
|
#!/usr/bin/env python
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import argparse
import json
from pr_json_common import *
import sys
sys.path.append('../JSON_Common')
from json_dict_common import *
def plot_metrics_as_bar(fileList, metricList, labelList, threads, ylabel):
"""
    Plot metrics on a bar chart from the list of metrics supplied, where the
metric values are read from the list of files supplied. It is assumed that
the list of files are generated from a series of runs which show strong
scaling of a code
Args:
fileList (list): List of filenames from which to read information
metricList (list): List of metrics to read
labelList (list): List of labels for the metrics to use in the legend
threads (bool): Indicates whether threads or processes are used
ylabel (str): Label for the y-axis
Returns:
Nothing
"""
yData = {}
for filename in fileList:
profileDict = {}
# Read the json in from file
with open(filename, 'r') as f:
profileDict = json.load(f)
# Get the number of processes or threads used
numProcs = get_num_threads(profileDict) if threads else get_num_processes(profileDict)
# Read the given metrics and update the values to plot
yData.update({numProcs : get_dict_field_vals(profileDict, metricList)})
# Plot the data
# Get the x-axis data
xData = range(len(yData))
# Get the width of an individual bar
totalBarsWidth = 0.95
barsPerProc = len(list(yData.values())[0])
barWidth = float(totalBarsWidth) / barsPerProc
barsPerProc -= 1
# For each of the processes plot a bar
colors = ['r', 'b', 'g', 'k']
sortedKeys = sorted(yData.keys())
xInd = 0
for key in sortedKeys:
# For each of the metrics plot a bar
barData = yData[key]
ind = 0
barLoc = xData[xInd] - float(barsPerProc) * barWidth / 2
barHandles = []
for barItem in barData:
barHandles.append(plt.bar(barLoc, barItem, width=barWidth, color=colors[ind % len(colors)],
align='center', label=labelList[ind]))
barLoc += barWidth
ind += 1
xInd += 1
plt.xticks(xData, sortedKeys)
if (threads):
plt.xlabel("Number of Threads")
else:
plt.xlabel("Number of Processes")
plt.ylabel(ylabel)
plt.legend(handles=barHandles, loc=1, bbox_to_anchor=(1.1, 1.1))
#### End of function plot_metrics_as_bar
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description="Utility to plot a bar chart" +
" of different metrics stored in a series of JSON files, assumed to" +
" be the export of a Performance Report. It is also assumed " +
"that the files are generated from a series of runs that show " +
"strong / weak scaling of an application")
# Add a file containing a list of files to read data from
parser.add_argument("infile", help="JSON file to read a list of input files from",
type=argparse.FileType('r'))
# Add an argument to provide a file with a list of metrics in
parser.add_argument("metricFile", help="File from which to read a list of " +
"metrics to show. The contents of the file is of the following form:\n" +
"\tlist, of, dictionary, keys [: label]\n" +
"where the label is optional, and is used as a label in a legend",
type=argparse.FileType('r'))
# Add an argument to show if the strong scaling is for threads or processes
parser.add_argument("--threads", help="Indicates whether threads or processes" +
" should used in the scaling analysis", action="store_true",
default=False)
defaultYLabel = "Proportion of Time (%)"
parser.add_argument("--ylabel", help="Label for the y-axis. Default is " +
defaultYLabel.replace('%','%%'), default=defaultYLabel)
args = parser.parse_args()
# Read in the list of files
fileList = [line.strip() for line in args.infile.readlines()]
# Read in the list of metrics
metricList = []
labelList = []
for line in args.metricFile.readlines():
vals = line.strip().split(':')
if (len(vals) == 1):
metricList.append([val.strip() for val in vals[0].split(',')])
labelList.append(''.join(vals[0].split()[-1]))
else:
metricList.append([val.strip() for val in vals[0].split(',')])
labelList.append(' '.join(vals[1:]))
# Plot the metrics from the files
plot_metrics_as_bar(fileList, metricList, labelList, args.threads, args.ylabel)
plt.show()
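# Hedged illustration (not part of the original script): one metricFile line in
# the documented form "list, of, dictionary, keys [: label]"; the keys and
# label below are made up.
#
#   data, mpi, percent : MPI time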
|
fras2560/graph-helper
|
algorithms/critical.py
|
Python
|
apache-2.0
| 49
| 0
|
'''
Created on Oct 20, 2015
@author: Dallas
'''
|
seakers/daphne_brain
|
AT/migrations/0016_auto_20200909_1730.py
|
Python
|
mit
| 1,470
| 0.001361
|
# Generated by Django 3.0.7 on 2020-09-09 22:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('daphne_context', '0007_auto_20191111_1756'),
('AT', '0015_auto_20200909_1334'),
]
operations = [
migrations.RemoveField(
model_name='atcontext',
name='all_steps_from_procedure',
),
migrations.RemoveField(
model_name='atcontext',
name='current_step_pointer',
        ),
        migrations.RemoveField(
            model_name='atcontext',
            name='next_step_pointer',
        ),
        migrations.RemoveField(
            model_name='atcontext',
name='previous_step_pointer',
),
migrations.CreateModel(
name='ATDialogueContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('all_steps_from_procedure', models.TextField(default='')),
('next_step_pointer', models.IntegerField(default=-1)),
('previous_step_pointer', models.IntegerField(default=-1)),
('current_step_pointer', models.IntegerField(default=-1)),
('dialoguecontext', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_context.DialogueContext')),
],
),
]
|
botswana-harvard/edc-contact
|
edc_contact/models/call_log.py
|
Python
|
gpl-2.0
| 1,199
| 0
|
from django.db import models
from django_crypto_fields.fields import EncryptedTextField
from edc_base.model.models import BaseUuidModel
try:
from edc_sync.mixins import SyncMixin
except ImportError:
SyncMixin = type('SyncMixin', (object, ), {})
from ..managers import CallLogManager
class CallLog(SyncMixin, BaseUuidModel):
"""Maintains a log of calls for a particular participant."""
    subject_identifier = models.CharField(
        verbose_name="Subject Identifier",
max_length=50,
blank=True,
db_index=True,
unique=True,
)
locator_information = EncryptedTextField(
        help_text=('This information has been imported from '
'the previous locator. You may update as required.')
)
contact_notes = EncryptedTextField(
null=True,
blank=True,
help_text=''
)
label = models.CharField(
max_length=25,
null=True,
editable=False,
help_text="from followup list"
)
# history = AuditTrail()
objects = CallLogManager()
def natural_key(self):
        return (self.subject_identifier, )  # Django natural keys are tuples
class Meta:
app_label = 'edc_contact'
|
Feduch/pyMessengerBotApi
|
messengerbot/__init__.py
|
Python
|
gpl-3.0
| 174
| 0.005747
|
from .api.api import Api
from .api.bot_configuration import BotConfiguration
from .version import __version__
__all__ = ['Api', 'BotConfiguration']
__version__ = __version__
|
philanthropy-u/edx-platform
|
lms/djangoapps/courseware/field_overrides.py
|
Python
|
agpl-3.0
| 11,496
| 0.001131
|
"""
This module provides a :class:`~xblock.field_data.FieldData` implementation
which wraps another `FieldData` object and provides overrides based on the
user. The use of providers allows for overrides that are arbitrarily
extensible. One provider is found in `lms.djangoapps.courseware.student_field_overrides`
which allows for fields to be overridden for individual students. One can
envision other providers being written that allow for fields to be overridden
based on membership of a student in a cohort, or similar. The use of an
extensible, modular architecture allows for overrides being done in ways not
envisioned by the authors.
Currently, this module is used in the `module_render` module in this same
package and is used to wrap the `authored_data` when constructing an
`LmsFieldData`. This means overrides will be in effect for all scopes covered
by `authored_data`, e.g. course content and settings stored in Mongo.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from django.conf import settings
from edx_django_utils.cache import DEFAULT_REQUEST_CACHE
from xblock.field_data import FieldData
from xmodule.modulestore.inheritance import InheritanceMixin
NOTSET = object()
ENABLED_OVERRIDE_PROVIDERS_KEY = u'courseware.field_overrides.enabled_providers.{course_id}'
ENABLED_MODULESTORE_OVERRIDE_PROVIDERS_KEY = u'courseware.modulestore_field_overrides.enabled_providers.{course_id}'
def resolve_dotted(name):
"""
Given the dotted name for a Python object, performs any necessary imports
and returns the object.
"""
names = name.split('.')
path = names.pop(0)
target = __import__(path)
while names:
segment = names.pop(0)
path += '.' + segment
try:
target = getattr(target, segment)
except AttributeError:
__import__(path)
target = getattr(target, segment)
return target
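# e.g. (illustrative, not in the original module):
#   resolve_dotted('os.path.join')  # -> the os.path.join function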
def _lineage(block):
"""
Returns an iterator over all ancestors of the given block, starting with
its immediate parent and ending at the root of the block tree.
"""
parent = block.get_parent()
while parent:
yield parent
parent = parent.get_parent()
class _OverridesDisabled(threading.local):
"""
A thread local used to manage state of overrides being disabled or not.
"""
disabled = ()
_OVERRIDES_DISABLED = _OverridesDisabled()
@contextmanager
def disable_overrides():
"""
A context manager which disables field overrides inside the context of a
`with` statement, allowing code to get at the `original` value of a field.
"""
prev = _OVERRIDES_DISABLED.disabled
_OVERRIDES_DISABLED.disabled += (True,)
yield
_OVERRIDES_DISABLED.disabled = prev
def overrides_disabled():
"""
Checks to see whether overrides are disabled in the current context.
Returns a boolean value. See `disable_overrides`.
"""
return bool(_OVERRIDES_DISABLED.disabled)
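# Hedged usage sketch (not part of the original module): reading a field's
# un-overridden value; `field_data` and `block` are illustrative names for an
# OverrideFieldData instance and an XBlock.
#
#   with disable_overrides():
#       original_due = field_data.get(block, 'due')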
class FieldOverrideProvider(object):
"""
Abstract class which defines the interface that a `FieldOverrideProvider`
must provide. In general, providers should derive from this class, but
it's not strictly necessary as long as they correctly implement this
interface.
A `FieldOverrideProvider` implementation is only responsible for looking up
field overrides. To set overrides, there will be a domain specific API for
the concrete override implementation being used.
"""
__metaclass__ = ABCMeta
def __init__(self, user, fallback_field_data):
self.user = user
self.fallback_field_data = fallback_field_data
@abstractmethod
def get(self, block, name, default): # pragma no cover
"""
Look for an override value for the field named `name` in `block`.
Returns the overridden value or `default` if no override is found.
"""
raise NotImplementedError
@abstractmethod
def enabled_for(self, course): # pragma no cover
"""
Return True if this provider should be enabled for a given course,
and False otherwise.
Concrete implementations are responsible for implementing this method.
Arguments:
course (CourseModule or None)
Returns:
bool
"""
return False
class OverrideFieldData(FieldData):
"""
A :class:`~xblock.field_data.FieldData` which wraps another `FieldData`
object and allows for fields handled by the wrapped `FieldData` to be
    overridden by arbitrary providers.
Providers are configured by use of the Django setting,
`FIELD_OVERRIDE_PROVIDERS` which should be a tuple of dotted names of
:class:`FieldOverrideProvider` concrete implementations. Note that order
    is important for this setting. Override providers will be tried in the order
configured in the setting. The first provider to find an override 'wins'
for a particular field lookup.
"""
provider_classes = None
@classmethod
def wrap(cls, user, course, wrapped):
"""
Will return a :class:`OverrideFieldData` which wraps the field data
given in `wrapped` for the given `user`, if override providers are
        configured. If no override providers are configured, using the Django
setting, `FIELD_OVERRIDE_PROVIDERS`, returns `wrapped`, eliminating
any performance impact of this feature if no override providers are
configured.
"""
if cls.provider_classes is None:
cls.provider_classes = tuple(
(resolve_dotted(name) for name in
settings.FIELD_OVERRIDE_PROVIDERS))
enabled_providers = cls._providers_for_course(course)
if enabled_providers:
# TODO: we might not actually want to return here. Might be better
# to check for instance.providers after the instance is built. This
# would allow for the case where we have registered providers but
# none are enabled for the provided course
return cls(user, wrapped, enabled_providers)
return wrapped
@classmethod
def _providers_for_course(cls, course):
"""
Return a filtered list of enabled providers based
on the course passed in. Cache this result per request to avoid
needing to call the provider filter api hundreds of times.
Arguments:
course: The course XBlock
"""
request_cache = DEFAULT_REQUEST_CACHE
if course is None:
cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(course_id='None')
else:
            cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(course_id=unicode(course.id))
enabled_providers = request_cache.data.get(cache_key, NOTSET)
if enabled_providers == NOTSET:
enabled_providers = tuple(
(provider_class for provider_class in cls.provider_classes if provider_class.enabled_for(course))
)
request_cache.data[cache_key] = enabled_providers
return enabled_providers
def __init__(self, user, fallback, providers):
self.fallback = fallback
self.providers = tuple(provider(user, fallback) for provider in providers)
def get_override(self, block, name):
"""
Checks for an override for the field identified by `name` in `block`.
Returns the overridden value or `NOTSET` if no override is found.
"""
if not overrides_disabled():
for provider in self.providers:
value = provider.get(block, name, NOTSET)
if value is not NOTSET:
return value
return NOTSET
def get(self, block, name):
value = self.get_override(block, name)
if value is not NOTSET:
return value
return self.fallback.get(block, name)
def set(self, block, name, value):
self.fallback.set(block, name, value)
def delete(self, block, name):
self.fallback.delete(block, name)
def has(self, block, name):
if not self.providers:
|
f5devcentral/f5-cccl
|
f5_cccl/resource/ltm/policy/action.py
|
Python
|
apache-2.0
| 5,095
| 0.000196
|
"""Provides a class for managing BIG-IP L7 Rule Action resources."""
# coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5_cccl.resource import Resource
LOGGER = logging.getLogger(__name__)
class Action(Resource):
"""L7 Rule Action class."""
# The property names class attribute defines the names of the
# properties that we wish to compare.
properties = dict(
expression=None,
forward=False,
location=None,
pool=None,
redirect=False,
request=True,
reset=False,
setVariable=False,
tcl=False,
tmName=None,
httpHost=False,
httpUri=False,
path=None,
replace=False,
value=None,
shutdown=True,
select=True,
)
def __init__(self, name, data):
"""Initialize the Action object.
        Actions do not have explicit partition attributes; they are
        implied by the partition of the rule to which they belong.
"""
super(Action, self).__init__(name, partition=None)
        # Actions are only supported on requests.
self._data['request'] = True
# Is this a forwarding action?
if data.get('forward', False):
self._data['forward'] = True
            # Yes, there are four supported forwarding actions:
            # forward to pool, reset, select, and shutdown; these are
            # mutually exclusive options.
pool = data.get('pool', None)
reset = data.get('reset', False)
# This allows you to specify an empty node. This is
# what Container Connector does.
select = data.get('select', False)
# This was added in 13.1.0
shutdown = data.get('shutdown', False)
if pool:
self._data['pool'] = pool
elif reset:
self._data['reset'] = reset
elif select:
self._data['select'] = select
elif shutdown:
self._data['shutdown'] = shutdown
else:
raise ValueError(
"Unsupported forward action, must be one of reset, "
"forward to pool, select, or shutdown.")
# Is this a redirect action?
elif data.get('redirect', False):
self._data['redirect'] = True
# Yes, set the location and httpReply attribute
            self._data['location'] = data.get('location', None)
            self._data['httpReply'] = data.get('httpReply', True)
        # Is this a setVariable action?
        elif data.get('setVariable', False):
            self._data['setVariable'] = True
# Set the variable name and the value
self._data['tmName'] = data.get('tmName', None)
self._data['expression'] = data.get('expression', None)
self._data['tcl'] = True
# Is this a replace URI host action?
elif data.get('replace', False) and data.get('httpHost', False):
self._data['replace'] = True
self._data['httpHost'] = True
self._data['value'] = data.get('value', None)
# Is this a replace URI path action?
elif data.get('replace', False) and data.get('httpUri', False) and \
data.get('path', False):
self._data['replace'] = True
self._data['httpUri'] = True
self._data['path'] = data.get('path', None)
self._data['value'] = data.get('value', None)
# Is this a replace URI action?
elif data.get('replace', False) and data.get('httpUri', False):
self._data['replace'] = True
self._data['httpUri'] = True
self._data['value'] = data.get('value', None)
else:
# Only forward, redirect and setVariable are supported.
raise ValueError("Unsupported action, must be one of forward, "
"redirect, setVariable, replace, or reset.")
def __eq__(self, other):
"""Check the equality of the two objects.
Do a straight data to data comparison.
"""
if not isinstance(other, Action):
return False
return super(Action, self).__eq__(other)
def __str__(self):
return str(self._data)
def _uri_path(self, bigip):
"""Return the URI path of an action object.
Not implemented because the current implementation does
not manage Actions individually."""
raise NotImplementedError
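# Hedged usage sketch (not part of the original module): building a
# forward-to-pool action from the kind of `data` dict __init__ accepts; the
# name and pool path are illustrative.
#
#   action = Action('0', {'forward': True, 'pool': '/Common/app_pool'})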
|
cmshobe/landlab
|
landlab/components/taylor_nonlinear_hillslope_flux/__init__.py
|
Python
|
mit
| 108
| 0
|
from .taylor_nonlinear_hillslope_flux import TaylorNonLinearDiffuser
__all__ = ["TaylorNonLinearDiffuser"]
|
alexin-ivan/zfs-doc
|
filters/pandoc_fignos.py
|
Python
|
mit
| 11,034
| 0.002991
|
#! /usr/bin/env python
"""pandoc-fignos: a pandoc filter that inserts figure nos. and refs."""
# Copyright 2015, 2016 Thomas J. Duck.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# OVERVIEW
#
# The basic idea is to scan the AST two times in order to:
#
# 1. Insert text for the figure number in each figure caption.
# For LaTeX, insert \label{...} instead. The figure labels
# and associated figure numbers are stored in the global
# references tracker.
#
# 2. Replace each reference with a figure number. For LaTeX,
# replace with \ref{...} instead.
#
# There is also an initial scan to do some preprocessing.
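# Illustrative input (not part of the original filter), assuming the usual
# pandoc-fignos syntax: an attributed figure and an in-text reference to it.
#
#   ![The caption.](img.png){#fig:plot}
#   ... as @fig:plot shows ...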
import re
import functools
import itertools
import io
import sys
# pylint: disable=import-error
import pandocfilters
from pandocfilters import stringify, walk
from pandocfilters import RawInline, Str, Space, Para, Plain, Cite, elt
from pandocattributes import PandocAttributes
# Create our own pandoc image primitives to accommodate different pandoc
# versions.
# pylint: disable=invalid-name
Image = elt('Image', 2) # Pandoc < 1.16
AttrImage = elt('Image', 3) # Pandoc >= 1.16
# Patterns for matching labels and references
LABEL_PATTERN = re.compile(r'(fig:[\w/-]*)(.*)')
REF_PATTERN = re.compile(r'@(fig:[\w/-]+)')
# Detect python 3
PY3 = sys.version_info > (3,)
# Pandoc uses UTF-8 for both input and output; so must we
if PY3: # Force utf-8 decoding (decoding of input streams is automatic in py3)
STDIN = io.TextIOWrapper(sys.stdin.buffer, 'utf-8', 'strict')
STDOUT = io.TextIOWrapper(sys.stdout.buffer, 'utf-8', 'strict')
else: # No decoding; utf-8-encoded strings in means the same out
STDIN = sys.stdin
STDOUT = sys.stdout
# pylint: disable=invalid-name
references = {} # Global references tracker
def is_attrimage(key, value):
"""True if this is an attributed image; False otherwise."""
try:
if key == 'Para' and value[0]['t'] == 'Image':
# Old pandoc < 1.16
if len(value[0]['c']) == 2:
s = stringify(value[1:]).strip()
if s.startswith('{') and s.endswith('}'):
return True
else:
return False
# New pandoc >= 1.16
else:
assert len(value[0]['c']) == 3
return True # Pandoc >= 1.16 has image attributes by default
# pylint: disable=bare-except
except:
return False
def parse_attrimage(value):
"""Parses an attributed image."""
if len(value[0]['c']) == 2: # Old pandoc < 1.16
attrs, (caption, target) = None, value[0]['c']
s = stringify(value[1:]).strip() # The attribute string
# Extract label from attributes (label, classes, kvs)
label = PandocAttributes(s, 'markdown').to_pandoc()[0]
if label == 'fig:': # Make up a unique description
label = label + '__'+str(hash(target[0]))+'__'
return attrs, caption, target, label
else: # New pandoc >= 1.16
assert len(value[0]['c']) == 3
attrs, caption, target = value[0]['c']
s = stringify(value[1:]).strip() # The attribute string
# Extract label from attributes
label = attrs[0]
if label == 'fig:': # Make up a unique description
label = label + '__'+str(hash(target[0]))+'__'
return attrs, caption, target, label
def is_ref(key, value):
"""True if this is a figure reference; False otherwise."""
return key == 'Cite' and REF_PATTERN.match(value[1][0]['c']) and \
parse_ref(value)[1] in references
def parse_ref(value):
"""Parses a figure reference."""
prefix = value[0][0]['citationPrefix']
label = REF_PATTERN.match(value[1][0]['c']).groups()[0]
suffix = value[0][0]['citationSuffix']
return prefix, label, suffix
def ast(string):
"""Returns an AST representation of the string."""
toks = [Str(tok) for tok in string.split()]
spaces = [Space()]*len(toks)
ret = list(itertools.chain(*zip(toks, spaces)))
if string[0] == ' ':
ret = [Space()] + ret
return ret if string[-1] == ' ' else ret[:-1]
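# e.g. (illustrative, not in the original filter):
#   ast('Figure 1')  # -> [Str('Figure'), Space(), Str('1')]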
def is_broken_ref(key1, value1, key2, value2):
"""True if this is a broken link; False otherwise."""
try: # Pandoc >= 1.16
return key1 == 'Link' and value1[1][0]['t'] == 'Str' and \
value1[1][0]['c'].endswith('{@fig') \
and key2 == 'Str' and '}' in value2
except TypeError: # Pandoc < 1.16
return key1 == 'Link' and value1[0][0]['t'] == 'Str' and \
value1[0][0]['c'].endswith('{@fig') \
and key2 == 'Str' and '}' in value2
def repair_broken_refs(value):
"""Repairs references broken by pandoc's --autolink_bare_uris."""
# autolink_bare_uris splits {@fig:label} at the ':' and treats
# the first half as if it is a mailto url and the second half as a string.
# Let's replace this mess with Cite and Str elements that we normally
# get.
flag = False
for i in range(len(value)-1):
if value[i] == None:
continue
if is_broken_ref(value[i]['t'], value[i]['c'],
value[i+1]['t'], value[i+1]['c']):
flag = True # Found broken reference
try: # Pandoc >= 1.16
s1 = value[i]['c'][1][0]['c'] # Get the first half of the ref
except TypeError: # Pandoc < 1.16
s1 = value[i]['c'][0][0]['c'] # Get the first half of the ref
s2 = value[i+1]['c'] # Get the second half of the ref
ref = '@fig' + s2[:s2.index('}')] # Form the reference
prefix = s1[:s1.index('{@fig')] # Get the prefix
suffix = s2[s2.index('}')+1:] # Get the suffix
# We need to be careful with the prefix string because it might be
# part of another broken reference. Simply put it back into the
# stream and repeat the preprocess() call.
if i > 0 and value[i-1]['t'] == 'Str':
value[i-1]['c'] = value[i-1]['c'] + prefix
value[i] = None
else:
value[i] = Str(prefix)
# Put fixed reference in as a citation that can be processed
value[i+1] = Cite(
[{"citationId":ref[1:],
"citationPrefix":[],
"citationSuffix":[Str(suffix)],
"citationNoteNum":0,
"citationMode":{"t":"AuthorInText", "c":[]},
"citationHash":0}],
[Str(ref)])
if flag:
return [v for v in value if v is not None]
def is_braced_ref(i, value):
"""Returns true if a reference is braced; otherwise False."""
return is_ref(value[i]['t'], value[i]['c']) \
and value[i-1]['t'] == 'Str' and value[i+1]['t'] == 'Str' \
and value[i-1]['c'].endswith('{') and value[i+1]['c'].startswith('}')
def remove_braces(value):
"""Search for refe
|
rences and remove curly braces around them."""
flag = False
for i in
|
range(len(value)-1)[1:]:
if is_braced_ref(i, value):
flag = True # Found reference
# Remove the braces
value[i-1]['c'] = value[i-1]['c'][:-1]
value[i+1]['c'] = value[i+1]['c'][1:]
return flag
# pylint: disable=unused-argument
def preprocess(key, value, fmt, meta):
"""Preprocesses to correct for problems."""
if key in ('Para', 'Plain'):
while True:
newvalue = repair_broken_refs(value)
if newvalue:
value = newvalue
else:
break
if key
|
louyihua/edx-platform
|
lms/djangoapps/django_comment_client/management/commands/assign_role.py
|
Python
|
agpl-3.0
| 1,144
| 0
|
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django_comment_common.models import Role
from django.contrib.auth.models import User
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the role instead of adding it'),
)
args = '<user|email> <role> <course_id>'
help = 'Assign a discussion forum role to a user '
    def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Usage is assign_role {0}'.format(self.args))
name_or_email, role, course_id = args
role = Role.objects.get(name=role, course_id=course_id)
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
if options['remove']:
            user.roles.remove(role)
else:
user.roles.add(role)
print 'Success!'
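# Hedged usage sketch (not part of the original command): the positional
# arguments follow '<user|email> <role> <course_id>'; the values below are
# illustrative.
#
#   python manage.py assign_role staff@example.com Moderator MITx/6.002x/2013_Spring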
|
hmpf/nav
|
tests/functional/netmap_test.py
|
Python
|
gpl-3.0
| 394
| 0
|
"""Selenium tests for ne
|
tmap"""
def test_netmap_index_should_not_have_syntax_errors(selenium, base_url):
s
|
elenium.get("{}/netmap/".format(base_url))
log = selenium.get_log("browser")
syntax_errors = [
line
for line in log
if "syntaxerror" in line.get("message", "").lower()
and line.get("source") == "javascript"
]
assert not syntax_errors
|
tquilian/exelearningTest
|
twisted/pb/promise.py
|
Python
|
gpl-2.0
| 3,532
| 0.001133
|
# -*- test-case-name: twisted.pb.test.test_promise -*-
from twisted.python import util, failure
from twisted.internet import defer
id = util.unsignedID
EVENTUAL, FULFILLED, BROKEN = range(3)
class Promise:
"""I am a promise of a future result. I am a lot like a Deferred, except
that my promised result is usually an instance. I make it possible to
schedule method invocations on this future instance, returning Promises
for the results.
Promises are always in one of three states: Eventual, Fulfilled, and
Broken. (see http://www.erights.org/elib/concurrency/refmech.html for a
pretty picture). They start as Eventual, meaning we do not yet know
whether they will resolve or not. In this state, method invocations are
queued. Eventually the Promise will be 'resolved' into either the
Fulfilled or the Broken state. Fulfilled means that the promise contains
a live object to which methods can be dispatched synchronously. Broken
promises are incapable of invoking methods: they all result in Failure.
Method invocation is always asynchronous: it always returns a Promise.
"""
# all our internal methods are private, to avoid colliding with normal
# method names that users may invoke on our eventual target.
_state = EVENTUAL
_resolution = None
def __init__(self, d):
self._watchers = []
self._pendingMethods = []
d.addCallbacks(self._ready, self._broken)
def _wait_for_resolution(self):
        if self._state == EVENTUAL:
            d = defer.Deferred()
self._watchers.append(d)
else:
d = defer.succeed(self._resolution)
return d
def _ready(self, resolution):
self._resolution = resolution
self._state = FULFILLED
self._run_methods()
def _broken(self, f):
self._resolution = f
self._state = BROKEN
self._run_methods()
    def _invoke_method(self, name, args, kwargs):
if isinstance(self._resolution, failure.Failure):
return self._resolution
method = getattr(self._resolution, name)
res = method(*args, **kwargs)
return res
def _run_methods(self):
for (name, args, kwargs, result_deferred) in self._pendingMethods:
d = defer.maybeDeferred(self._invoke_method, name, args, kwargs)
d.addBoth(result_deferred.callback)
del self._pendingMethods
for d in self._watchers:
d.callback(self._resolution)
del self._watchers
def __repr__(self):
return "<Promise %#x>" % id(self)
def __getattr__(self, name):
if name.startswith("__"):
raise AttributeError
def newmethod(*args, **kwargs):
return self._add_method(name, args, kwargs)
return newmethod
def _add_method(self, name, args, kwargs):
if self._state == EVENTUAL:
d = defer.Deferred()
self._pendingMethods.append((name, args, kwargs, d))
else:
d = defer.maybeDeferred(self._invoke_method, name, args, kwargs)
return Promise(d)
def when(p):
"""Turn a Promise into a Deferred that will fire with the enclosed object
when it is ready. Use this when you actually need to schedule something
to happen in a synchronous fashion. Most of the time, you can just invoke
methods on the Promise as if it were immediately available."""
assert isinstance(p, Promise)
return p._wait_for_resolution()
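# Hedged usage sketch (not part of the original module): calling a method on a
# not-yet-resolved Promise and waiting for the result; `make_deferred_obj` is
# an illustrative placeholder returning a Deferred that fires with an object.
#
#   p = Promise(make_deferred_obj())
#   result_promise = p.someMethod(1, 2)   # queued until the object resolves
#   d = when(result_promise)              # a Deferred firing with the result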
|